// front/src/pages/FrenchChat.tsx
import React, { useState, useRef } from 'react';
import { Mic, Play, Square, StopCircle } from 'lucide-react';
import { useChat } from '../context/ChatContext';
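// NOTE: SpeechRecognition and SpeechRecognitionEvent are not part of
// TypeScript's bundled DOM typings. This file assumes ambient declarations
// are available (e.g. from the @types/dom-speech-recognition package) so the
// Web Speech API references below type-check.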
const FrenchChat = () => {
  const { messages, addMessage } = useChat();
  const [isRecording, setIsRecording] = useState(false);
  const [isPlaying, setIsPlaying] = useState(false);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const chunksRef = useRef<Blob[]>([]);
  const recognitionRef = useRef<SpeechRecognition | null>(null);
  const startRecording = async () => {
    try {
      // Prefer the standard constructor, falling back to the WebKit-prefixed
      // one (Chrome/Safari). Bail out cleanly on unsupported browsers.
      const SpeechRecognitionImpl =
        window.SpeechRecognition || window.webkitSpeechRecognition;
      if (!SpeechRecognitionImpl) {
        console.error('Speech recognition is not supported in this browser.');
        return;
      }
      const recognition = new SpeechRecognitionImpl();
      recognitionRef.current = recognition;
      recognition.lang = 'fr-FR';
      recognition.continuous = true;
      recognition.interimResults = true;
      recognition.onresult = (event: SpeechRecognitionEvent) => {
        // With continuous recognition, results[0] is always the first
        // utterance of the session, so checking results[0].isFinal would
        // misfire after the first segment. Look at the most recent result
        // instead, and add one message per finalized segment.
        const lastResult = event.results[event.results.length - 1];
        if (lastResult.isFinal) {
          const newMessage = {
            id: Date.now(),
            text: lastResult[0].transcript.trim(),
            isFrench: true,
            timestamp: new Date(),
          };
          addMessage(newMessage);
        }
      };
      recognition.start();
      setIsRecording(true);
      // Record the raw audio in parallel with recognition; the chunks are
      // collected in chunksRef but not consumed yet (see note below).
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      mediaRecorder.ondataavailable = (e) => {
        chunksRef.current.push(e.data);
      };
      chunksRef.current = [];
      mediaRecorder.start();
    } catch (err) {
      console.error('Error accessing microphone:', err);
    }
  };
  const stopRecording = () => {
    if (recognitionRef.current) {
      recognitionRef.current.stop();
    }
    if (mediaRecorderRef.current && isRecording) {
      mediaRecorderRef.current.stop();
      setIsRecording(false);
      // Release the microphone so the browser's recording indicator clears.
      mediaRecorderRef.current.stream.getTracks().forEach((track) => track.stop());
    }
  };
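  // The recorded chunks are collected above but never consumed in this
  // component. A minimal sketch of how they could be assembled once recording
  // stops (hypothetical handler, not wired up here):
  //
  //   mediaRecorder.onstop = () => {
  //     const audioBlob = new Blob(chunksRef.current, { type: 'audio/webm' });
  //     // e.g. send audioBlob to the backend for storage or processing
  //   };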
  const speakText = (text: string) => {
    if ('speechSynthesis' in window) {
      window.speechSynthesis.cancel(); // Stop any utterance already playing
      setIsPlaying(true);
      const utterance = new SpeechSynthesisUtterance(text);
      utterance.lang = 'fr-FR';
      utterance.rate = 0.9;
      utterance.pitch = 1.0;
      utterance.volume = 1.0;
      utterance.onend = () => {
        setIsPlaying(false);
      };
      utterance.onerror = () => {
        setIsPlaying(false);
      };
      window.speechSynthesis.speak(utterance);
    }
  };
  const stopSpeaking = () => {
    window.speechSynthesis.cancel();
    setIsPlaying(false);
  };
  return (
    <div className="container mx-auto p-4 max-w-2xl">
      <div className="bg-white rounded-lg shadow-md h-[calc(100vh-12rem)] flex flex-col">
        <div className="flex-1 overflow-y-auto p-4">
          {messages.map((message) => (
            <div
              key={message.id}
              className={`flex ${message.isFrench ? 'justify-end' : 'justify-start'} mb-4`}
            >
              <div
                className={`rounded-lg p-3 max-w-[70%] ${
                  message.isFrench
                    ? 'bg-blue-500 text-white'
                    : 'bg-white border border-gray-300'
                }`}
              >
                <p>{message.isFrench ? message.text : message.translatedText}</p>
                {!message.isFrench && (
                  <button
                    className="mt-2 p-2 bg-gray-100 rounded-full hover:bg-gray-200 transition-colors"
                    onClick={() => (isPlaying ? stopSpeaking() : speakText(message.translatedText || ''))}
                    title={isPlaying ? 'Arrêter la lecture' : 'Écouter le message'}
                  >
                    {isPlaying ? (
                      <StopCircle className="h-4 w-4 text-red-500" />
                    ) : (
                      <Play className="h-4 w-4" />
                    )}
                  </button>
                )}
              </div>
            </div>
          ))}
        </div>
        <div className="border-t p-4">
          <div className="flex items-center justify-center space-x-4">
            <button
              onClick={isRecording ? stopRecording : startRecording}
              className="relative p-4 rounded-full bg-blue-500 hover:bg-blue-600 text-white"
            >
              {isRecording ? (
                <Square className="h-6 w-6" />
              ) : (
                <Mic className="h-6 w-6" />
              )}
              {isRecording && (
                <div className="absolute inset-0 animate-ping rounded-full bg-blue-400 opacity-75"></div>
              )}
            </button>
            <p className="text-gray-600">Cliquer puis parler</p>
          </div>
        </div>
      </div>
    </div>
  );
};
export default FrenchChat;
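// FrenchChat calls useChat, so it must render inside the ChatContext provider.
// A minimal usage sketch, assuming the provider is exported as ChatProvider
// from ../context/ChatContext:
//
//   <ChatProvider>
//     <FrenchChat />
//   </ChatProvider>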