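// FrenchChat: a chat UI that transcribes spoken French with the Web Speech
// API, captures the raw audio with MediaRecorder, and reads the non-French
// side of the conversation aloud via speechSynthesis.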
import React, { useState, useRef } from 'react';
import { Mic, Play, Square } from 'lucide-react';
import { useChat } from '../context/ChatContext';
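
// NOTE: SpeechRecognition / webkitSpeechRecognition are not part of
// TypeScript's built-in DOM typings; this file assumes ambient declarations
// such as the "@types/dom-speech-recognition" package are available.
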
const FrenchChat = () => {
  const { messages, addMessage } = useChat();
  const [isRecording, setIsRecording] = useState(false);
  // MediaRecorder instance and the audio chunks it emits
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const chunksRef = useRef<Blob[]>([]);
  // Web Speech API recognizer used for live French transcription
  const recognitionRef = useRef<SpeechRecognition | null>(null);
  const startRecording = async () => {
    try {
      // Initialize speech recognition (vendor-prefixed in Chrome/Safari)
      const SpeechRecognition =
        window.SpeechRecognition || window.webkitSpeechRecognition;
      const recognition = new SpeechRecognition();
      recognitionRef.current = recognition;
      recognition.lang = 'fr-FR';
      recognition.continuous = true;
      recognition.interimResults = true;

      recognition.onresult = (event) => {
        // With continuous recognition, results accumulate in event.results;
        // commit a message only when the most recent result is final
        // (checking results[0] would re-send the first utterance repeatedly).
        const latestResult = event.results[event.results.length - 1];
        if (latestResult.isFinal) {
          const newMessage = {
            id: Date.now(),
            text: latestResult[0].transcript,
            isFrench: true,
            timestamp: new Date(),
          };
          addMessage(newMessage);
        }
      };

      recognition.start();
      setIsRecording(true);

      // Also capture the raw audio. Note that MediaRecorder produces a
      // compressed container (typically WebM or Ogg), not WAV.
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];
      mediaRecorder.ondataavailable = (e) => {
        chunksRef.current.push(e.data);
      };
      mediaRecorder.start();
    } catch (err) {
      // Fires on microphone-permission failures and on browsers without
      // SpeechRecognition support.
      console.error('Error starting recording:', err);
    }
  };
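
  // chunksRef is filled above but never assembled. If the recording is needed
  // as a single Blob (for playback or upload), one option is an onstop
  // handler along these lines; 'audio/webm' is an assumption (Chrome's usual
  // container), and producing true WAV would require re-encoding:
  //
  //   mediaRecorder.onstop = () => {
  //     const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
  //     const url = URL.createObjectURL(blob);
  //     // hand `url` to an <audio> element, or upload `blob`
  //   };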
  const stopRecording = () => {
    if (recognitionRef.current) {
      recognitionRef.current.stop();
    }
    if (mediaRecorderRef.current && isRecording) {
      mediaRecorderRef.current.stop();
      setIsRecording(false);
      // Release the microphone so the browser's recording indicator clears
      mediaRecorderRef.current.stream.getTracks().forEach((track) => track.stop());
    }
  };
  const speakText = (text: string, isFrench: boolean) => {
    if ('speechSynthesis' in window) {
      // Cancel any utterance already in progress before speaking
      window.speechSynthesis.cancel();
      const utterance = new SpeechSynthesisUtterance(text);
      // Non-French messages in this app are Arabic, hence ar-SA
      utterance.lang = isFrench ? 'fr-FR' : 'ar-SA';
      utterance.rate = 1.0;
      utterance.pitch = 1.0;
      utterance.volume = 1.0;
      window.speechSynthesis.speak(utterance);
    }
  };
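
  // The default voice for a given lang varies by OS and browser. To pick a
  // voice explicitly, something along these lines could run before speak()
  // (note that getVoices() can return an empty list until the browser's
  // 'voiceschanged' event has fired):
  //
  //   const voice = window.speechSynthesis
  //     .getVoices()
  //     .find((v) => v.lang.startsWith(isFrench ? 'fr' : 'ar'));
  //   if (voice) utterance.voice = voice;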
  return (
    <div className="container mx-auto p-4 max-w-2xl">
      <div className="bg-white rounded-lg shadow-md h-[calc(100vh-12rem)] flex flex-col">
        <div className="flex-1 overflow-y-auto p-4">
          {messages.map((message) => (
            <div
              key={message.id}
              className={`flex ${message.isFrench ? 'justify-end' : 'justify-start'} mb-4`}
            >
              <div
                className={`rounded-lg p-3 max-w-[70%] ${
                  message.isFrench
                    ? 'bg-blue-500 text-white'
                    : 'bg-white border border-gray-300'
                }`}
              >
                <p>{message.text}</p>
                {!message.isFrench && (
                  <button
                    className="mt-2 p-2 bg-gray-100 rounded-full hover:bg-gray-200 transition-colors"
                    onClick={() => speakText(message.text, false)}
                    title="Écouter le message"
                  >
                    <Play className="h-4 w-4" />
                  </button>
                )}
              </div>
            </div>
          ))}
        </div>
        <div className="border-t p-4">
          <div className="flex items-center justify-center space-x-4">
            <button
              onClick={isRecording ? stopRecording : startRecording}
              className="relative p-4 rounded-full bg-blue-500 hover:bg-blue-600 text-white"
            >
              {isRecording ? (
                <Square className="h-6 w-6" />
              ) : (
                <Mic className="h-6 w-6" />
              )}
              {isRecording && (
                <div className="absolute inset-0 animate-ping rounded-full bg-blue-400 opacity-75"></div>
              )}
            </button>
            <p className="text-gray-600">Cliquer puis parler</p>
          </div>
        </div>
      </div>
    </div>
  );
};
export default FrenchChat;
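
// ---------------------------------------------------------------------------
// ../context/ChatContext is not shown in this file. The sketch below is an
// assumption inferred from how `messages` and `addMessage` are used above;
// the real implementation may differ:
//
//   import React, { createContext, useContext, useState, ReactNode } from 'react';
//
//   export interface Message {
//     id: number;
//     text: string;
//     isFrench: boolean;
//     timestamp: Date;
//   }
//
//   interface ChatContextValue {
//     messages: Message[];
//     addMessage: (message: Message) => void;
//   }
//
//   const ChatContext = createContext<ChatContextValue | undefined>(undefined);
//
//   export const ChatProvider = ({ children }: { children: ReactNode }) => {
//     const [messages, setMessages] = useState<Message[]>([]);
//     const addMessage = (message: Message) =>
//       setMessages((prev) => [...prev, message]);
//     return (
//       <ChatContext.Provider value={{ messages, addMessage }}>
//         {children}
//       </ChatContext.Provider>
//     );
//   };
//
//   export const useChat = () => {
//     const ctx = useContext(ChatContext);
//     if (!ctx) throw new Error('useChat must be used within a ChatProvider');
//     return ctx;
//   };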