ggwave / static /index.html
yasserrmd's picture
Update static/index.html
5398c25 verified
raw
history blame
11.5 kB
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Voice Chat Interface</title>
<!-- Bootstrap CSS. NOTE(review): the scraped copy had the package name mangled
     into "[email protected]" by e-mail obfuscation; restored to package@version
     form — confirm the pinned version against the deployed app. -->
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css" rel="stylesheet">
<!-- Bootstrap Icons (same obfuscation repair; verify version) -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.11.3/font/bootstrap-icons.css">
<style>
/* Page backdrop behind the chat card */
body {
background-color: #f8f9fa;
padding-top: 30px;
}
/* Centered white card containing the whole UI */
.chat-container {
max-width: 600px;
margin: 0 auto;
background-color: white;
border-radius: 12px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
padding: 25px;
}
/* Red record button; gets .recording (pulse) while capturing */
.btn-record {
background-color: #ff4b4b;
border-color: #ff4b4b;
}
.btn-record:hover {
background-color: #e43c3c;
border-color: #e43c3c;
}
.btn-record.recording {
animation: pulse 1.5s infinite;
}
/* Blue text-to-speech button */
.btn-generate {
background-color: #4c6ef5;
border-color: #4c6ef5;
}
.btn-generate:hover {
background-color: #3b5bdb;
border-color: #3b5bdb;
}
.icon-spacing {
margin-right: 8px;
}
.control-label {
font-size: 0.9rem;
color: #6c757d;
margin-bottom: 8px;
}
/* Grey panel wrapping the audio player and status line */
.audio-container {
background-color: #f1f3f5;
border-radius: 8px;
padding: 15px;
margin-top: 20px;
}
audio {
width: 100%;
}
/* Fixed height so status text appearing/disappearing doesn't shift layout */
.status-indicator {
font-size: 0.9rem;
margin-top: 10px;
height: 24px;
}
/* Gentle scale pulse applied to the record button while listening */
@keyframes pulse {
0% { transform: scale(1); }
50% { transform: scale(1.05); }
100% { transform: scale(1); }
}
</style>
</head>
<body>
<div class="container">
<div class="chat-container">
<h1 class="text-center mb-4">
<i class="bi bi-mic-fill text-primary"></i> Voice Chat
</h1>
<div class="row g-4">
<!-- Left column: microphone capture, POSTed to /chat/ by the inline script -->
<div class="col-md-6">
<div class="d-grid">
<p class="control-label text-center">Voice Recognition</p>
<button id="recordButton" class="btn btn-record btn-lg text-white">
<i class="bi bi-mic-fill icon-spacing"></i> Start Listening
</button>
</div>
</div>
<!-- Right column: text-to-speech via /tts/ -->
<div class="col-md-6">
<div class="d-grid">
<p class="control-label text-center">Text to Speech</p>
<button id="generateButton" class="btn btn-generate btn-lg text-white">
<i class="bi bi-soundwave icon-spacing"></i> Generate Speech
</button>
</div>
</div>
</div>
<!-- Shared playback area used by both features -->
<div class="audio-container">
<p class="control-label mb-2">Audio Playback</p>
<audio id="audioPlayer" controls></audio>
<div id="statusMessage" class="status-indicator text-center text-secondary"></div>
</div>
</div>
</div>
<!-- Bootstrap JS Bundle (CDN URL restored from obfuscated scrape; verify version) -->
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/js/bootstrap.bundle.min.js"></script>
<script>
// --- Recording state ---
let mediaRecorder;      // active MediaRecorder, undefined until first use
let audioChunks = [];   // Blob chunks gathered during the current recording
const recordButton = document.getElementById("recordButton");
const generateButton = document.getElementById("generateButton");
const statusMessage = document.getElementById("statusMessage");
let recordingTimeout;   // handle for the 5-second auto-stop timer

// Toggle recording: first click starts a capture that auto-stops after 5
// seconds; clicking again stops it early. On stop, the WebM recording is
// converted to WAV and POSTed to /chat/; the audio reply is loaded into the
// player.
recordButton.addEventListener("click", async () => {
    if (!mediaRecorder || mediaRecorder.state === "inactive") {
        try {
            const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
            mediaRecorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
            mediaRecorder.ondataavailable = event => audioChunks.push(event.data);
            mediaRecorder.onstop = async () => {
                // Release the microphone. The original never stopped the
                // tracks, so every recording leaked a live capture stream and
                // the browser's mic indicator stayed on indefinitely.
                stream.getTracks().forEach(track => track.stop());
                statusMessage.textContent = "Processing audio...";
                recordButton.innerHTML = '<i class="bi bi-mic-fill icon-spacing"></i> Start Listening';
                recordButton.classList.remove("recording");
                recordButton.classList.remove("btn-danger");
                recordButton.classList.add("btn-record");
                try {
                    const audioBlob = new Blob(audioChunks, { type: "audio/webm" });
                    const wavBlob = await convertWebMToWav(audioBlob);
                    const formData = new FormData();
                    formData.append("file", wavBlob, "recording.wav");
                    const response = await fetch("/chat/", {
                        method: "POST",
                        body: formData
                    });
                    if (response.ok) {
                        const audioData = await response.blob();
                        const player = document.getElementById("audioPlayer");
                        // Revoke the previous blob URL so repeated exchanges
                        // don't accumulate unreleasable blobs in memory.
                        if (player.src) URL.revokeObjectURL(player.src);
                        player.src = URL.createObjectURL(audioData);
                        statusMessage.textContent = "Response ready!";
                    } else {
                        statusMessage.textContent = "Error processing audio. Please try again.";
                    }
                } catch (error) {
                    console.error("Error:", error);
                    statusMessage.textContent = "Error processing audio. Please try again.";
                }
            };
            audioChunks = [];
            mediaRecorder.start();
            recordButton.innerHTML = '<i class="bi bi-stop-fill icon-spacing"></i> Listening...';
            recordButton.classList.add("recording");
            recordButton.classList.remove("btn-record");
            recordButton.classList.add("btn-danger");
            statusMessage.textContent = "Listening (5 seconds)...";
            // Auto-stop after 5 seconds
            recordingTimeout = setTimeout(() => {
                if (mediaRecorder && mediaRecorder.state === "recording") {
                    mediaRecorder.stop();
                }
            }, 5000);
        } catch (error) {
            console.error("Error accessing microphone:", error);
            statusMessage.textContent = "Could not access microphone. Please check permissions.";
        }
    } else if (mediaRecorder.state === "recording") {
        // Stop early; clear the pending auto-stop so the stale timer cannot
        // fire against a future recording session.
        clearTimeout(recordingTimeout);
        mediaRecorder.stop();
    }
});
// Prompt the user for text and request synthesized speech from /tts/;
// on success the returned audio blob is loaded into the shared player.
generateButton.addEventListener("click", async () => {
    const text = prompt("Enter text to convert to speech:");
    if (!text) return; // cancelled or empty input — do nothing

    statusMessage.textContent = "Generating speech...";
    try {
        const response = await fetch("/tts/", {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ text })
        });
        if (!response.ok) {
            statusMessage.textContent = "Error generating speech. Please try again.";
            return;
        }
        const audioData = await response.blob();
        document.getElementById("audioPlayer").src = URL.createObjectURL(audioData);
        statusMessage.textContent = "Speech generated successfully!";
    } catch (error) {
        console.error("Error:", error);
        statusMessage.textContent = "Error generating speech. Please try again.";
    }
});
/**
 * Decode a recorded WebM blob and re-encode it as a 16-bit PCM WAV blob.
 *
 * Rewritten from the original FileReader + explicit-Promise wrapper:
 * Blob.arrayBuffer() already returns a promise, so the manual plumbing was
 * the explicit-construction anti-pattern. Also closes the AudioContext —
 * the original leaked one context per recording, and browsers cap the
 * number of live AudioContexts, so repeated use would eventually fail.
 *
 * @param {Blob} blob - recorded audio (audio/webm)
 * @returns {Promise<Blob>} resolves to an audio/wav blob; rejects on decode failure
 */
async function convertWebMToWav(blob) {
    const audioContext = new AudioContext();
    try {
        const arrayBuffer = await blob.arrayBuffer();
        const buffer = await audioContext.decodeAudioData(arrayBuffer);
        const wavBuffer = audioBufferToWav(buffer);
        return new Blob([wavBuffer], { type: "audio/wav" });
    } catch (error) {
        console.error("Error in convertWebMToWav:", error);
        throw error; // propagate so the caller shows its error status
    } finally {
        // Always release the context, even when decoding fails.
        await audioContext.close();
    }
}
/**
 * Encode a decoded AudioBuffer as a complete WAV file (RIFF/WAVE container,
 * 16-bit PCM, interleaved channels, little-endian throughout).
 *
 * @param {AudioBuffer} buffer - decoded audio; any channel count and sample rate
 * @returns {ArrayBuffer} the full WAV file bytes (44-byte header + sample data)
 */
function audioBufferToWav(buffer) {
    const channelCount = buffer.numberOfChannels;
    const sampleRate = buffer.sampleRate;
    const bytesPerFrame = channelCount * 2;                 // 2 bytes per 16-bit sample
    const totalBytes = buffer.length * bytesPerFrame + 44;  // data + RIFF header
    const out = new ArrayBuffer(totalBytes);
    const view = new DataView(out);
    let pos = 0;

    const writeU16 = (value) => { view.setUint16(pos, value, true); pos += 2; };
    const writeU32 = (value) => { view.setUint32(pos, value, true); pos += 4; };

    // --- RIFF/WAVE header ---
    writeU32(0x46464952);                 // "RIFF"
    writeU32(totalBytes - 8);             // chunk size: file length minus 8
    writeU32(0x45564157);                 // "WAVE"
    writeU32(0x20746d66);                 // "fmt " sub-chunk
    writeU32(16);                         // fmt chunk length
    writeU16(1);                          // audio format 1 = uncompressed PCM
    writeU16(channelCount);
    writeU32(sampleRate);
    writeU32(sampleRate * bytesPerFrame); // byte rate
    writeU16(bytesPerFrame);              // block align
    writeU16(16);                         // bits per sample
    writeU32(0x61746164);                 // "data" sub-chunk
    writeU32(totalBytes - pos - 4);       // data sub-chunk length

    // Grab each channel's Float32 samples once, then interleave per frame.
    const channels = [];
    for (let ch = 0; ch < channelCount; ch++) {
        channels.push(buffer.getChannelData(ch));
    }
    for (let frame = 0; pos < totalBytes; frame++) {
        for (let ch = 0; ch < channelCount; ch++) {
            // Clamp to [-1, 1] and scale to the signed 16-bit range;
            // DataView truncates any fractional part on write.
            const clamped = Math.max(-1, Math.min(1, channels[ch][frame]));
            writeU16(clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF);
        }
    }
    return out;
}
</script>
</body>
</html>