yasserrmd committed
Commit 976e0aa · verified · 1 Parent(s): 74a0136

Update static/index.html

Files changed (1)
  1. static/index.html +149 -182
static/index.html CHANGED
@@ -143,209 +143,176 @@
  <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
  <script>
  let mediaRecorder;
- let audioChunks = [];
- let audioStream;
- const recordButton = document.getElementById("recordButton");
- const generateButton = document.getElementById("generateButton");
- const statusMessage = document.getElementById("statusMessage");
- const messageInput = document.getElementById("messageInput");
- const userMessageHeader = document.getElementById("userMessageHeader");
- const llmResponseHeader = document.getElementById("llmResponseHeader");
- const receivedData = document.getElementById("receivedData");
- let recordingTimeout;
-
- recordButton.addEventListener("click", async () => {
-   if (!mediaRecorder || mediaRecorder.state === "inactive") {
-     try {
-       // Stop any existing stream tracks before requesting a new one
-       if (audioStream) {
-         audioStream.getTracks().forEach(track => track.stop());
-       }
-
-       audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
-       mediaRecorder = new MediaRecorder(audioStream, { mimeType: "audio/webm" });
-
-       mediaRecorder.ondataavailable = event => audioChunks.push(event.data);
-
-       mediaRecorder.onstop = async () => {
-         statusMessage.textContent = "Processing audio data...";
-         recordButton.innerHTML = '<i class="bi bi-mic-fill icon-spacing"></i> Start Listening';
-         recordButton.classList.remove("recording");
-         recordButton.classList.remove("btn-danger");
-         recordButton.classList.add("btn-record");
-
-         try {
-           const audioBlob = new Blob(audioChunks, { type: "audio/webm" });
-           const wavBlob = await convertWebMToWav(audioBlob);
-
-           // This is the original code from your first implementation
-           const formData = new FormData();
-           formData.append("file", wavBlob, "recording.wav");
-
-           const response = await fetch("/chat/", {
-             method: "POST",
-             body: formData
-           });
-
-           if (response.ok) {
-             // Display response headers that we know are sent back from the endpoint
-             const userMessage = response.headers.get("X-User-Message") || "No user message";
-             const llmResponse = response.headers.get("X-LLM-Response") || "No response";
-
-             // Update the header display
-             userMessageHeader.innerHTML = `X-User-Message: <span class="text-primary">${userMessage}</span>`;
-             llmResponseHeader.innerHTML = `X-LLM-Response: <span class="text-success">${llmResponse}</span>`;
-
-             // Update received data
-             receivedData.textContent = userMessage;
-
-             // Get audio blob from response and play it
-             const audioData = await response.blob();
-             document.getElementById("audioPlayer").src = URL.createObjectURL(audioData);
-             statusMessage.textContent = "Data decoded successfully!";
-           } else {
-             statusMessage.textContent = "Error processing audio data. Please try again.";
-           }
-         } catch (error) {
-           console.error("Error:", error);
-           statusMessage.textContent = "Error processing audio data. Please try again.";
-         }
-
-         // Clean up the audio tracks after processing is complete
-         if (audioStream) {
-           audioStream.getTracks().forEach(track => track.stop());
-         }
-       };
-
-       audioChunks = [];
-       mediaRecorder.start();
-
-       recordButton.innerHTML = '<i class="bi bi-stop-fill icon-spacing"></i> Listening...';
-       recordButton.classList.add("recording");
-       recordButton.classList.remove("btn-record");
-       recordButton.classList.add("btn-danger");
-       statusMessage.textContent = "Listening for data transmission...";
-
-       // Auto-stop after 5 seconds
-       recordingTimeout = setTimeout(() => {
-         if (mediaRecorder && mediaRecorder.state === "recording") {
-           mediaRecorder.stop();
-         }
-       }, 5000);
-     } catch (error) {
-       console.error("Error accessing microphone:", error);
-       statusMessage.textContent = "Could not access microphone. Please check permissions.";
-     }
-   } else if (mediaRecorder.state === "recording") {
-     // Stop recording if already recording
-     clearTimeout(recordingTimeout);
-     mediaRecorder.stop();
    }
- });
-
- generateButton.addEventListener("click", async () => {
-   const text = messageInput.value.trim();
-   if (text) {
-     statusMessage.textContent = "Encoding data to sound...";
      try {
-       const response = await fetch("/tts/", {
          method: "POST",
-         headers: {
-           "Content-Type": "application/json"
-         },
-         body: JSON.stringify({ text })
        });
-
        if (response.ok) {
          const audioData = await response.blob();
-         document.getElementById("audioPlayer").src = URL.createObjectURL(audioData);
-         statusMessage.textContent = "Data encoded as sound. Ready to transmit!";
-         // Auto-play option
-         document.getElementById("audioPlayer").play();
        } else {
-         statusMessage.textContent = "Error encoding data. Please try again.";
        }
      } catch (error) {
        console.error("Error:", error);
-       statusMessage.textContent = "Error encoding data. Please try again.";
      }
-   } else {
-     statusMessage.textContent = "Please enter a message to transmit.";
-   }
- });

- async function convertWebMToWav(blob) {
-   return new Promise((resolve, reject) => {
-     try {
-       const reader = new FileReader();
-       reader.onload = function () {
-         const audioContext = new AudioContext();
-         audioContext.decodeAudioData(reader.result)
-           .then(buffer => {
-             const wavBuffer = audioBufferToWav(buffer);
-             resolve(new Blob([wavBuffer], { type: "audio/wav" }));
-           })
-           .catch(error => {
-             console.error("Error decoding audio data:", error);
-             reject(error);
-           });
-       };
-       reader.readAsArrayBuffer(blob);
-     } catch (error) {
-       console.error("Error in convertWebMToWav:", error);
-       reject(error);
      }
-   });
- }

- function audioBufferToWav(buffer) {
-   let numOfChan = buffer.numberOfChannels,
-     length = buffer.length * numOfChan * 2 + 44,
-     bufferArray = new ArrayBuffer(length),
-     view = new DataView(bufferArray),
-     channels = [],
-     sampleRate = buffer.sampleRate,
-     offset = 0,
-     pos = 0;
-
-   setUint32(0x46464952); // "RIFF"
-   setUint32(length - 8);
-   setUint32(0x45564157); // "WAVE"
-   setUint32(0x20746d66); // "fmt " chunk
-   setUint32(16); // length = 16
-   setUint16(1); // PCM (uncompressed)
-   setUint16(numOfChan);
-   setUint32(sampleRate);
-   setUint32(sampleRate * 2 * numOfChan);
-   setUint16(numOfChan * 2);
-   setUint16(16); // bits per sample
-   setUint32(0x61746164); // "data" chunk
-   setUint32(length - pos - 4);
-
-   for (let i = 0; i < buffer.numberOfChannels; i++)
-     channels.push(buffer.getChannelData(i));
-
-   while (pos < length) {
-     for (let i = 0; i < numOfChan; i++) {
-       let sample = Math.max(-1, Math.min(1, channels[i][offset]));
-       sample = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
-       setUint16(sample);
-     }
-     offset++;
-   }
-
-   function setUint16(data) {
-     view.setUint16(pos, data, true);
-     pos += 2;
-   }
-
-   function setUint32(data) {
-     view.setUint32(pos, data, true);
-     pos += 4;
-   }
-
-   return bufferArray;
  }

  </script>
  </body>
  </html>
 
  <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
  <script>
  let mediaRecorder;
+ let audioChunks = [];
+ let audioStream;
+ const recordButton = document.getElementById("recordButton");
+ const statusMessage = document.getElementById("statusMessage");
+ const userMessageHeader = document.getElementById("userMessageHeader");
+ const llmResponseHeader = document.getElementById("llmResponseHeader");
+ const receivedData = document.getElementById("receivedData");
+ const audioPlayer = document.getElementById("audioPlayer");
+ let recordingTimeout;
+
+ recordButton.addEventListener("click", async () => {
+   if (!mediaRecorder || mediaRecorder.state === "inactive") {
+     try {
+       if (audioStream) {
+         audioStream.getTracks().forEach(track => track.stop());
        }
+
+       audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+       mediaRecorder = new MediaRecorder(audioStream, { mimeType: "audio/webm" });
+       audioChunks = []; // Reset chunks
+
+       mediaRecorder.ondataavailable = event => {
+         if (event.data.size > 0) {
+           audioChunks.push(event.data);
+         }
+       };
+
+       mediaRecorder.onstop = async () => {
+         if (audioChunks.length === 0) {
+           statusMessage.textContent = "No audio recorded. Try again.";
+           return;
+         }
+
+         statusMessage.textContent = "Processing audio data...";
+         recordButton.innerHTML = '<i class="bi bi-mic-fill icon-spacing"></i> Start Listening';
+         recordButton.classList.remove("recording", "btn-danger");
+         recordButton.classList.add("btn-record");
+
          try {
+           const audioBlob = new Blob(audioChunks, { type: "audio/webm" });
+           const wavBlob = await convertWebMToWav(audioBlob);
+           const wavFile = new File([wavBlob], "recording.wav", { type: "audio/wav" });
+
+           const formData = new FormData();
+           formData.append("file", wavFile);
+
+           const response = await fetch("/chat/", {
              method: "POST",
+             body: formData
            });
+
            if (response.ok) {
+             const userMessage = response.headers.get("X-User-Message") || "No user message";
+             const llmResponse = response.headers.get("X-LLM-Response") || "No response";
+
+             userMessageHeader.innerHTML = `X-User-Message: <span class="text-primary">${userMessage}</span>`;
+             llmResponseHeader.innerHTML = `X-LLM-Response: <span class="text-success">${llmResponse}</span>`;
+
+             receivedData.textContent = userMessage;
              const audioData = await response.blob();
+             audioPlayer.src = URL.createObjectURL(audioData);
+             statusMessage.textContent = "Data decoded successfully!";
            } else {
+             statusMessage.textContent = "Error processing audio data. Please try again.";
            }
          } catch (error) {
            console.error("Error:", error);
+           statusMessage.textContent = "Error processing audio data. Please try again.";
          }

+         if (audioStream) {
+           audioStream.getTracks().forEach(track => track.stop());
          }
+       };

+       mediaRecorder.start();
+       recordButton.innerHTML = '<i class="bi bi-stop-fill icon-spacing"></i> Listening...';
+       recordButton.classList.add("recording", "btn-danger");
+       recordButton.classList.remove("btn-record");
+       statusMessage.textContent = "Listening for data transmission...";

+       recordingTimeout = setTimeout(() => {
+         if (mediaRecorder.state === "recording") {
+           mediaRecorder.stop();
+         }
+       }, 5000);
+     } catch (error) {
+       console.error("Error accessing microphone:", error);
+       statusMessage.textContent = "Could not access microphone. Please check permissions.";
+     }
+   } else if (mediaRecorder.state === "recording") {
+     clearTimeout(recordingTimeout);
+     mediaRecorder.stop();
+   }
+ });

+ async function convertWebMToWav(blob) {
+   return new Promise((resolve, reject) => {
+     try {
+       const reader = new FileReader();
+       reader.onload = function () {
+         const audioContext = new AudioContext();
+         audioContext.decodeAudioData(reader.result)
+           .then(buffer => {
+             const wavBuffer = audioBufferToWav(buffer);
+             resolve(new Blob([wavBuffer], { type: "audio/wav" }));
+           })
+           .catch(error => {
+             console.error("Error decoding audio data:", error);
+             reject(error);
+           });
+       };
+       reader.readAsArrayBuffer(blob);
+     } catch (error) {
+       console.error("Error in convertWebMToWav:", error);
+       reject(error);
+     }
+   });
+ }

+ function audioBufferToWav(buffer) {
+   let numOfChan = buffer.numberOfChannels,
+     length = buffer.length * numOfChan * 2 + 44,
+     bufferArray = new ArrayBuffer(length),
+     view = new DataView(bufferArray),
+     channels = [],
+     sampleRate = buffer.sampleRate,
+     offset = 0,
+     pos = 0;
+
+   setUint32(0x46464952); // "RIFF"
+   setUint32(length - 8);
+   setUint32(0x45564157); // "WAVE"
+   setUint32(0x20746d66); // "fmt " chunk
+   setUint32(16);
+   setUint16(1);
+   setUint16(numOfChan);
+   setUint32(sampleRate);
+   setUint32(sampleRate * 2 * numOfChan);
+   setUint16(numOfChan * 2);
+   setUint16(16);
+   setUint32(0x61746164);
+   setUint32(length - pos - 4);
+
+   for (let i = 0; i < buffer.numberOfChannels; i++) {
+     channels.push(buffer.getChannelData(i));
+   }
+
+   while (pos < length) {
+     for (let i = 0; i < numOfChan; i++) {
+       let sample = Math.max(-1, Math.min(1, channels[i][offset]));
+       sample = sample < 0 ? sample * 0x8000 : sample * 0x7FFF;
+       setUint16(sample);
      }
+     offset++;
+   }
+
+   function setUint16(data) {
+     view.setUint16(pos, data, true);
+     pos += 2;
+   }
+
+   function setUint32(data) {
+     view.setUint32(pos, data, true);
+     pos += 4;
+   }
+
+   return bufferArray;
+ }
+
  </script>
  </body>
  </html>
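
For reference, a minimal sketch of how the audioBufferToWav() helper added above could be spot-checked from the browser console once the page has loaded. The checkWavHeader name, the OfflineAudioContext settings (mono, 16 kHz, 0.1 s of silence), and the logged fields are illustrative assumptions, not part of the commit:

  // Hypothetical spot check; assumes audioBufferToWav() from index.html is in scope.
  async function checkWavHeader() {
    // Render 0.1 s of mono silence at 16 kHz into an AudioBuffer.
    const ctx = new OfflineAudioContext(1, 1600, 16000);
    const buffer = await ctx.startRendering();

    // Inspect the 44-byte PCM WAV header written by audioBufferToWav().
    const view = new DataView(audioBufferToWav(buffer));
    const tag = o => String.fromCharCode(
      view.getUint8(o), view.getUint8(o + 1), view.getUint8(o + 2), view.getUint8(o + 3));

    console.log(tag(0), tag(8), tag(12), tag(36));              // "RIFF", "WAVE", "fmt ", "data"
    console.log("sample rate:", view.getUint32(24, true));      // 16000
    console.log("bits per sample:", view.getUint16(34, true));  // 16
  }
  checkWavHeader();

The offsets follow the header the function writes: chunk tags at bytes 0, 8, 12 and 36, little-endian numeric fields, and 16-bit PCM samples starting at byte 44.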