Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,25 +1,64 @@
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
-
import time
|
4 |
|
5 |
-
def
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
ret = audio
|
11 |
-
else:
|
12 |
-
ret = (audio[0], np.concatenate((instream[1], audio[1])))
|
13 |
-
return ret, ret
|
14 |
|
15 |
with gr.Blocks() as demo:
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
|
24 |
if __name__ == "__main__":
|
25 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
|
|
3 |
|
4 |
+
def process_audio(audio):
    """Server-side hook for each streamed audio chunk.

    Receives the audio data sent by the client and returns what should be
    streamed back through the output component. This demo performs no
    transformation — the chunk is echoed back unchanged. Real processing
    (filtering, transcription, etc.) would go here.
    """
    # Identity pass-through for this example.
    return audio
|
|
|
|
|
|
|
|
|
9 |
|
10 |
with gr.Blocks() as demo:
    # Hidden Gradio audio components: the custom JS below feeds the input,
    # and the server streams the processed audio back through the output.
    audio_input = gr.Audio(source="microphone", streaming=True, visible=False)
    audio_output = gr.Audio(streaming=True, visible=False)

    # Custom HTML/JS that captures microphone audio with the Web Audio API
    # and forwards each buffer to the hidden Gradio input component.
    html = gr.HTML("""
    <button id="startButton">Start</button>
    <button id="stopButton">Stop</button>
    <div id="status"></div>

    <script>
    let audioContext;
    let mediaStreamSource;
    let processor;
    let recording = false;

    async function startRecording() {
        audioContext = new (window.AudioContext || window.webkitAudioContext)();
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        mediaStreamSource = audioContext.createMediaStreamSource(stream);

        processor = audioContext.createScriptProcessor(1024, 1, 1);
        mediaStreamSource.connect(processor);
        processor.connect(audioContext.destination);

        processor.onaudioprocess = function(e) {
            if (!recording) return;
            const audioData = e.inputBuffer.getChannelData(0);
            // Send audio data to the server.
            // FIX: FileList is not constructible — "new FileList([...])" throws
            // a TypeError. Build a FileList via a DataTransfer object instead.
            const dataTransfer = new DataTransfer();
            dataTransfer.items.add(new File([audioData], 'audio.wav', {type: 'audio/wav'}));
            // NOTE(review): gradioApp() is not a standard Gradio browser global
            // (it comes from other web UIs) — confirm it exists in this
            // deployment, otherwise use document.querySelector directly.
            gradioApp().querySelector('#component-0').querySelector('input[type=file]').files = dataTransfer.files;
            gradioApp().querySelector('#component-0').querySelector('button[type=submit]').click();
        };

        recording = true;
        document.getElementById('status').textContent = 'Recording...';
    }

    function stopRecording() {
        if (processor) {
            processor.disconnect();
            mediaStreamSource.disconnect();
        }
        recording = false;
        document.getElementById('status').textContent = 'Stopped';
    }

    document.getElementById('startButton').addEventListener('click', startRecording);
    document.getElementById('stopButton').addEventListener('click', stopRecording);
    </script>
    """)

    # Stream every incoming chunk through the server-side handler.
    audio_input.stream(process_audio, inputs=audio_input, outputs=audio_output)
|
62 |
|
63 |
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|