akshansh36 committed on
Commit 28e5df5 · verified · 1 Parent(s): c8fa60c

Update app.py

Files changed (1)
  1. app.py +57 -18
app.py CHANGED
@@ -1,25 +1,64 @@
 import gradio as gr
 import numpy as np
-import time
 
-def add_to_stream(audio, instream):
-    time.sleep(1)
-    if audio is None:
-        return gr.update(), instream
-    if instream is None:
-        ret = audio
-    else:
-        ret = (audio[0], np.concatenate((instream[1], audio[1])))
-    return ret, ret
+def process_audio(audio):
+    # This function will receive audio data from the client
+    # and can perform any server-side processing
+    # For this example, we'll just return the audio as-is
+    return audio
 
 with gr.Blocks() as demo:
-    inp = gr.Audio(source="microphone", streaming=True)  # Enable streaming mode
-    out = gr.Audio(streaming=True)  # Enable streaming output
-    stream = gr.State()
-    clear = gr.Button("Clear")
-
-    inp.stream(add_to_stream, [inp, stream], [out, stream])
-    clear.click(lambda: [None, None, None], None, [inp, out, stream])
+    audio_input = gr.Audio(source="microphone", streaming=True, visible=False)
+    audio_output = gr.Audio(streaming=True, visible=False)
+
+    # Custom HTML for Web Audio API
+    html = gr.HTML("""
+    <button id="startButton">Start</button>
+    <button id="stopButton">Stop</button>
+    <div id="status"></div>
+
+    <script>
+        let audioContext;
+        let mediaStreamSource;
+        let processor;
+        let recording = false;
+
+        async function startRecording() {
+            audioContext = new (window.AudioContext || window.webkitAudioContext)();
+            const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+            mediaStreamSource = audioContext.createMediaStreamSource(stream);
+
+            processor = audioContext.createScriptProcessor(1024, 1, 1);
+            mediaStreamSource.connect(processor);
+            processor.connect(audioContext.destination);
+
+            processor.onaudioprocess = function(e) {
+                if (!recording) return;
+                const audioData = e.inputBuffer.getChannelData(0);
+                // Send audio data to the server
+                gradioApp().querySelector('#component-0').querySelector('input[type=file]').files = new FileList([new File([audioData], 'audio.wav', {type: 'audio/wav'})]);
+                gradioApp().querySelector('#component-0').querySelector('button[type=submit]').click();
+            };
+
+            recording = true;
+            document.getElementById('status').textContent = 'Recording...';
+        }
+
+        function stopRecording() {
+            if (processor) {
+                processor.disconnect();
+                mediaStreamSource.disconnect();
+            }
+            recording = false;
+            document.getElementById('status').textContent = 'Stopped';
+        }
+
+        document.getElementById('startButton').addEventListener('click', startRecording);
+        document.getElementById('stopButton').addEventListener('click', stopRecording);
+    </script>
+    """)
+
+    audio_input.stream(process_audio, inputs=audio_input, outputs=audio_output)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
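
The new process_audio handler echoes each chunk back unchanged. Below is a minimal sketch (not part of the commit) of what an actual server-side processing step could look like, assuming Gradio delivers each streamed chunk as a (sample_rate, numpy array) tuple, as in the removed add_to_stream helper; the gain value here is purely illustrative.

import numpy as np

def process_audio(audio):
    # No chunk received yet: return nothing, mirroring the old None check.
    if audio is None:
        return None
    sample_rate, data = audio
    # Illustrative effect: halve the amplitude of the chunk before echoing it back.
    quieter = (data.astype(np.float32) * 0.5).astype(data.dtype)
    return sample_rate, quieter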