Tlanextli committed on
Commit 788669e · 1 Parent(s): 722d68b

Update app.py

Files changed (1)
  1. app.py (+15 -4)
app.py CHANGED
@@ -5,18 +5,29 @@ from transformers import pipeline
 pipeline = pipeline(task="automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-german")
 #pipeline = pipeline(task="automatic-speech-recognition", model="openai/whisper-large")
 
-def transcribe(audio_path : str) -> str:
+def transcribeFile(audio_path : str) -> str:
     transcription = pipeline(audio_path)
     return transcription["text"]
 
+def transcribeMic(audio):
+    sr, data = audio
+    transcription = pipeline(data)
+    return transcription["text"]
 
-demo = gr.Interface(
-    fn=transcribe,
+app1 = gr.Interface(
+    fn=transcribeFile,
     inputs=gr.inputs.Audio(label="Upload audio file", type="filepath")
-    # inputs=[gr.inputs.Audio(label="Upload audio file", type="filepath"), "microphone"],
     outputs="text"
 )
 
 
+app2 = gr.Interface(
+    fn=transcribeMic,
+    inputs="microphone",
+    outputs="text"
+)
+
+demo = gr.TabbedInterface([app1, app2], ["Audio File", "Microphone"])
+
 if __name__ == "__main__":
     demo.launch()
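
For reference, a minimal runnable sketch of the updated app.py. It is not the committed file: it adds the trailing comma that is missing after the inputs= argument of app1 (as committed, the file raises a SyntaxError), binds the ASR pipeline to asr instead of shadowing transformers.pipeline, and, unlike transcribeMic in the commit, converts the int16 microphone samples to float32 and passes them together with their sampling rate, a dict format the transformers ASR pipeline accepts. Everything else, including the legacy gr.inputs.Audio and "microphone" input shortcuts and the tab titles, follows the commit.

# Hypothetical cleaned-up app.py (a sketch, not the committed file; see notes above).
import gradio as gr
import numpy as np
from transformers import pipeline

# Bind the ASR pipeline to its own name instead of shadowing transformers.pipeline.
asr = pipeline(
    task="automatic-speech-recognition",
    model="jonatasgrosman/wav2vec2-large-xlsr-53-german",
)
# asr = pipeline(task="automatic-speech-recognition", model="openai/whisper-large")


def transcribeFile(audio_path: str) -> str:
    # The file-upload tab hands the pipeline a path on disk.
    return asr(audio_path)["text"]


def transcribeMic(audio) -> str:
    # Gradio's microphone input yields (sample_rate, int16 numpy array).
    sr, data = audio
    samples = data.astype(np.float32) / 32768.0  # int16 -> float32 in [-1, 1]
    if samples.ndim > 1:
        samples = samples.mean(axis=1)  # collapse stereo to mono if needed
    # The transformers ASR pipeline accepts raw samples plus their sampling rate.
    return asr({"sampling_rate": sr, "raw": samples})["text"]


app1 = gr.Interface(
    fn=transcribeFile,
    inputs=gr.inputs.Audio(label="Upload audio file", type="filepath"),  # trailing comma added
    outputs="text",
)

app2 = gr.Interface(
    fn=transcribeMic,
    inputs="microphone",
    outputs="text",
)

demo = gr.TabbedInterface([app1, app2], ["Audio File", "Microphone"])

if __name__ == "__main__":
    demo.launch()

Note that gr.inputs was removed in later Gradio releases, so this sketch presumably requires the older Gradio version the Space was built against.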