TomoHiro123 committed on
Commit
952954d
·
verified ·
1 Parent(s): 2dd4a69

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline
import gradio as gr

# Speech-to-text pipeline. No task string is given, so the pipeline infers
# the task from the model id (openai/whisper-base is an ASR checkpoint).
model = pipeline(model="openai/whisper-base")
# English -> Japanese translation pipeline used by translate_text below.
en_jp_translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-jap")

# NOTE(review): leftover experiment kept by the original author:
# "automatic-speech-recognition"
# transcriber = pipeline(model="openai/whisper-base")
# transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
10
+
11
+
12
def transcribe_audio(mic=None, file=None):
    """Transcribe an audio clip to text with the Whisper pipeline.

    The microphone recording takes precedence over an uploaded file.
    Returns the transcription string, or an error message when neither
    input was supplied.
    """
    audio = mic if mic is not None else file
    if audio is None:
        return "You must either provide a mic recording or a file"
    return model(audio)["text"]
21
+
22
def translate_text(transcription):
    """Translate English *transcription* to Japanese; return the translated string."""
    result = en_jp_translator(transcription)
    return result[0]["translation_text"]
24
+
25
def combined_function(audio=None):
    """Transcribe *audio* and translate the transcription to Japanese.

    Returns a ``(transcription, translation)`` tuple, suitable for
    ``b.click(combined_function, inputs=audio_file, outputs=[text, translate])``.

    Fixes the original, which passed Gradio's ``inputs=``/``outputs=``
    wiring keywords to the plain Python helpers (a guaranteed TypeError —
    neither function accepts those kwargs) and discarded both return values.
    """
    transcription = transcribe_audio(mic=audio)
    translation = translate_text(transcription)
    return transcription, translation
28
+
29
# Build the Gradio UI: one audio input, a textbox for the transcription,
# a textbox for the Japanese translation, and two buttons wiring the helpers.
demo = gr.Blocks()

with demo:
    # type="filepath" makes Gradio pass the helpers a path string on disk,
    # which is what the Whisper pipeline call in transcribe_audio expects.
    audio_file = gr.Audio(type="filepath")
    text = gr.Textbox()       # transcription output / translation input
    translate = gr.Textbox()  # translation output

    # b1 = gr.Button("Recognize Speech & Translate")
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Translate")

    # b1.click(combined_function)
    b1.click(transcribe_audio, inputs=audio_file, outputs=text)
    b2.click(translate_text, inputs=text, outputs= translate)

demo.launch()