|
import gradio as gr |
|
import os |
|
|
|
from gradio_client import Client |
|
|
|
# URL (or Space id) of the upstream Gradio app that performs the actual
# transcription; read from the environment so it is not hard-coded.
# NOTE(review): os.environ['link'] raises KeyError if the variable is unset —
# confirm that failing fast at import time is the intended behavior.
link = os.environ['link']

# Persistent client for the remote Space; reused by every s2t() call.
client = Client(link)
|
|
|
|
|
def s2t(source, mic, fi, lang):
    """Transcribe the selected audio clip via the remote Space.

    Parameters
    ----------
    source : str
        Which widget holds the audio — "file" or "microphone".
    mic, fi : str | None
        Filepaths produced by the microphone and upload widgets.
    lang : str
        Language hint from the dropdown.
        NOTE(review): currently collected but never forwarded to the
        remote API — confirm whether /predict accepts a language arg.

    Returns
    -------
    The JSON-serializable result of the remote ``/predict`` endpoint.
    """
    chosen = fi if source == 'file' else mic

    result = client.predict(
        chosen,          # audio filepath
        "transcribe",    # task name expected by the remote Space
        True,
        api_name="/predict",
    )
    print(result)  # debug echo of the raw API response
    return result
|
|
|
|
|
def update_audio_ui(audio_source: str, input_audio_mic, input_audio_file):
    """Show exactly one of the two audio widgets, matching the radio choice.

    ``input_audio_mic`` / ``input_audio_file`` are unused here but must stay
    in the signature: the ``.change()`` binding passes them as inputs.

    Returns a pair of updates ``(mic, file)``: the selected widget becomes
    visible, the other hidden, and both have their values cleared.
    """
    use_mic = audio_source == "microphone"
    mic_update = gr.update(visible=use_mic, value=None)
    file_update = gr.update(visible=not use_mic, value=None)
    return mic_update, file_update
|
|
|
# --- UI layout and event wiring ---------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown('<h1 style="text-align: center;">Speech to Text</h1>')
    with gr.Group():
        with gr.Row() as audio_box:
            with gr.Column():
                # Language hint for the backend.
                # NOTE(review): s2t() receives this value but never forwards
                # it to the remote API — verify against the Space's signature.
                input_lang = gr.Dropdown(['auto','vi', 'ja', 'en-us', 'cn', 'ko'], label='Language?', value='auto', interactive=True)
                # Toggle between uploading a file and recording live audio.
                audio_source = gr.Radio(
                    label="Audio source",
                    choices=["file", "microphone"],
                    value="file",
                    interactive=True
                )
                # Microphone widget; hidden until "microphone" is selected
                # (update_audio_ui flips visibility).
                input_audio_mic = gr.Audio(
                    label="Input speech",
                    type="filepath",
                    sources="microphone",
                    visible=False,
                )
                # Upload widget; visible by default — matches the radio's
                # initial value of "file".
                input_audio_file = gr.Audio(
                    label="Input speech",
                    type="filepath",
                    sources="upload",
                    visible=True,
                )
    # Raw JSON result returned by the remote transcription endpoint.
    js = gr.JSON(label="json")
    with gr.Row():
        btn = gr.Button("Run")
        # Clears both audio inputs and the JSON output in one click.
        btn_clean = gr.ClearButton([input_audio_mic, input_audio_file, js])

    # Keep exactly one audio widget visible, tracking the radio selection.
    # queue=False: UI-only toggle, no need to enqueue; api_name=False hides
    # this handler from the public API surface.
    audio_source.change(
        fn=update_audio_ui,
        inputs=[audio_source, input_audio_mic, input_audio_file],
        outputs=[
            input_audio_mic,
            input_audio_file,
        ],
        queue=False,
        api_name=False,
    )

    # Main action: forward the chosen audio (plus the language hint) to s2t
    # and render the response in the JSON component.
    btn.click(fn=s2t, inputs=[audio_source, input_audio_mic, input_audio_file, input_lang], outputs=[js])

if __name__ == "__main__":
    demo.launch()