"""Gradio front end for speech-to-text transcription.

The UI forwards the selected audio (uploaded file or microphone recording) to a
backend Gradio app whose URL is read from the `link` environment variable and
which exposes a transcription endpoint at api_name="/predict".
"""

import gradio as gr
import os
from gradio_client import Client

# URL of the backend transcription app, supplied via the `link` environment variable.
link = os.environ['link']
client = Client(link)


def s2t(source, mic, fi, lang):
    """Send the selected audio input to the transcription API and return its result."""
    # Pick the audio file from the chosen source (uploaded file vs. microphone recording).
    if source == 'file':
        in_file = fi
    else:
        in_file = mic
    # NOTE: `lang` is collected from the UI but not forwarded to the current endpoint.
    result = client.predict(
        in_file,       # str (filepath or URL to file) in 'inputs' Audio component
        "transcribe",  # str in 'Task' Radio component
        True,          # bool in 'Return timestamps' Checkbox component
        api_name="/predict",
    )
    print(result)
    return result


def update_audio_ui(audio_source: str, input_audio_mic, input_audio_file):
    # Show only the audio widget that matches the selected source and clear both values.
    mic = audio_source == "microphone"
    return (
        gr.update(visible=mic, value=None),      # input_audio_mic
        gr.update(visible=not mic, value=None),  # input_audio_file
    )


with gr.Blocks() as demo:
    gr.Markdown('Speech to Text')
    with gr.Group():
        with gr.Row() as audio_box:
            with gr.Column():
                input_lang = gr.Dropdown(
                    ['auto', 'vi', 'ja', 'en-us', 'cn', 'ko'],
                    label='Language?',
                    value='auto',
                    interactive=True,
                )
                audio_source = gr.Radio(
                    label="Audio source",
                    choices=["file", "microphone"],
                    value="file",
                    interactive=True,
                )
                input_audio_mic = gr.Audio(
                    label="Input speech",
                    type="filepath",
                    sources=["microphone"],
                    visible=False,
                )
                input_audio_file = gr.Audio(
                    label="Input speech",
                    type="filepath",
                    sources=["upload"],
                    visible=True,
                )
                js = gr.JSON(label="json")
                with gr.Row():
                    btn = gr.Button("Run")
                    btn_clean = gr.ClearButton([input_audio_mic, input_audio_file, js])

    # Toggle between the microphone and file widgets when the audio source changes.
    audio_source.change(
        fn=update_audio_ui,
        inputs=[audio_source, input_audio_mic, input_audio_file],
        outputs=[
            input_audio_mic,
            input_audio_file,
        ],
        queue=False,
        api_name=False,
    )
    btn.click(
        fn=s2t,
        inputs=[audio_source, input_audio_mic, input_audio_file, input_lang],
        outputs=[js],
    )

if __name__ == "__main__":
    demo.launch()