T.Masuda committed
Commit 8a11b0e · 1 Parent(s): c4d1e20

update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -19,8 +19,8 @@ def speech_to_text(mic, upload, state):
     yield prediction['text']
 
 def tab_select(evt: gr.SelectData, state):
-    state['active'] = evt.target.id
-    print('select {}'.format(evt.target.id))
+    state['active'] = evt.index
+    print('select {}'.format(evt.index))
     return state
 
 with gr.Blocks(title='OpenAI Whisper Demo') as app:
@@ -32,11 +32,11 @@ with gr.Blocks(title='OpenAI Whisper Demo') as app:
     with gr.Row():
         with gr.Column():
             inputs = []
-            with gr.Tab(label='microphone', id=0) as tab1:
+            with gr.Tab('microphone') as tab1:
                 mic = gr.Audio(source='microphone', type='filepath')
                 inputs.append(mic)
                 tab1.select(tab_select, inputs=state, outputs=state)
-            with gr.Tab(label='upload', id=1) as tab2:
+            with gr.Tab('upload') as tab2:
                 upload = gr.Audio(source='upload', type='filepath')
                 inputs.append(upload)
                 tab2.select(tab_select, inputs=state, outputs=state)
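
For context on the change: the handler now reads gr.SelectData.index, the zero-based position of the clicked tab, instead of the removed evt.target.id / id= pairing. Below is a minimal, runnable sketch of that pattern under an assumed Gradio 3.x install; the Whisper transcription step is omitted and the demo title is illustrative, not taken from the repo.

# Minimal sketch of the tab-selection pattern after this commit
# (assumes Gradio 3.x; transcription logic is left out for brevity).
import gradio as gr


def tab_select(evt: gr.SelectData, state):
    # evt.index is the zero-based position of the clicked tab
    # (0 = microphone, 1 = upload), replacing the evt.target.id
    # lookup that this commit removes.
    state['active'] = evt.index
    return state


with gr.Blocks(title='Tab select sketch') as demo:
    state = gr.State({'active': 0})
    with gr.Tab('microphone') as tab1:
        mic = gr.Audio(source='microphone', type='filepath')
    with gr.Tab('upload') as tab2:
        upload = gr.Audio(source='upload', type='filepath')
    # Each tab writes its own index into the shared state on selection.
    tab1.select(tab_select, inputs=state, outputs=state)
    tab2.select(tab_select, inputs=state, outputs=state)

if __name__ == '__main__':
    demo.launch()

Because both tabs call the same handler, state['active'] always holds the index of whichever input tab was selected last, which the transcription function can then use to pick between the microphone and upload audio.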