cstr committed · verified
Commit ae4be5c · 1 parent: d5bbd76

update model selector

Files changed (1):
  1. app.py +15 -5
app.py CHANGED
@@ -166,9 +166,6 @@ def get_model_options(pipeline_type):
         return ["openai/whisper-large-v3", "openai/whisper-large-v3-turbo", "primeline/whisper-large-v3-german"]
     else:
         return []
-
-def update_model_dropdown(pipeline_type):
-    return gr.Dropdown.update(choices=get_model_options(pipeline_type), value=get_model_options(pipeline_type)[0])
 
 def transcribe_audio(input_source, pipeline_type, model_id, dtype, batch_size, download_method, start_time=None, end_time=None, verbose=False):
     try:
@@ -259,7 +256,20 @@ def transcribe_audio(input_source, pipeline_type, model_id, dtype, batch_size, d
             os.remove(trimmed_audio_path)
         except:
             pass
-
+
+def update_model_dropdown(pipeline_type):
+    model_choices = get_model_options(pipeline_type)
+    return gr.Dropdown.update(choices=model_choices, value=model_choices[0])
+
+def get_model_options(pipeline_type):
+    if pipeline_type == "faster-batched":
+        return ["cstr/whisper-large-v3-turbo-int8_float32"]
+    elif pipeline_type == "faster-sequenced":
+        return ["deepdml/faster-whisper-large-v3-turbo-ct2"]
+    elif pipeline_type == "transformers":
+        return ["openai/whisper-large-v3"]
+    return []
+
 with gr.Blocks() as iface:
     gr.Markdown("# Multi-Pipeline Transcription")
     gr.Markdown("Transcribe audio using multiple pipelines and models.")
@@ -286,7 +296,7 @@ with gr.Blocks() as iface:
     transcription_output = gr.Textbox(label="Transcription", lines=10)
     transcription_file = gr.File(label="Download Transcription")
 
-    pipeline_type.change(update_model_dropdown, inputs=[pipeline_type], outputs=[model_id])
+    pipeline_type.change(update_model_dropdown, inputs=pipeline_type, outputs=model_id)
 
     transcribe_button.click(
         transcribe_audio,
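
Note: the selector wiring this commit moves can be sketched in isolation. The following is a minimal, hypothetical reproduction of the pattern (not the full app.py), assuming Gradio 3.x, where gr.Dropdown.update() is how an event handler pushes new choices into an existing dropdown; on Gradio 4+ you would return gr.update(choices=...) or a new gr.Dropdown(...) instead. Component and function names mirror those in the diff.

import gradio as gr

# Hypothetical, trimmed-down model-selector wiring: the pipeline choice
# drives which model IDs the model dropdown offers.
PIPELINE_MODELS = {
    "faster-batched": ["cstr/whisper-large-v3-turbo-int8_float32"],
    "faster-sequenced": ["deepdml/faster-whisper-large-v3-turbo-ct2"],
    "transformers": ["openai/whisper-large-v3"],
}

def get_model_options(pipeline_type):
    # Model IDs available for the selected pipeline (empty list if unknown).
    return PIPELINE_MODELS.get(pipeline_type, [])

def update_model_dropdown(pipeline_type):
    choices = get_model_options(pipeline_type)
    # Gradio 3.x: return an update payload for the target component.
    return gr.Dropdown.update(choices=choices, value=choices[0] if choices else None)

with gr.Blocks() as demo:
    pipeline_type = gr.Dropdown(
        choices=list(PIPELINE_MODELS), value="faster-batched", label="Pipeline"
    )
    model_id = gr.Dropdown(choices=get_model_options("faster-batched"), label="Model")
    # Repopulate the model dropdown whenever the pipeline selection changes.
    pipeline_type.change(update_model_dropdown, inputs=pipeline_type, outputs=model_id)

if __name__ == "__main__":
    demo.launch()

Passing inputs=pipeline_type and outputs=model_id as bare components (new line 299) is equivalent to the removed bracketed form: Gradio event listeners accept either a single component or a list.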