alan committed
Commit 3d952d8 · 1 Parent(s): cb5b6f4

disabling examples

Files changed (2)
  1. app.py +96 -93
  2. packages.txt +1 -0
app.py CHANGED
@@ -301,85 +301,85 @@ with gr.Blocks(css="style.css") as demo:
     )
     output_text = gr.Textbox(label="Translated text")
 
-    with gr.Row(visible=True) as s2st_example_row:
-        s2st_examples = gr.Examples(
-            examples=[
-                ["assets/sample_input.mp3", "French"],
-                ["assets/sample_input.mp3", "Mandarin Chinese"],
-                ["assets/sample_input_2.mp3", "Hindi"],
-                ["assets/sample_input_2.mp3", "Spanish"],
-            ],
-            inputs=[input_audio_file, target_language],
-            outputs=[output_audio, output_text],
-            fn=process_s2st_example,
-            cache_examples=CACHE_EXAMPLES,
-        )
-    with gr.Row(visible=False) as s2tt_example_row:
-        s2tt_examples = gr.Examples(
-            examples=[
-                ["assets/sample_input.mp3", "French"],
-                ["assets/sample_input.mp3", "Mandarin Chinese"],
-                ["assets/sample_input_2.mp3", "Hindi"],
-                ["assets/sample_input_2.mp3", "Spanish"],
-            ],
-            inputs=[input_audio_file, target_language],
-            outputs=[output_audio, output_text],
-            fn=process_s2tt_example,
-            cache_examples=CACHE_EXAMPLES,
-        )
-    with gr.Row(visible=False) as t2st_example_row:
-        t2st_examples = gr.Examples(
-            examples=[
-                ["My favorite animal is the elephant.", "English", "French"],
-                ["My favorite animal is the elephant.", "English", "Mandarin Chinese"],
-                [
-                    "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
-                    "English",
-                    "Hindi",
-                ],
-                [
-                    "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
-                    "English",
-                    "Spanish",
-                ],
-            ],
-            inputs=[input_text, source_language, target_language],
-            outputs=[output_audio, output_text],
-            fn=process_t2st_example,
-            cache_examples=CACHE_EXAMPLES,
-        )
-    with gr.Row(visible=False) as t2tt_example_row:
-        t2tt_examples = gr.Examples(
-            examples=[
-                ["My favorite animal is the elephant.", "English", "French"],
-                ["My favorite animal is the elephant.", "English", "Mandarin Chinese"],
-                [
-                    "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
-                    "English",
-                    "Hindi",
-                ],
-                [
-                    "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
-                    "English",
-                    "Spanish",
-                ],
-            ],
-            inputs=[input_text, source_language, target_language],
-            outputs=[output_audio, output_text],
-            fn=process_t2tt_example,
-            cache_examples=CACHE_EXAMPLES,
-        )
-    with gr.Row(visible=False) as asr_example_row:
-        asr_examples = gr.Examples(
-            examples=[
-                ["assets/sample_input.mp3", "English"],
-                ["assets/sample_input_2.mp3", "English"],
-            ],
-            inputs=[input_audio_file, target_language],
-            outputs=[output_audio, output_text],
-            fn=process_asr_example,
-            cache_examples=CACHE_EXAMPLES,
-        )
+    # with gr.Row(visible=True) as s2st_example_row:
+    #     s2st_examples = gr.Examples(
+    #         examples=[
+    #             ["assets/sample_input.mp3", "French"],
+    #             ["assets/sample_input.mp3", "Mandarin Chinese"],
+    #             ["assets/sample_input_2.mp3", "Hindi"],
+    #             ["assets/sample_input_2.mp3", "Spanish"],
+    #         ],
+    #         inputs=[input_audio_file, target_language],
+    #         outputs=[output_audio, output_text],
+    #         fn=process_s2st_example,
+    #         cache_examples=CACHE_EXAMPLES,
+    #     )
+    # with gr.Row(visible=False) as s2tt_example_row:
+    #     s2tt_examples = gr.Examples(
+    #         examples=[
+    #             ["assets/sample_input.mp3", "French"],
+    #             ["assets/sample_input.mp3", "Mandarin Chinese"],
+    #             ["assets/sample_input_2.mp3", "Hindi"],
+    #             ["assets/sample_input_2.mp3", "Spanish"],
+    #         ],
+    #         inputs=[input_audio_file, target_language],
+    #         outputs=[output_audio, output_text],
+    #         fn=process_s2tt_example,
+    #         cache_examples=CACHE_EXAMPLES,
+    #     )
+    # with gr.Row(visible=False) as t2st_example_row:
+    #     t2st_examples = gr.Examples(
+    #         examples=[
+    #             ["My favorite animal is the elephant.", "English", "French"],
+    #             ["My favorite animal is the elephant.", "English", "Mandarin Chinese"],
+    #             [
+    #                 "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
+    #                 "English",
+    #                 "Hindi",
+    #             ],
+    #             [
+    #                 "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
+    #                 "English",
+    #                 "Spanish",
+    #             ],
+    #         ],
+    #         inputs=[input_text, source_language, target_language],
+    #         outputs=[output_audio, output_text],
+    #         fn=process_t2st_example,
+    #         cache_examples=CACHE_EXAMPLES,
+    #     )
+    # with gr.Row(visible=False) as t2tt_example_row:
+    #     t2tt_examples = gr.Examples(
+    #         examples=[
+    #             ["My favorite animal is the elephant.", "English", "French"],
+    #             ["My favorite animal is the elephant.", "English", "Mandarin Chinese"],
+    #             [
+    #                 "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
+    #                 "English",
+    #                 "Hindi",
+    #             ],
+    #             [
+    #                 "Meta AI's Seamless M4T model is democratising spoken communication across language barriers",
+    #                 "English",
+    #                 "Spanish",
+    #             ],
+    #         ],
+    #         inputs=[input_text, source_language, target_language],
+    #         outputs=[output_audio, output_text],
+    #         fn=process_t2tt_example,
+    #         cache_examples=CACHE_EXAMPLES,
+    #     )
+    # with gr.Row(visible=False) as asr_example_row:
+    #     asr_examples = gr.Examples(
+    #         examples=[
+    #             ["assets/sample_input.mp3", "English"],
+    #             ["assets/sample_input_2.mp3", "English"],
+    #         ],
+    #         inputs=[input_audio_file, target_language],
+    #         outputs=[output_audio, output_text],
+    #         fn=process_asr_example,
+    #         cache_examples=CACHE_EXAMPLES,
+    #     )
 
     audio_source.change(
         fn=update_audio_ui,
@@ -408,19 +408,20 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[output_audio, output_text],
         queue=False,
         api_name=False,
-    ).then(
-        fn=update_example_ui,
-        inputs=task_name,
-        outputs=[
-            s2st_example_row,
-            s2tt_example_row,
-            t2st_example_row,
-            t2tt_example_row,
-            asr_example_row,
-        ],
-        queue=False,
-        api_name=False,
     )
+    # .then(
+    #     fn=update_example_ui,
+    #     inputs=task_name,
+    #     outputs=[
+    #         s2st_example_row,
+    #         s2tt_example_row,
+    #         t2st_example_row,
+    #         t2tt_example_row,
+    #         asr_example_row,
+    #     ],
+    #     queue=False,
+    #     api_name=False,
+    # )
 
     btn.click(
         fn=predict,
@@ -436,8 +437,10 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[output_audio, output_text],
        api_name="run",
     )
-# demo.launch()
-demo.queue(max_size=50).launch()
+
+if __name__ == "__main__":
+    demo.launch()
+    # demo.queue(max_size=50).launch()
 
 # Linking models to the space
 # 'facebook/seamless-m4t-large'
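
Note on the disabled .then() hook: it previously switched which example row was visible whenever the selected task changed. The body of update_example_ui is not part of this commit, so the snippet below is only a minimal sketch of how such a callback is usually written in Gradio; the task labels and the matching rule are assumptions.

import gradio as gr

# Assumed task labels; the actual choices in app.py may be longer strings.
TASK_NAMES = ["S2ST", "S2TT", "T2ST", "T2TT", "ASR"]

def update_example_ui(task_name: str):
    # One visibility update per example row, in the same order as the
    # outputs= list of the disabled .then() call, showing only the row
    # that matches the selected task.
    return tuple(gr.update(visible=task_name.startswith(name)) for name in TASK_NAMES)

With the example rows commented out, the names s2st_example_row through asr_example_row no longer exist, so the .then() chain that listed them as outputs has to be disabled along with the examples themselves.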
packages.txt ADDED
@@ -0,0 +1 @@
+ffmpeg
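
On Hugging Face Spaces, packages.txt lists Debian packages installed with apt before the app starts, so adding ffmpeg makes the system binary available for decoding the MP3 sample inputs and recorded audio. A hypothetical one-line check (not part of the app) to confirm the binary is on PATH at runtime:

import shutil

# Fails fast if the apt package listed in packages.txt was not installed.
assert shutil.which("ffmpeg") is not None, "ffmpeg not found on PATH"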