NeoPy committed
Commit a9ed33a · verified · 1 Parent(s): 4da29a7

Update main/app/tabs/inference/inference.py

main/app/tabs/inference/inference.py CHANGED

@@ -458,21 +458,17 @@ def inference_tabs():
         gr.Markdown(translations["convert_with_whisper_info"])
         with gr.Row():
             with gr.Column():
-
                 with gr.Accordion(translations["model_accordion"] + " 1", open=True):
                     with gr.Row():
                         model_pth2 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
                         model_index2 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
-
                     refesh2 = gr.Button(translations["refesh"])
                 with gr.Accordion(translations["model_accordion"] + " 2", open=True):
                     with gr.Row():
                         model_pth3 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
                         model_index3 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
                     refesh3 = gr.Button(translations["refesh"])
-
                 with gr.Group():
-
                     with gr.Row():
                         cleaner2 = gr.Checkbox(label=translations["clear_audio"], value=False, interactive=True)
                         autotune2 = gr.Checkbox(label=translations["autotune"], value=False, interactive=True)
@@ -485,9 +481,9 @@ def inference_tabs():
             convert_button3 = gr.Button(translations["convert_audio"], variant="primary")
         with gr.Row():
             with gr.Column():
-                with gr.Row():
-                    pitch3 = gr.Slider(minimum=-20, maximum=20, step=1, info=translations["pitch_info"], label=translations["pitch"], value=0, interactive=True)
-                    index_strength2 = gr.Slider(label=translations["index_strength"], info=translations["index_strength_info"], minimum=0, maximum=1, value=0.5, step=0.01, interactive=True, visible=model_index2.value != "")
+                with gr.Row():
+                    pitch3 = gr.Slider(minimum=-20, maximum=20, step=1, info=translations["pitch_info"], label=translations["pitch"], value=0, interactive=True)
+                    index_strength2 = gr.Slider(label=translations["index_strength"], info=translations["index_strength_info"], minimum=0, maximum=1, value=0.5, step=0.01, interactive=True, visible=model_index2.value != "")
                 with gr.Accordion(translations["input_output"], open=False):
                     with gr.Column():
                         export_format2 = gr.Radio(label=translations["export_format"], info=translations["export_info"], choices=["wav", "mp3", "flac", "ogg", "opus", "m4a", "mp4", "aac", "alac", "wma", "aiff", "webm", "ac3"], value="wav", interactive=True)
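For context, the touched hunks only reorganize the Gradio layout of the Whisper-conversion column: the pitch and index-strength sliders sit in a `gr.Row()` nested inside a `gr.Column()`, and the index-strength slider's initial visibility tracks whether an index file is selected. Below is a minimal standalone sketch of that pattern, not the app's actual code; plain-string labels and a stub `index_path` list stand in for the project's `translations` dict and its scanned model/index paths.

```python
import gradio as gr

# Stand-in for the app's scanned index-file list (assumption for illustration only).
index_path = ["model.index"]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Two sliders side by side in one row, mirroring pitch3 / index_strength2.
            with gr.Row():
                pitch = gr.Slider(minimum=-20, maximum=20, step=1, value=0,
                                  label="Pitch", interactive=True)
                index_strength = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.5,
                                           label="Index strength", interactive=True,
                                           visible=bool(index_path))  # hidden when no index file is available

if __name__ == "__main__":
    demo.launch()
```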