Update main/app/tabs/inference/inference.py
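Adds a refesh button beside each model/index dropdown pair (model_pth/model_index and model_pth0/model_index0), regroups the whisper tab's two model pickers into "model_accordion" 1 and 2 above the option checkboxes, and drops the old rows further down the page that held model_pth2/model_index2/refesh2 and model_pth3/model_index3/refesh3.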
main/app/tabs/inference/inference.py
CHANGED
@@ -16,7 +16,7 @@ def inference_tabs():
         with gr.Row():
             model_pth = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
             model_index = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
-
+            refesh = gr.Button(translations["refesh"])

         with gr.Row():
             with gr.Column():
@@ -312,6 +312,7 @@ def inference_tabs():
         with gr.Row():
             model_pth0 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
             model_index0 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
+            refesh1 = gr.Button(translations["refesh"])

         with gr.Row():
             with gr.Column():
@@ -332,7 +333,7 @@ def inference_tabs():
                 tts_voice = gr.Dropdown(label=translations["voice"], choices=edgetts, interactive=True, value="vi-VN-NamMinhNeural")
                 tts_pitch = gr.Slider(minimum=-20, maximum=20, step=1, info=translations["pitch_info_2"], label=translations["pitch"], value=0, interactive=True)
             with gr.Column():
-
+
                 with gr.Row():
                     index_strength0 = gr.Slider(label=translations["index_strength"], info=translations["index_strength_info"], minimum=0, maximum=1, value=0.5, step=0.01, interactive=True, visible=model_index0.value != "")
                 with gr.Accordion(translations["output_path"], open=False):
@@ -457,7 +458,21 @@ def inference_tabs():
         gr.Markdown(translations["convert_with_whisper_info"])
         with gr.Row():
             with gr.Column():
+
+                with gr.Accordion(translations["model_accordion"] + " 1", open=True):
+                    with gr.Row():
+                        model_pth2 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
+                        model_index2 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
+
+                    refesh2 = gr.Button(translations["refesh"])
+                with gr.Accordion(translations["model_accordion"] + " 2", open=True):
+                    with gr.Row():
+                        model_pth3 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
+                        model_index3 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
+                    refesh3 = gr.Button(translations["refesh"])
+
                 with gr.Group():
+
                     with gr.Row():
                         cleaner2 = gr.Checkbox(label=translations["clear_audio"], value=False, interactive=True)
                         autotune2 = gr.Checkbox(label=translations["autotune"], value=False, interactive=True)
@@ -470,12 +485,7 @@ def inference_tabs():
         convert_button3 = gr.Button(translations["convert_audio"], variant="primary")
         with gr.Row():
             with gr.Column():
-
-                with gr.Row():
-                    model_pth2 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
-                    model_index2 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
-                with gr.Row():
-                    refesh2 = gr.Button(translations["refesh"])
+
                 with gr.Row():
                     pitch3 = gr.Slider(minimum=-20, maximum=20, step=1, info=translations["pitch_info"], label=translations["pitch"], value=0, interactive=True)
                     index_strength2 = gr.Slider(label=translations["index_strength"], info=translations["index_strength_info"], minimum=0, maximum=1, value=0.5, step=0.01, interactive=True, visible=model_index2.value != "")
@@ -489,12 +499,7 @@ def inference_tabs():
         with gr.Row():
             input2 = gr.File(label=translations["drop_audio"], file_types=[".wav", ".mp3", ".flac", ".ogg", ".opus", ".m4a", ".mp4", ".aac", ".alac", ".wma", ".aiff", ".webm", ".ac3"])
             with gr.Column():
-
-                with gr.Row():
-                    model_pth3 = gr.Dropdown(label=translations["model_name"], choices=model_name, value=model_name[0] if len(model_name) >= 1 else "", interactive=True, allow_custom_value=True)
-                    model_index3 = gr.Dropdown(label=translations["index_path"], choices=index_path, value=index_path[0] if len(index_path) >= 1 else "", interactive=True, allow_custom_value=True)
-                with gr.Row():
-                    refesh3 = gr.Button(translations["refesh"])
+
                 with gr.Row():
                     pitch4 = gr.Slider(minimum=-20, maximum=20, step=1, info=translations["pitch_info"], label=translations["pitch"], value=0, interactive=True)
                     index_strength3 = gr.Slider(label=translations["index_strength"], info=translations["index_strength_info"], minimum=0, maximum=1, value=0.5, step=0.01, interactive=True, visible=model_index3.value != "")
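The diff adds the refesh buttons but none of the hunks shown here wire their click handlers, so the repopulation logic must live elsewhere in inference.py. A minimal sketch of how such a button is typically wired in Gradio follows; the folder paths and helper names are assumptions for illustration, not taken from this repo:

import os
import gradio as gr

def scan_models(folder="assets/weights"):
    # Hypothetical helper: list .pth model files in the weights folder.
    return sorted(f for f in os.listdir(folder) if f.endswith(".pth")) if os.path.isdir(folder) else []

def scan_indexes(folder="assets/logs"):
    # Hypothetical helper: collect .index files anywhere under the logs folder.
    found = []
    for root, _, files in os.walk(folder):
        found.extend(os.path.join(root, name) for name in files if name.endswith(".index"))
    return sorted(found)

def refresh_choices():
    # One callback repopulates both dropdowns; gr.update changes a
    # component's choices/value in place without rebuilding the UI.
    models, indexes = scan_models(), scan_indexes()
    return (
        gr.update(choices=models, value=models[0] if models else ""),
        gr.update(choices=indexes, value=indexes[0] if indexes else ""),
    )

# Inside inference_tabs(), once the components exist:
# refesh.click(fn=refresh_choices, inputs=[], outputs=[model_pth, model_index])
# refesh1.click(fn=refresh_choices, inputs=[], outputs=[model_pth0, model_index0])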
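All three index_strength sliders are created with visible=model_index*.value != "", which only fixes their initial state; if a dropdown's value later changes (for example after a refresh), visibility has to be updated through a listener. A sketch of one plausible wiring, assuming a shared handler whose name is invented here, not taken from the diff:

import gradio as gr

def visible_when_indexed(index_value):
    # Show the strength slider only while an index file is selected.
    return gr.update(visible=index_value != "")

# model_index0.change(fn=visible_when_indexed, inputs=[model_index0], outputs=[index_strength0])
# model_index2.change(fn=visible_when_indexed, inputs=[model_index2], outputs=[index_strength2])
# model_index3.change(fn=visible_when_indexed, inputs=[model_index3], outputs=[index_strength3])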
|