Create app.py
app.py (ADDED)
import gradio as gr
import os
import sys
from colorama import Fore

now_dir = os.getcwd()
sys.path.append(now_dir)


# List the audio files available in a folder (used to fill the input dropdown).
# Paths are returned relative to the working directory so they can be passed
# straight to the inference CLI; an empty list is returned if the folder is missing.
def show_available(folder):
    path = os.path.join(now_dir, folder)
    if not os.path.isdir(path):
        return []
    return sorted(os.path.join(folder, f) for f in os.listdir(path))


# Detect the .pth and .index files for a given model name
def detect_files(model_name):
    model_dir = f"{now_dir}/assets/weights/{model_name}"
    index_dir = f"{now_dir}/logs/{model_name}"

    # Detect the .pth file
    model_pth_file = None
    for file in os.listdir(model_dir):
        if file.endswith(".pth"):
            model_pth_file = os.path.join(model_dir, file)
            break

    # Detect the .index file
    index_file = None
    for file in os.listdir(index_dir):
        if file.endswith(".index"):
            index_file = os.path.join(index_dir, file)
            break

    if model_pth_file and index_file:
        return f"Model .pth file: {model_pth_file}\nIndex file: {index_file}"
    else:
        return "Model .pth or index file not found."


# Process the audio using the detected files
def process_audio(model_name, pitch, input_path, f0_method, save_as, index_rate, volume_normalization, consonant_protection):
    model_dir = f"{now_dir}/assets/weights/{model_name}"
    index_dir = f"{now_dir}/logs/{model_name}"

    # Detect the model and index files
    model_pth_file = None
    index_file = None
    for file in os.listdir(model_dir):
        if file.endswith(".pth"):
            model_pth_file = os.path.join(model_dir, file)
            break

    for file in os.listdir(index_dir):
        if file.endswith(".index"):
            index_file = os.path.join(index_dir, file)
            break

    if not model_pth_file or not index_file:
        return "Model .pth or index file not found.", None

    if not os.path.exists(input_path):
        return f"{input_path} was not found in your RVC folder.", None

    # Point the inference CLI at the detected files via environment variables
    os.environ['index_root'] = os.path.dirname(index_file)
    index_path = os.path.basename(index_file)
    os.environ['weight_root'] = os.path.dirname(model_pth_file)

    # Remove any previous output
    if os.path.exists(save_as):
        os.remove(save_as)

    # Run the inference CLI
    os.system(f"python {now_dir}/tools/infer_cli.py --f0up_key {pitch} --input_path {input_path} --index_path {index_path} --f0method {f0_method} --opt_path {save_as} --model_name {model_name} --index_rate {index_rate} --device 'cuda:0' --is_half True --filter_radius 3 --resample_sr 0 --rms_mix_rate {volume_normalization} --protect {consonant_protection}")

    if os.path.exists(save_as):
        return "Processing complete. Here is your output audio:", save_as
    else:
        return "Error in processing audio.", None


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🔊 **LISTEN TO YOUR MODEL**")

    model_name = gr.Textbox(label="Model Name", value="Ren")
    pitch = gr.Slider(minimum=-12, maximum=12, step=1, label="Pitch", value=0)
    input_path = gr.Dropdown(label="Input Audio", choices=show_available('audios'), value='', interactive=True)
    f0_method = gr.Radio(choices=["rmvpe", "pm", "crepe"], label="F0 Method", value="rmvpe")
    save_as = gr.Textbox(label="Save As", value="/content/RVC/audios/cli_output.wav")
    index_rate = gr.Slider(minimum=0, maximum=1, step=0.01, label="Index Rate", value=0.5)
    volume_normalization = gr.Slider(minimum=0, maximum=1, step=0.01, label="Volume Normalization", value=0)
    consonant_protection = gr.Slider(minimum=0, maximum=1, step=0.01, label="Consonant Protection", value=0.5)

    output_text = gr.Textbox(label="Output")
    output_audio = gr.Audio(label="Processed Audio")

    # Button to detect files
    detect_btn = gr.Button("Detect Files")
    detect_btn.click(fn=detect_files, inputs=[model_name], outputs=output_text)

    # Button to process the audio and return the audio output
    submit_btn = gr.Button("Submit")
    submit_btn.click(fn=process_audio,
                     inputs=[model_name, pitch, input_path, f0_method, save_as, index_rate, volume_normalization, consonant_protection],
                     outputs=[output_text, output_audio])

# Launch the app
demo.launch()
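
A minimal sketch of the folder layout app.py expects, inferred from the paths built in detect_files() and process_audio(); "Ren" is only an example model name matching the default in the UI, and the script assumes it is run from the repository root:

# Sketch: create the folders app.py looks in, inferred from its path strings.
import os

model_name = "Ren"  # matches the default "Model Name" value in the UI
for d in (f"assets/weights/{model_name}", f"logs/{model_name}", "audios"):
    os.makedirs(d, exist_ok=True)

# assets/weights/Ren/  -> place the trained .pth checkpoint here
# logs/Ren/            -> place the matching .index file here
# audios/              -> place input .wav files here (they appear in the dropdown)

With the files in place, "Detect Files" reports the resolved .pth and .index paths, and "Submit" writes the converted audio to the "Save As" path and plays it back.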