Blane187 committed on
Commit a622183
1 Parent(s): 951072c

Update app.py

Files changed (1)
  1. app.py +312 -19
app.py CHANGED
@@ -1,12 +1,306 @@
 import gradio as gr
 from web import *
 
-with gr.Blocks(title="RVC UI") as app:
+# theme
+
+
+from typing import Iterable
+import gradio as gr
+
+# gr.themes.builder()
+from gradio.themes.soft import Base
+from gradio.themes.utils import colors, fonts, sizes
+import time
+
+
+
+
+# Applio Theme
+class Applio(Base):
+    def __init__(
+        self,
+        *,
+        primary_hue: colors.Color | str = colors.neutral,
+        secondary_hue: colors.Color | str = colors.neutral,
+        neutral_hue: colors.Color | str = colors.neutral,
+        spacing_size: sizes.Size | str = sizes.spacing_md,
+        radius_size: sizes.Size | str = sizes.radius_md,
+        text_size: sizes.Size | str = sizes.text_lg,
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
+            "Syne V",
+            fonts.GoogleFont("Syne"),
+            "ui-sans-serif",
+            "system-ui",
+        ),
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
+            "ui-monospace",
+            fonts.GoogleFont("Nunito Sans"),
+        ),
+    ):
+        super().__init__(
+            primary_hue=primary_hue,
+            secondary_hue=secondary_hue,
+            neutral_hue=neutral_hue,
+            spacing_size=spacing_size,
+            radius_size=radius_size,
+            text_size=text_size,
+            font=font,
+            font_mono=font_mono,
+        )
+        self.name = ("Applio",)
+        self.secondary_100 = ("#dbeafe",)
+        self.secondary_200 = ("#bfdbfe",)
+        self.secondary_300 = ("#93c5fd",)
+        self.secondary_400 = ("#60a5fa",)
+        self.secondary_50 = ("#eff6ff",)
+        self.secondary_500 = ("#3b82f6",)
+        self.secondary_600 = ("#2563eb",)
+        self.secondary_700 = ("#1d4ed8",)
+        self.secondary_800 = ("#1e40af",)
+        self.secondary_900 = ("#1e3a8a",)
+        self.secondary_950 = ("#1d3660",)
+
+        super().set(
+            # Blaise
+            background_fill_primary="#110F0F",
+            background_fill_primary_dark="#110F0F",
+            background_fill_secondary="#110F0F",
+            background_fill_secondary_dark="#110F0F",
+            block_background_fill="*neutral_800",
+            block_background_fill_dark="*neutral_800",
+            block_border_color="*border_color_primary",
+            block_border_color_dark="*border_color_primary",
+            block_border_width="1px",
+            block_border_width_dark="1px",
+            block_info_text_color="*body_text_color_subdued",
+            block_info_text_color_dark="*body_text_color_subdued",
+            block_info_text_size="*text_sm",
+            block_info_text_weight="400",
+            block_label_background_fill="*background_fill_primary",
+            block_label_background_fill_dark="*background_fill_secondary",
+            block_label_border_color="*border_color_primary",
+            block_label_border_color_dark="*border_color_primary",
+            block_label_border_width="1px",
+            block_label_border_width_dark="1px",
+            block_label_margin="0",
+            block_label_padding="*spacing_sm *spacing_lg",
+            block_label_radius="calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0",
+            block_label_right_radius="0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)",
+            block_label_shadow="*block_shadow",
+            block_label_text_color="*#110F0F",
+            block_label_text_color_dark="*#110F0F",
+            block_label_text_weight="400",
+            block_padding="*spacing_xl",
+            block_radius="*radius_md",
+            block_shadow="none",
+            block_shadow_dark="none",
+            block_title_background_fill="rgb(255,255,255)",
+            block_title_background_fill_dark="rgb(255,255,255)",
+            block_title_border_color="none",
+            block_title_border_color_dark="none",
+            block_title_border_width="0px",
+            block_title_padding="*block_label_padding",
+            block_title_radius="*block_label_radius",
+            block_title_text_color="#110F0F",
+            block_title_text_color_dark="#110F0F",
+            block_title_text_size="*text_md",
+            block_title_text_weight="600",
+            body_background_fill="#110F0F",
+            body_background_fill_dark="#110F0F",
+            body_text_color="white",
+            body_text_color_dark="white",
+            body_text_color_subdued="*neutral_400",
+            body_text_color_subdued_dark="*neutral_400",
+            body_text_size="*text_md",
+            body_text_weight="400",
+            border_color_accent="*neutral_600",
+            border_color_accent_dark="*neutral_600",
+            border_color_primary="*neutral_800",
+            border_color_primary_dark="*neutral_800",
+            button_border_width="*input_border_width",
+            button_border_width_dark="*input_border_width",
+            button_cancel_background_fill="*button_secondary_background_fill",
+            button_cancel_background_fill_dark="*button_secondary_background_fill",
+            button_cancel_background_fill_hover="*button_cancel_background_fill",
+            button_cancel_background_fill_hover_dark="*button_cancel_background_fill",
+            button_cancel_border_color="*button_secondary_border_color",
+            button_cancel_border_color_dark="*button_secondary_border_color",
+            button_cancel_border_color_hover="*button_cancel_border_color",
+            button_cancel_border_color_hover_dark="*button_cancel_border_color",
+            button_cancel_text_color="#110F0F",
+            button_cancel_text_color_dark="#110F0F",
+            button_cancel_text_color_hover="#110F0F",
+            button_cancel_text_color_hover_dark="#110F0F",
+            button_large_padding="*spacing_lg calc(2 * *spacing_lg)",
+            button_large_radius="*radius_lg",
+            button_large_text_size="*text_lg",
+            button_large_text_weight="600",
+            button_primary_background_fill="*primary_600",
+            button_primary_background_fill_dark="*primary_600",
+            button_primary_background_fill_hover="*primary_500",
+            button_primary_background_fill_hover_dark="*primary_500",
+            button_primary_border_color="*primary_500",
+            button_primary_border_color_dark="*primary_500",
+            button_primary_border_color_hover="*primary_400",
+            button_primary_border_color_hover_dark="*primary_400",
+            button_primary_text_color="white",
+            button_primary_text_color_dark="white",
+            button_primary_text_color_hover="#110F0F",
+            button_primary_text_color_hover_dark="#110F0F",
+            button_secondary_background_fill="transparent",
+            button_secondary_background_fill_dark="transparent",
+            button_secondary_background_fill_hover="*neutral_800",
+            button_secondary_background_fill_hover_dark="*neutral_800",
+            button_secondary_border_color="*neutral_700",
+            button_secondary_border_color_dark="*neutral_700",
+            button_secondary_border_color_hover="*neutral_600",
+            button_secondary_border_color_hover_dark="*neutral_600",
+            button_secondary_text_color="white",
+            button_secondary_text_color_dark="white",
+            button_secondary_text_color_hover="*button_secondary_text_color",
+            button_secondary_text_color_hover_dark="*button_secondary_text_color",
+            button_shadow="none",
+            button_shadow_active="*shadow_inset",
+            button_shadow_hover="none",
+            button_small_padding="*spacing_sm calc(2 * *spacing_sm)",
+            button_small_radius="*radius_lg",
+            button_small_text_size="*text_md",
+            button_small_text_weight="400",
+            button_transition="0.3s ease all",
+            checkbox_background_color="*neutral_700",
+            checkbox_background_color_dark="*neutral_700",
+            checkbox_background_color_focus="*checkbox_background_color",
+            checkbox_background_color_focus_dark="*checkbox_background_color",
+            checkbox_background_color_hover="*checkbox_background_color",
+            checkbox_background_color_hover_dark="*checkbox_background_color",
+            checkbox_background_color_selected="*secondary_600",
+            checkbox_background_color_selected_dark="*secondary_600",
+            checkbox_border_color="*neutral_700",
+            checkbox_border_color_dark="*neutral_700",
+            checkbox_border_color_focus="*secondary_500",
+            checkbox_border_color_focus_dark="*secondary_500",
+            checkbox_border_color_hover="*neutral_600",
+            checkbox_border_color_hover_dark="*neutral_600",
+            checkbox_border_color_selected="*secondary_600",
+            checkbox_border_color_selected_dark="*secondary_600",
+            checkbox_border_radius="*radius_sm",
+            checkbox_border_width="*input_border_width",
+            checkbox_border_width_dark="*input_border_width",
+            checkbox_check="url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e\")",
+            checkbox_label_background_fill="transparent",
+            checkbox_label_background_fill_dark="transparent",
+            checkbox_label_background_fill_hover="transparent",
+            checkbox_label_background_fill_hover_dark="transparent",
+            checkbox_label_background_fill_selected="transparent",
+            checkbox_label_background_fill_selected_dark="transparent",
+            checkbox_label_border_color="transparent",
+            checkbox_label_border_color_dark="transparent",
+            checkbox_label_border_color_hover="transparent",
+            checkbox_label_border_color_hover_dark="transparent",
+            checkbox_label_border_width="transparent",
+            checkbox_label_border_width_dark="transparent",
+            checkbox_label_gap="*spacing_lg",
+            checkbox_label_padding="*spacing_md calc(2 * *spacing_md)",
+            checkbox_label_shadow="none",
+            checkbox_label_text_color="*body_text_color",
+            checkbox_label_text_color_dark="*body_text_color",
+            checkbox_label_text_color_selected="*checkbox_label_text_color",
+            checkbox_label_text_color_selected_dark="*checkbox_label_text_color",
+            checkbox_label_text_size="*text_md",
+            checkbox_label_text_weight="400",
+            checkbox_shadow="*input_shadow",
+            color_accent="*primary_500",
+            color_accent_soft="*primary_50",
+            color_accent_soft_dark="*neutral_700",
+            container_radius="*radius_xl",
+            embed_radius="*radius_lg",
+            error_background_fill="*background_fill_primary",
+            error_background_fill_dark="*background_fill_primary",
+            error_border_color="*border_color_primary",
+            error_border_color_dark="*border_color_primary",
+            error_border_width="1px",
+            error_border_width_dark="1px",
+            error_text_color="#ef4444",
+            error_text_color_dark="#ef4444",
+            form_gap_width="0px",
+            input_background_fill="*neutral_900",
+            input_background_fill_dark="*neutral_900",
+            input_background_fill_focus="*secondary_600",
+            input_background_fill_focus_dark="*secondary_600",
+            input_background_fill_hover="*input_background_fill",
+            input_background_fill_hover_dark="*input_background_fill",
+            input_border_color="*neutral_700",
+            input_border_color_dark="*neutral_700",
+            input_border_color_focus="*secondary_600",
+            input_border_color_focus_dark="*primary_600",
+            input_border_color_hover="*input_border_color",
+            input_border_color_hover_dark="*input_border_color",
+            input_border_width="1px",
+            input_border_width_dark="1px",
+            input_padding="*spacing_xl",
+            input_placeholder_color="*neutral_500",
+            input_placeholder_color_dark="*neutral_500",
+            input_radius="*radius_lg",
+            input_shadow="none",
+            input_shadow_dark="none",
+            input_shadow_focus="*input_shadow",
+            input_shadow_focus_dark="*input_shadow",
+            input_text_size="*text_md",
+            input_text_weight="400",
+            layout_gap="*spacing_xxl",
+            link_text_color="*secondary_500",
+            link_text_color_active="*secondary_500",
+            link_text_color_active_dark="*secondary_500",
+            link_text_color_dark="*secondary_500",
+            link_text_color_hover="*secondary_400",
+            link_text_color_hover_dark="*secondary_400",
+            link_text_color_visited="*secondary_600",
+            link_text_color_visited_dark="*secondary_600",
+            loader_color="*color_accent",
+            loader_color_dark="*color_accent",
+            panel_background_fill="*background_fill_secondary",
+            panel_background_fill_dark="*background_fill_secondary",
+            panel_border_color="*border_color_primary",
+            panel_border_color_dark="*border_color_primary",
+            panel_border_width="1px",
+            panel_border_width_dark="1px",
+            prose_header_text_weight="600",
+            prose_text_size="*text_md",
+            prose_text_weight="400",
+            radio_circle="url(\"data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e\")",
+            section_header_text_size="*text_md",
+            section_header_text_weight="400",
+            shadow_drop="rgba(0,0,0,0.05) 0px 1px 2px 0px",
+            shadow_drop_lg="0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)",
+            shadow_inset="rgba(0,0,0,0.05) 0px 2px 4px 0px inset",
+            shadow_spread="3px",
+            shadow_spread_dark="1px",
+            slider_color="#9E9E9E",
+            slider_color_dark="#9E9E9E",
+            stat_background_fill="*primary_500",
+            stat_background_fill_dark="*primary_500",
+            table_border_color="*neutral_700",
+            table_border_color_dark="*neutral_700",
+            table_even_background_fill="*neutral_950",
+            table_even_background_fill_dark="*neutral_950",
+            table_odd_background_fill="*neutral_900",
+            table_odd_background_fill_dark="*neutral_900",
+            table_radius="*radius_lg",
+            table_row_focus="*color_accent_soft",
+            table_row_focus_dark="*color_accent_soft",
+        )
+
+
+applio = Applio()
+
+
+with gr.Blocks(theme=applio, title="RVC UI") as app:
 gr.Markdown("<center><h1> RVC UI 🗣️")
 gr.Markdown("<h1>this ui not done yet!")
 with gr.Tabs():
 with gr.TabItem("Single inference"):
-models = gr.Dropdown(label="voice model", choices=sorted(names))
+models = gr.Dropdown(label="voice model", choices=sorted(names), interactive=True)
 with gr.Row():
 clean_button = gr.Button("Refresh model", variant="primary")
 with gr.Row():
@@ -26,10 +320,9 @@ with gr.Blocks(title="RVC UI") as app:
 protect0 = gr.Slider(minimum=0, maximum=0.5, label="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy", value=0.33, step=0.01, interactive=True)
 filter_radius0 = gr.Slider(minimum=0, maximum=7, label="If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", value=3, step=1, interactive=True)
 f0_file = gr.File(label="F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation", visible=False)
-
 vc_output1 = gr.Textbox(label="Output information", interactive=False)
 vc_output2 = gr.Audio(label="Export audio (click on the three dots in the lower right corner to download)", type="filepath", interactive=False)
-#refresh_button.click(fn=change_choices, inputs=[], outputs=[models, file_index2], api_name="infer_refresh")
+refresh_button.click(fn=change_choices, inputs=[], outputs=[models, file_index2], api_name="infer_refresh")
 with gr.TabItem("Batch inference"):
 gr.Markdown("<center>Batch conversion\n. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').")
 vc_transform1 = gr.Number(label="Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12)", value=0)
@@ -38,7 +331,7 @@ with gr.Blocks(title="RVC UI") as app:
 opt_input = gr.Textbox(label="Specify output folder", value="opt")
 file_index4 = gr.Dropdown(label="Auto-detect index path and select from the dropdown", choices=sorted(index_paths), interactive=True)
 file_index3 = gr.File(label="Path to the feature index file. Leave blank to use the selected result from the dropdown")
-#refresh_button.click(fn=lambda: change_choices()[1], inputs=[], outputs=file_index4, api_name="infer_refresh_batch")
+refresh_button.click(fn=lambda: change_choices()[1], inputs=[], outputs=file_index4, api_name="infer_refresh_batch")
 f0method1 = gr.Radio(label="Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement", choices=["pm", "harvest", "crepe", "rmvpe", "fcpe"], value="rmvpe", interactive=True)
 resample_sr1 = gr.Slider(minimum=0, maximum=48000, label="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling", value=0, step=1, interactive=True)
 rms_mix_rate1 = gr.Slider(minimum=0, maximum=1, label="Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume", value=1, interactive=True)
@@ -53,7 +346,7 @@ with gr.Blocks(title="RVC UI") as app:
 gr.Markdown("### Step 1. Fill in the experimental configuration.\nExperimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.")
 exp_dir1 = gr.Textbox(label="Enter the experiment name", value="my voice")
 author = gr.Textbox(label="Model Author (Nullable)")
-#np7 = gr.Slider(minimum=0, maximum=config.n_cpu, step=1, label="Number of CPU processes used for pitch extraction and data processing", value=int(np.ceil(config.n_cpu / 1.5)), interactive=True)
+np7 = gr.Slider(minimum=0, maximum=config.n_cpu, step=1, label="Number of CPU processes used for pitch extraction and data processing", value=int(np.ceil(config.n_cpu / 1.5)), interactive=True)
 sr2 = gr.Radio(label="Target sample rate", choices=["40k", "48k"], value="40k", interactive=True)
 if_f0_3 = gr.Radio(label="Whether the model has pitch guidance (required for singing, optional for speech)", choices=["Yes", "No"], value="Yes", interactive=True)
 version19 = gr.Radio(label="Version", choices=["v1", "v2"], value="v2", interactive=True, visible=True)
@@ -62,16 +355,16 @@ with gr.Blocks(title="RVC UI") as app:
 spk_id5 = gr.Slider(minimum=0, maximum=4, step=1, label="Please specify the speaker/singer ID", value=0, interactive=True)
 but1 = gr.Button("Process data", variant="primary")
 info1 = gr.Textbox(label="Output information", value="")
-#but1.click(preprocess_dataset,[trainset_dir4, exp_dir1, sr2, np7],[info1],api_name="train_preprocess")
+but1.click(preprocess_dataset,[trainset_dir4, exp_dir1, sr2, np7],[info1],api_name="train_preprocess")
 gr.Markdown("#### 2. Feature extraction.\nUse CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index).")
-#gpu_info9 = gr.Textbox(label="GPU Information",value=gpu_info,visible=F0GPUVisible)
-#gpus6 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2",value=gpus,interactive=True,visible=F0GPUVisible)
-#gpus_rmvpe = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",value="%s-%s" % (gpus, gpus),interactive=True,visible=F0GPUVisible)
+gpu_info9 = gr.Textbox(label="GPU Information",value=gpu_info,visible=F0GPUVisible)
+gpus6 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2",value=gpus,interactive=True,visible=F0GPUVisible)
+gpus_rmvpe = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",value="%s-%s" % (gpus, gpus),interactive=True,visible=F0GPUVisible)
 f0method8 = gr.Radio(label="Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU", choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"], value="rmvpe_gpu", interactive=True)
 but2 = gr.Button("Feature extraction", variant="primary")
 info2 = gr.Textbox(label="Output information", value="")
-#f0method8.change(fn=change_f0_method,inputs=[f0method8],outputs=[gpus_rmvpe])
-#but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1,version19,gpus_rmvpe,],[info2],api_name="train_extract_f0_feature")
+f0method8.change(fn=change_f0_method,inputs=[f0method8],outputs=[gpus_rmvpe])
+but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1,version19,gpus_rmvpe,],[info2],api_name="train_extract_f0_feature")
 gr.Markdown("### Step 3. Start training.\nFill in the training settings and start training the model and index.")
 save_epoch10 = gr.Slider(minimum=1, maximum=50, step=1, label="Save frequency (save_every_epoch)", value=5, interactive=True)
 total_epoch11 = gr.Slider(minimum=2, maximum=1000, step=1, label="Total training epochs (total_epoch)", value=20, interactive=True)
@@ -82,16 +375,16 @@ with gr.Blocks(title="RVC UI") as app:
 pretrained_G14 = gr.Textbox(label="Load pre-trained base model G path", value="assets/pretrained_v2/f0G40k.pth", interactive=True)
 pretrained_D15 = gr.Textbox(label="Load pre-trained base model D path", value="assets/pretrained_v2/f0D40k.pth", interactive=True)
 gpus16 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2", value="0", interactive=True)
-#sr2.change(change_sr2,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15])
-#version19.change(change_version19,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15, sr2])
-#if_f0_3.change(fn=lambda: None, inputs=[if_f0_3, sr2, version19], outputs=[f0method8, gpus_rmvpe, pretrained_G14, pretrained_D15])
+sr2.change(change_sr2,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15])
+version19.change(change_version19,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15, sr2])
+if_f0_3.change(fn=lambda: None, inputs=[if_f0_3, sr2, version19], outputs=[f0method8, gpus_rmvpe, pretrained_G14, pretrained_D15])
 but3 = gr.Button("Train model", variant="primary")
 but4 = gr.Button("Train feature index", variant="primary")
 but5 = gr.Button("One-click training", variant="primary")
-#info3 = gr.Textbox(label=i18n("Output information"), value="")
-#but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,author,],info3,api_name="train_start")
-#but4.click(train_index, [exp_dir1, version19], info3)
-#but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,gpus_rmvpe,author],info3,api_name="train_start_all")
+info3 = gr.Textbox(label=i18n("Output information"), value="")
+but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,author,],info3,api_name="train_start")
+but4.click(train_index, [exp_dir1, version19], info3)
+but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,gpus_rmvpe,author],info3,api_name="train_start_all")
 
 with gr.TabItem('Credits'):
 gr.Markdown(
 