# Names used below but not defined in this file (names, index_paths, config,
# gpu_info, gpus, np, F0GPUVisible, and the event handlers such as clean,
# change_sr2, change_version19, change_f0_method, preprocess_dataset,
# extract_f0_feature, click_train, train_index, train1key) are expected to
# come from web.py via the star import.
from web import *
import gradio as gr
import os
import shutil
import glob
from rvcfuncs import download_from_url, CachedModels

os.makedirs("dataset", exist_ok=True)
model_library = CachedModels()
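# model_library.models maps model names to download URLs; it backs the
# search dropdown in the "Download Models" tab below.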

assets_folder = "./assets/"
if not os.path.exists(assets_folder):
    os.makedirs(assets_folder)

files = {
    "rmvpe/rmvpe.pt": "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt",
    "hubert/hubert_base.pt": "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt"
}

for file, link in files.items():
    file_path = os.path.join(assets_folder, file)
    if not os.path.exists(file_path):
        try:
            os.system(f'wget {link} -O {file_path}')
        except OSError as e:
            print(f"Error downloading {file}: {e}")

# The UI has four top-level tabs: Inference, Download Models, Train, and Credits.
with gr.Blocks(title="RVC UI") as app:
    gr.Markdown("<center><h1>RVC UI</h1></center>")
    gr.Markdown("<center>This UI is not finished yet!</center>")
    with gr.TabItem("inference"):
        with gr.Tabs():        
            models = gr.Dropdown(label="Model Voice", choices=sorted(names), value=lambda:sorted(names)[0] if len(sorted(names)) > 0 else '', interactive=True)
            with gr.Row():
                clean_button = gr.Button("Refresh model", variant="primary")
                with gr.Row():
                    with gr.Row():
                        pith_voice = gr.Number(label="Transpose 12 for femal, -12 fo male)", value=0)
                        spk_item = gr.Slider(minimum=0, maximum=2333, step=1, label="Select Speaker/Singer ID", value=0, visible=False, interactive=False)
                        clean_button.click(fn=clean, inputs=[], outputs=[models], api_name="infer_clean")
            modelinfo = gr.Textbox(label="Model info", max_lines=8, visible=False)
    
            input_audio0 = gr.Audio(label="The audio file to be processed", type="filepath")
            file_index1 = gr.File(label="Path to the feature index file. Leave blank to use the selected result from the dropdown")
            with gr.Column():
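                # Pitch-extraction and post-processing controls for single-file conversion.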
                f0method0 = gr.Radio(label="Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive; 'rmvpe': best quality with little GPU requirement)", choices=["pm", "dio", "harvest", "crepe", "rmvpe", "fcpe"], value="rmvpe", interactive=True)
                resample_sr0 = gr.Slider(minimum=0, maximum=48000, label="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling", value=0, step=1, interactive=True)
                rms_mix_rate0 = gr.Slider(minimum=0, maximum=1, label="Adjust the volume envelope scaling. Closer to 0, the more it mimics the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume", value=0.25, interactive=True)
                protect0 = gr.Slider(minimum=0, maximum=0.5, label="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy", value=0.33, step=0.01, interactive=True)
                filter_radius0 = gr.Slider(minimum=0, maximum=7, label="If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", value=3, step=1, interactive=True)                             
                f0_file = gr.File(label="F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation", visible=False)
                but0 = gr.Button("Convert", variant="primary")
                vc_output1 = gr.Textbox(label="Output information", interactive=False)
                vc_output2 = gr.Audio(label="Export audio (click on the three dots in the lower right corner to download)", type="filepath", interactive=False)
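                # NOTE: but0 ("Convert") is not wired to a handler yet; connect it to the
                # single-file conversion callback exported by web.py (a vc_single-style
                # function, hypothetical here) with [vc_output1, vc_output2] as outputs.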
                #refresh_button.click(fn=change_choices, inputs=[], outputs=[models, file_index2], api_name="infer_refresh")
                with gr.Accordion("Batch inference"):
                    gr.Markdown("<center>Batch conversion\n. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').")
                    vc_transform1 = gr.Number(label="Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12)", value=0)
                    dir_input = gr.Textbox(label="Enter the path of the audio folder to be processed (copy it from the address bar of the file manager)", placeholder="C:\\Users\\Desktop\\input_vocal_dir")
                    inputs = gr.File(file_count="multiple", label="Multiple audio files can also be imported. If a folder path exists, this input is ignored.")
                    opt_input = gr.Textbox(label="Specify output folder", value="opt")
                    file_index4 = gr.Dropdown(label="Auto-detect index path and select from the dropdown", choices=sorted(index_paths), interactive=True)
                    file_index3 = gr.File(label="Path to the feature index file. Leave blank to use the selected result from the dropdown")
                    #refresh_button.click(fn=lambda: change_choices()[1], inputs=[], outputs=file_index4, api_name="infer_refresh_batch")
                    f0method1 = gr.Radio(label="Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive; 'rmvpe': best quality with little GPU requirement)", choices=["pm", "harvest", "crepe", "rmvpe", "fcpe"], value="rmvpe", interactive=True)
                    resample_sr1 = gr.Slider(minimum=0, maximum=48000, label="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling", value=0, step=1, interactive=True)        
                    rms_mix_rate1 = gr.Slider(minimum=0, maximum=1, label="Adjust the volume envelope scaling. Closer to 0, the more it mimics the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume", value=1, interactive=True)
                    protect1 = gr.Slider(minimum=0, maximum=0.5, label="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy", value=0.33, step=0.01, interactive=True)
                    filter_radius1 = gr.Slider(minimum=0, maximum=7, label="If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", value=3, step=1, interactive=True)
                    index_rate2 = gr.Slider(minimum=0, maximum=1, label="Feature searching ratio", value=1, interactive=True)
                    format1 = gr.Radio(label="Export file format", choices=["wav", "flac", "mp3", "m4a"], value="wav", interactive=True)
                    but1 = gr.Button("Convert", variant="primary")
                    vc_output3 = gr.Textbox(label="Output information", interactive=False)
    with gr.TabItem("Download Models"):
        with gr.Row():
            url_input = gr.Textbox(label="URL to model", value="", placeholder="https://...", scale=6)
            name_output = gr.Textbox(label="Save as", value="", placeholder="MyModel", scale=2)
            url_download = gr.Button(value="Download Model", scale=2)
            url_download.click(
                inputs=[url_input,name_output],
                outputs=[url_input],
                fn=download_from_url,
            )
        with gr.Row():
            model_browser = gr.Dropdown(choices=list(model_library.models.keys()), label="OR Search Models (Quality UNKNOWN)", scale=5)
            download_from_browser = gr.Button(value="Get", scale=2)
            download_from_browser.click(
                inputs=[model_browser],
                outputs=[model_browser],
                fn=lambda model: download_from_url(model_library.models[model],model),
            )
    with gr.TabItem("Train"):
        gr.Markdown("### Step 1. Fill in the experimental configuration.\nExperimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.")
        exp_dir1 = gr.Textbox(label="Enter the experiment name", value="my voice")
        author = gr.Textbox(label="Model Author (optional)")
        np7 = gr.Slider(minimum=0, maximum=config.n_cpu, step=1, label="Number of CPU processes used for pitch extraction and data processing", value=int(np.ceil(config.n_cpu / 1.5)), interactive=True)
        sr2 = gr.Radio(label="Target sample rate", choices=["40k", "48k"], value="40k", interactive=True)
        if_f0_3 = gr.Radio(label="Whether the model has pitch guidance (required for singing, optional for speech)", choices=["Yes", "No"], value="Yes", interactive=True)
        version19 = gr.Radio(label="Version", choices=["v1", "v2"], value="v2", interactive=True, visible=True)
        gr.Markdown("### Step 2. Audio processing. \n#### 1. Slicing.\nAutomatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.")
        trainset_dir4 = gr.Textbox(label="Enter the path of the training folder")
        spk_id5 = gr.Slider(minimum=0, maximum=4, step=1, label="Please specify the speaker/singer ID", value=0, interactive=True)
        gr.Markdown("#### 2. Feature extraction.\nUse CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index).")
        gpu_info9 = gr.Textbox(label="GPU Information", value=gpu_info, visible=F0GPUVisible)
        gpus6 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2", value=gpus, interactive=True, visible=F0GPUVisible)
        gpus_rmvpe = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes on GPU 0 and 1 process on GPU 1", value="%s-%s" % (gpus, gpus), interactive=True, visible=F0GPUVisible)
        f0method8 = gr.Radio(label="Select the pitch extraction algorithm: 'pm' speeds up extraction for singing; 'dio' is fast and good for high-quality speech but heavier on the CPU; 'harvest' gives better quality but is slower; 'rmvpe' gives the best results with little CPU/GPU load", choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"], value="rmvpe_gpu", interactive=True)
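        # Changing the method updates the rmvpe GPU textbox via change_f0_method below.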
        f0method8.change(fn=change_f0_method, inputs=[f0method8], outputs=[gpus_rmvpe])
        gr.Markdown("### Step 3. Start training.\nFill in the training settings and start training the model and index.")
        save_epoch10 = gr.Slider(minimum=1, maximum=50, step=1, label="Save frequency (save_every_epoch)", value=5, interactive=True)
        total_epoch11 = gr.Slider(minimum=2, maximum=1000, step=1, label="Total training epochs (total_epoch)", value=20, interactive=True)
        batch_size12 = gr.Slider(minimum=1, maximum=40, step=1, label="Batch size per GPU", value=20, interactive=True)
        if_save_latest13 = gr.Radio(label="Save only the latest '.ckpt' file to save disk space", choices=["Yes", "No"], value="No", interactive=True)
        if_cache_gpu17 = gr.Radio(label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement", choices=["Yes", "No"], value="No", interactive=True)
        if_save_every_weights18 = gr.Radio(label="Save a small final model to the 'weights' folder at each save point", choices=["Yes","No"], value="No", interactive=True)               
        pretrained_G14 = gr.Textbox(label="Load pre-trained base model G path", value="assets/pretrained_v2/f0G40k.pth", interactive=True)
        pretrained_D15 = gr.Textbox(label="Load pre-trained base model D path", value="assets/pretrained_v2/f0D40k.pth", interactive=True)
        gpus16 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2", value="0", interactive=True)
        sr2.change(change_sr2, [sr2, if_f0_3, version19], [pretrained_G14, pretrained_D15])
        version19.change(change_version19, [sr2, if_f0_3, version19], [pretrained_G14, pretrained_D15, sr2])
        # Placeholder handler: accept the three inputs and leave all four outputs
        # unchanged (gr.update() with no arguments is a no-op component update).
        if_f0_3.change(fn=lambda *_: (gr.update(),) * 4, inputs=[if_f0_3, sr2, version19], outputs=[f0method8, gpus_rmvpe, pretrained_G14, pretrained_D15])
        but1 = gr.Button("Process data", variant="primary")
        but2 = gr.Button("Feature extraction", variant="primary")
        but3 = gr.Button("Train model", variant="primary")
        but4 = gr.Button("Train feature index", variant="primary")
        but5 = gr.Button("One-click training", variant="primary")
        info1 = gr.Textbox(label="Output information", value="")
        
        # Wire each button to its pipeline stage (handlers come from web.py).
        but1.click(preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1], api_name="train_preprocess")
        but2.click(extract_f0_feature, [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, gpus_rmvpe], [info1], api_name="train_extract_f0_feature")
        but3.click(click_train, [exp_dir1, sr2, if_f0_3, spk_id5, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17, if_save_every_weights18, version19, author], info1, api_name="train_start")
        but4.click(train_index, [exp_dir1, version19], info1)
        but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17, if_save_every_weights18, version19, gpus_rmvpe, author], info1, api_name="train_start_all")

    with gr.TabItem("Credits"):
        gr.Markdown("This UI was made by Blane187.")
app.launch()
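# Note: pass share=True to app.launch() for a temporary public Gradio link.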