Politrees committed
Commit 1eebed4
Parent: eecd1c9

Update app.py

Files changed (1):
  1. app.py +368 -2
app.py CHANGED
@@ -450,7 +450,7 @@ def get_vc(sid):
  cpt = None
  return {"visible": False, "__type__": "update"}
  person = "%s/%s" % (weight_root, sid)
- print("Загрузка %s" % person)
+ print("loading %s" % person)
  cpt = torch.load(person, map_location="cpu")
  tgt_sr = cpt["config"][-1]
  cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
@@ -1582,6 +1582,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
  label="Mangio-Crepe Hop Length. Более высокие числа уменьшат вероятность экстремального изменения высоты тона, но более низкие числа увеличат точность. 64-192 - хороший диапазон для экспериментов.",
  value=120,
  interactive=True,
+ visible=False
  )
  f0method0.change(fn=whethercrepeornah, inputs=[f0method0], outputs=[crepe_hop_length])
  filter_radius0 = gr.Slider(
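
The added visible=False together with the existing f0method0.change(...) wiring is the standard Gradio show/hide pattern: the radio's change event returns a visibility update for the slider. A minimal sketch of that pattern, assuming Gradio 3.x; toggle_hop_length here is a stand-in for whethercrepeornah, whose exact return value is not shown in this diff:

import gradio as gr  # assumes Gradio 3.x, matching the queue()/launch() call at the end of the diff

def toggle_hop_length(method):  # stand-in for whethercrepeornah in app.py
    # Show the hop-length slider only for the crepe-style extractors.
    return gr.update(visible=method in ("crepe", "mangio-crepe"))

with gr.Blocks() as demo:
    f0method = gr.Radio(choices=["pm", "harvest", "crepe", "mangio-crepe", "rmvpe"], value="rmvpe")
    hop_length = gr.Slider(minimum=1, maximum=512, value=120, visible=False)
    f0method.change(fn=toggle_hop_length, inputs=[f0method], outputs=[hop_length])

demo.launch()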
@@ -1616,7 +1617,125 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
  step=0.01,
  interactive=True,
  )
+

+ with gr.Accordion("Batch Conversion",open=False):
+ with gr.Row():
+ with gr.Column():
+ vc_transform1 = gr.Number(
+ label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
+ )
+ opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
+ f0method1 = gr.Radio(
+ label=i18n(
+ "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"
+ ),
+ choices=["pm", "harvest", "crepe", "rmvpe"],
+ value="rmvpe",
+ interactive=True,
+ )
+ filter_radius1 = gr.Slider(
+ minimum=0,
+ maximum=7,
+ label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
+ value=3,
+ step=1,
+ interactive=True,
+ )
+ with gr.Column():
+ file_index3 = gr.Textbox(
+ label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
+ value="",
+ interactive=True,
+ )
+ file_index4 = gr.Dropdown(
+ label=i18n("自动检测index路径,下拉式选择(dropdown)"),
+ choices=sorted(index_paths),
+ interactive=True,
+ )
+ refresh_button.click(
+ fn=lambda: change_choices()[1],
+ inputs=[],
+ outputs=file_index4,
+ )
+ # file_big_npy2 = gr.Textbox(
+ # label=i18n("特征文件路径"),
+ # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
+ # interactive=True,
+ # )
+ index_rate2 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("检索特征占比"),
+ value=1,
+ interactive=True,
+ )
+ with gr.Column():
+ resample_sr1 = gr.Slider(
+ minimum=0,
+ maximum=48000,
+ label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
+ value=0,
+ step=1,
+ interactive=True,
+ )
+ rms_mix_rate1 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
+ value=1,
+ interactive=True,
+ )
+ protect1 = gr.Slider(
+ minimum=0,
+ maximum=0.5,
+ label=i18n(
+ "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
+ ),
+ value=0.33,
+ step=0.01,
+ interactive=True,
+ )
+ with gr.Column():
+ dir_input = gr.Textbox(
+ label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
+ value="E:\codes\py39\\test-20230416b\\todo-songs",
+ )
+ inputs = gr.File(
+ file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
+ )
+ with gr.Row():
+ format1 = gr.Radio(
+ label=i18n("导出文件格式"),
+ choices=["wav", "flac", "mp3", "m4a"],
+ value="flac",
+ interactive=True,
+ )
+ but1 = gr.Button(i18n("转换"), variant="primary")
+ vc_output3 = gr.Textbox(label=i18n("输出信息"))
+ but1.click(
+ vc_multi,
+ [
+ spk_item,
+ dir_input,
+ opt_input,
+ inputs,
+ vc_transform1,
+ f0method1,
+ file_index3,
+ file_index4,
+ # file_big_npy2,
+ index_rate2,
+ filter_radius1,
+ resample_sr1,
+ rms_mix_rate1,
+ protect1,
+ format1,
+ crepe_hop_length,
+ ],
+ [vc_output3],
+ )
+ but1.click(fn=lambda: easy_uploader.clear())
  with gr.TabItem("Загрузить модель"):
  with gr.Row():
  url=gr.Textbox(label="Введите URL-адрес модели:")
@@ -1626,6 +1745,253 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
  with gr.Row():
  status_bar=gr.Textbox(label="")
  download_button.click(fn=download_from_url, inputs=[url, model], outputs=[status_bar])
-
+
+ def has_two_files_in_pretrained_folder():
+ pretrained_folder = "./pretrained/"
+ if not os.path.exists(pretrained_folder):
+ return False
+
+ files_in_folder = os.listdir(pretrained_folder)
+ num_files = len(files_in_folder)
+ return num_files >= 2
+
+ if has_two_files_in_pretrained_folder():
+ print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
+ with gr.TabItem("Train", visible=False):
+ with gr.Row():
+ with gr.Column():
+ exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
+ sr2 = gr.Radio(
+ label=i18n("目标采样率"),
+ choices=["40k", "48k"],
+ value="40k",
+ interactive=True,
+ visible=False
+ )
+ if_f0_3 = gr.Radio(
+ label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
+ choices=[True, False],
+ value=True,
+ interactive=True,
+ visible=False
+ )
+ version19 = gr.Radio(
+ label="RVC version",
+ choices=["v1", "v2"],
+ value="v2",
+ interactive=True,
+ visible=False,
+ )
+ np7 = gr.Slider(
+ minimum=0,
+ maximum=config.n_cpu,
+ step=1,
+ label="# of CPUs for data processing (Leave as it is)",
+ value=config.n_cpu,
+ interactive=True,
+ visible=True
+ )
+ trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
+ easy_uploader = gr.Files(label='OR Drop your audios here. They will be uploaded in your dataset path above.',file_types=['audio'])
+ but1 = gr.Button("1. Process The Dataset", variant="primary")
+ info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
+ easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4], outputs=[info1])
+ but1.click(
+ preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
+ )
+ with gr.Column():
+ spk_id5 = gr.Slider(
+ minimum=0,
+ maximum=4,
+ step=1,
+ label=i18n("请指定说话人id"),
+ value=0,
+ interactive=True,
+ visible=False
+ )
+ with gr.Accordion('GPU Settings', open=False, visible=False):
+ gpus6 = gr.Textbox(
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
+ value=gpus,
+ interactive=True,
+ visible=False
+ )
+ gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
+ f0method8 = gr.Radio(
+ label=i18n(
+ "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
+ ),
+ choices=["harvest","crepe", "mangio-crepe", "rmvpe"], # Fork feature: Crepe on f0 extraction for training.
+ value="rmvpe",
+ interactive=True,
+ )
+
+ extraction_crepe_hop_length = gr.Slider(
+ minimum=1,
+ maximum=512,
+ step=1,
+ label=i18n("crepe_hop_length"),
+ value=128,
+ interactive=True,
+ visible=False,
+ )
+ f0method8.change(fn=whethercrepeornah, inputs=[f0method8], outputs=[extraction_crepe_hop_length])
+ but2 = gr.Button("2. Pitch Extraction", variant="primary")
+ info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=8)
+ but2.click(
+ extract_f0_feature,
+ [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
+ [info2],
+ )
+ with gr.Row():
+ with gr.Column():
+ total_epoch11 = gr.Slider(
+ minimum=1,
+ maximum=5000,
+ step=10,
+ label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
+ value=250,
+ interactive=True,
+ )
+ butstop = gr.Button(
+ "Stop Training",
+ variant='primary',
+ visible=False,
+ )
+ but3 = gr.Button("3. Train Model", variant="primary", visible=True)
+
+ but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop])
+ butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[butstop, but3])
+
+
+ but4 = gr.Button("4.Train Index", variant="primary")
+ info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=10)
+ with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
+ #gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
+ with gr.Column():
+ save_epoch10 = gr.Slider(
+ minimum=1,
+ maximum=200,
+ step=1,
+ label="Backup every X amount of epochs:",
+ value=10,
+ interactive=True,
+ )
+ batch_size12 = gr.Slider(
+ minimum=1,
+ maximum=40,
+ step=1,
+ label="Batch Size (LEAVE IT unless you know what you're doing!):",
+ value=default_batch_size,
+ interactive=True,
+ )
+ if_save_latest13 = gr.Checkbox(
+ label="Save only the latest '.ckpt' file to save disk space.",
+ value=True,
+ interactive=True,
+ )
+ if_cache_gpu17 = gr.Checkbox(
+ label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
+ value=False,
+ interactive=True,
+ )
+ if_save_every_weights18 = gr.Checkbox(
+ label="Save a small final model to the 'weights' folder at each save point.",
+ value=True,
+ interactive=True,
+ )
+ zip_model = gr.Button('5. Download Model')
+ zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
+ zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
+ with gr.Group():
+ with gr.Accordion("Base Model Locations:", open=False, visible=False):
+ pretrained_G14 = gr.Textbox(
+ label=i18n("加载预训练底模G路径"),
+ value="pretrained_v2/f0G40k.pth",
+ interactive=True,
+ )
+ pretrained_D15 = gr.Textbox(
+ label=i18n("加载预训练底模D路径"),
+ value="pretrained_v2/f0D40k.pth",
+ interactive=True,
+ )
+ gpus16 = gr.Textbox(
+ label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
+ value=gpus,
+ interactive=True,
+ )
+ sr2.change(
+ change_sr2,
+ [sr2, if_f0_3, version19],
+ [pretrained_G14, pretrained_D15, version19],
+ )
+ version19.change(
+ change_version19,
+ [sr2, if_f0_3, version19],
+ [pretrained_G14, pretrained_D15],
+ )
+ if_f0_3.change(
+ change_f0,
+ [if_f0_3, sr2, version19],
+ [f0method8, pretrained_G14, pretrained_D15],
+ )
+ but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
+ but3.click(
+ click_train,
+ [
+ exp_dir1,
+ sr2,
+ if_f0_3,
+ spk_id5,
+ save_epoch10,
+ total_epoch11,
+ batch_size12,
+ if_save_latest13,
+ pretrained_G14,
+ pretrained_D15,
+ gpus16,
+ if_cache_gpu17,
+ if_save_every_weights18,
+ version19,
+ ],
+ [
+ info3,
+ butstop,
+ but3,
+ ],
+ )
+ but4.click(train_index, [exp_dir1, version19], info3)
+ but5.click(
+ train1key,
+ [
+ exp_dir1,
+ sr2,
+ if_f0_3,
+ trainset_dir4,
+ spk_id5,
+ np7,
+ f0method8,
+ save_epoch10,
+ total_epoch11,
+ batch_size12,
+ if_save_latest13,
+ pretrained_G14,
+ pretrained_D15,
+ gpus16,
+ if_cache_gpu17,
+ if_save_every_weights18,
+ version19,
+ extraction_crepe_hop_length
+ ],
+ info3,
+ )
+
+ else:
+ print(
+ "Pretrained weights not downloaded. Disabling training tab.\n"
+ "Wondering how to train a voice? Visit here for the RVC model training guide: https://t.ly/RVC_Training_Guide\n"
+ "-------------------------------\n"
+ )
+
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
  #endregion

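One detail worth calling out in the Train tab wiring: both but3 ("3. Train Model") and butstop ("Stop Training") are bound to stoptraining, with the two buttons passed as outputs so the handler can swap their visibility (but3 is additionally bound to click_train further down). A sketch of that two-button swap, assuming Gradio 3.x; toggle here is a stand-in, since stoptraining's body is not part of this diff:

import gradio as gr  # assumes Gradio 3.x

def toggle(flag):
    # flag == 0: training is starting -> hide the clicked "Train" button, show "Stop".
    # flag == 1: stop was requested   -> hide the clicked "Stop" button, show "Train".
    # One update per output component, in the same order as outputs=[clicked, other].
    return gr.update(visible=False), gr.update(visible=True)

with gr.Blocks() as demo:
    train_btn = gr.Button("3. Train Model", variant="primary", visible=True)
    stop_btn = gr.Button("Stop Training", variant="primary", visible=False)
    train_btn.click(fn=toggle, inputs=[gr.Number(value=0, visible=False)], outputs=[train_btn, stop_btn])
    stop_btn.click(fn=toggle, inputs=[gr.Number(value=1, visible=False)], outputs=[stop_btn, train_btn])

demo.launch()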
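
The largest addition gates the whole Train tab on the presence of pretrained weights: has_two_files_in_pretrained_folder() is evaluated while the Blocks context is being built, so the tab is simply never constructed when ./pretrained/ holds fewer than two files. A stripped-down sketch of that build-time gating, assuming Gradio 3.x (the tab bodies here are placeholders, not the real UI):

import os
import gradio as gr

def has_two_files_in_pretrained_folder():
    pretrained_folder = "./pretrained/"
    if not os.path.exists(pretrained_folder):
        return False
    return len(os.listdir(pretrained_folder)) >= 2

with gr.Blocks() as app:
    with gr.Tabs():
        with gr.TabItem("Inference"):
            gr.Markdown("inference controls go here")  # placeholder
        if has_two_files_in_pretrained_folder():
            # The check runs at build time, so the tab only exists when the weights are present.
            with gr.TabItem("Train"):
                gr.Markdown("training controls go here")  # placeholder
        else:
            print("Pretrained weights not downloaded. Disabling training tab.")

# Same queue settings as the diff's final context line (Gradio 3.x signature).
app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)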