_Noxty committed
Commit 299dd8b · verified · 1 parent: 686d062

Update app.py

Files changed (1)
  1. app.py +3 -1021
app.py CHANGED
@@ -529,176 +529,6 @@ def if_done_multi(done, ps):
529
  done[0] = True
530
 
531
 
532
- def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
533
- sr = sr_dict[sr]
534
- os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
535
- f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
536
- f.close()
537
- cmd = (
538
- config.python_cmd
539
- + " trainset_preprocess_pipeline_print.py %s %s %s %s/logs/%s "
540
- % (trainset_dir, sr, n_p, now_dir, exp_dir)
541
- + str(config.noparallel)
542
- )
543
- print(cmd)
544
- p = Popen(cmd, shell=True) # , stdin=PIPE, stdout=PIPE,stderr=PIPE,cwd=now_dir
545
- ### Gradio quirk: Popen output can only be read all at once after the process finishes; without Gradio it streams line by line normally, so the workaround is to write to a log file and poll it on a timer
546
- done = [False]
547
- threading.Thread(
548
- target=if_done,
549
- args=(
550
- done,
551
- p,
552
- ),
553
- ).start()
554
- while 1:
555
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
556
- yield (f.read())
557
- sleep(1)
558
- if done[0] == True:
559
- break
560
- with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
561
- log = f.read()
562
- print(log)
563
- yield log
564
-
565
- # but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
566
- def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
567
- gpus = gpus.split("-")
568
- os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
569
- f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
570
- f.close()
571
- if if_f0:
572
- cmd = config.python_cmd + " extract_f0_print.py %s/logs/%s %s %s %s" % (
573
- now_dir,
574
- exp_dir,
575
- n_p,
576
- f0method,
577
- echl,
578
- )
579
- print(cmd)
580
- p = Popen(cmd, shell=True, cwd=now_dir) # , stdin=PIPE, stdout=PIPE,stderr=PIPE
581
- ### Gradio quirk: Popen output can only be read all at once after the process finishes; without Gradio it streams line by line normally, so the workaround is to write to a log file and poll it on a timer
582
- done = [False]
583
- threading.Thread(
584
- target=if_done,
585
- args=(
586
- done,
587
- p,
588
- ),
589
- ).start()
590
- while 1:
591
- with open(
592
- "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
593
- ) as f:
594
- yield (f.read())
595
- sleep(1)
596
- if done[0] == True:
597
- break
598
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
599
- log = f.read()
600
- print(log)
601
- yield log
602
- #### spawn a separate process for each part
603
- """
604
- n_part=int(sys.argv[1])
605
- i_part=int(sys.argv[2])
606
- i_gpu=sys.argv[3]
607
- exp_dir=sys.argv[4]
608
- os.environ["CUDA_VISIBLE_DEVICES"]=str(i_gpu)
609
- """
610
- leng = len(gpus)
611
- ps = []
612
- for idx, n_g in enumerate(gpus):
613
- cmd = (
614
- config.python_cmd
615
- + " extract_feature_print.py %s %s %s %s %s/logs/%s %s"
616
- % (
617
- config.device,
618
- leng,
619
- idx,
620
- n_g,
621
- now_dir,
622
- exp_dir,
623
- version19,
624
- )
625
- )
626
- print(cmd)
627
- p = Popen(
628
- cmd, shell=True, cwd=now_dir
629
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
630
- ps.append(p)
631
- ### Gradio quirk: Popen output can only be read all at once after the process finishes; without Gradio it streams line by line normally, so the workaround is to write to a log file and poll it on a timer
632
- done = [False]
633
- threading.Thread(
634
- target=if_done_multi,
635
- args=(
636
- done,
637
- ps,
638
- ),
639
- ).start()
640
- while 1:
641
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
642
- yield (f.read())
643
- sleep(1)
644
- if done[0] == True:
645
- break
646
- with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
647
- log = f.read()
648
- print(log)
649
- yield log
650
-
651
-
652
- def change_sr2(sr2, if_f0_3, version19):
653
- path_str = "" if version19 == "v1" else "_v2"
654
- f0_str = "f0" if if_f0_3 else ""
655
- if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
656
- if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
657
- if (if_pretrained_generator_exist == False):
658
- print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
659
- if (if_pretrained_discriminator_exist == False):
660
- print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
661
- return (
662
- ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
663
- ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
664
- {"visible": True, "__type__": "update"}
665
- )
666
-
667
- def change_version19(sr2, if_f0_3, version19):
668
- path_str = "" if version19 == "v1" else "_v2"
669
- f0_str = "f0" if if_f0_3 else ""
670
- if_pretrained_generator_exist = os.access("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK)
671
- if_pretrained_discriminator_exist = os.access("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK)
672
- if (if_pretrained_generator_exist == False):
673
- print("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
674
- if (if_pretrained_discriminator_exist == False):
675
- print("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), "not exist, will not use pretrained model")
676
- return (
677
- ("pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_generator_exist else "",
678
- ("pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)) if if_pretrained_discriminator_exist else "",
679
- )
680
-
681
-
682
- def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
683
- path_str = "" if version19 == "v1" else "_v2"
684
- if_pretrained_generator_exist = os.access("pretrained%s/f0G%s.pth" % (path_str, sr2), os.F_OK)
685
- if_pretrained_discriminator_exist = os.access("pretrained%s/f0D%s.pth" % (path_str, sr2), os.F_OK)
686
- if (if_pretrained_generator_exist == False):
687
- print("pretrained%s/f0G%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
688
- if (if_pretrained_discriminator_exist == False):
689
- print("pretrained%s/f0D%s.pth" % (path_str, sr2), "not exist, will not use pretrained model")
690
- if if_f0_3:
691
- return (
692
- {"visible": True, "__type__": "update"},
693
- "pretrained%s/f0G%s.pth" % (path_str, sr2) if if_pretrained_generator_exist else "",
694
- "pretrained%s/f0D%s.pth" % (path_str, sr2) if if_pretrained_discriminator_exist else "",
695
- )
696
- return (
697
- {"visible": False, "__type__": "update"},
698
- ("pretrained%s/G%s.pth" % (path_str, sr2)) if if_pretrained_generator_exist else "",
699
- ("pretrained%s/D%s.pth" % (path_str, sr2)) if if_pretrained_discriminator_exist else "",
700
- )
701
-
702
 
703
  global log_interval
704
 
@@ -717,446 +547,9 @@ def set_log_interval(exp_dir, batch_size12):
717
  log_interval += 1
718
  return log_interval
719
 
720
- # but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
721
- def click_train(
722
- exp_dir1,
723
- sr2,
724
- if_f0_3,
725
- spk_id5,
726
- save_epoch10,
727
- total_epoch11,
728
- batch_size12,
729
- if_save_latest13,
730
- pretrained_G14,
731
- pretrained_D15,
732
- gpus16,
733
- if_cache_gpu17,
734
- if_save_every_weights18,
735
- version19,
736
- ):
737
- CSVutil('csvdb/stop.csv', 'w+', 'formanting', False)
738
- # generate the filelist
739
- exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
740
- os.makedirs(exp_dir, exist_ok=True)
741
- gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
742
- feature_dir = (
743
- "%s/3_feature256" % (exp_dir)
744
- if version19 == "v1"
745
- else "%s/3_feature768" % (exp_dir)
746
- )
747
-
748
- log_interval = set_log_interval(exp_dir, batch_size12)
749
-
750
- if if_f0_3:
751
- f0_dir = "%s/2a_f0" % (exp_dir)
752
- f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
753
- names = (
754
- set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
755
- & set([name.split(".")[0] for name in os.listdir(feature_dir)])
756
- & set([name.split(".")[0] for name in os.listdir(f0_dir)])
757
- & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
758
- )
759
- else:
760
- names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
761
- [name.split(".")[0] for name in os.listdir(feature_dir)]
762
- )
763
- opt = []
764
- for name in names:
765
- if if_f0_3:
766
- opt.append(
767
- "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
768
- % (
769
- gt_wavs_dir.replace("\\", "\\\\"),
770
- name,
771
- feature_dir.replace("\\", "\\\\"),
772
- name,
773
- f0_dir.replace("\\", "\\\\"),
774
- name,
775
- f0nsf_dir.replace("\\", "\\\\"),
776
- name,
777
- spk_id5,
778
- )
779
- )
780
- else:
781
- opt.append(
782
- "%s/%s.wav|%s/%s.npy|%s"
783
- % (
784
- gt_wavs_dir.replace("\\", "\\\\"),
785
- name,
786
- feature_dir.replace("\\", "\\\\"),
787
- name,
788
- spk_id5,
789
- )
790
- )
791
- fea_dim = 256 if version19 == "v1" else 768
792
- if if_f0_3:
793
- for _ in range(2):
794
- opt.append(
795
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
796
- % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
797
- )
798
- else:
799
- for _ in range(2):
800
- opt.append(
801
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
802
- % (now_dir, sr2, now_dir, fea_dim, spk_id5)
803
- )
804
- shuffle(opt)
805
- with open("%s/filelist.txt" % exp_dir, "w") as f:
806
- f.write("\n".join(opt))
807
- print("write filelist done")
808
- # generate config  # no need to generate a config
809
- # cmd = python_cmd + " train_nsf_sim_cache_sid_load_pretrain.py -e mi-test -sr 40k -f0 1 -bs 4 -g 0 -te 10 -se 5 -pg pretrained/f0G40k.pth -pd pretrained/f0D40k.pth -l 1 -c 0"
810
- print("use gpus:", gpus16)
811
- if pretrained_G14 == "":
812
- print("no pretrained Generator")
813
- if pretrained_D15 == "":
814
- print("no pretrained Discriminator")
815
- if gpus16:
816
- cmd = (
817
- config.python_cmd
818
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
819
- % (
820
- exp_dir1,
821
- sr2,
822
- 1 if if_f0_3 else 0,
823
- batch_size12,
824
- gpus16,
825
- total_epoch11,
826
- save_epoch10,
827
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
828
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
829
- 1 if if_save_latest13 == True else 0,
830
- 1 if if_cache_gpu17 == True else 0,
831
- 1 if if_save_every_weights18 == True else 0,
832
- version19,
833
- log_interval,
834
- )
835
- )
836
- else:
837
- cmd = (
838
- config.python_cmd
839
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -li %s"
840
- % (
841
- exp_dir1,
842
- sr2,
843
- 1 if if_f0_3 else 0,
844
- batch_size12,
845
- total_epoch11,
846
- save_epoch10,
847
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "\b",
848
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "\b",
849
- 1 if if_save_latest13 == True else 0,
850
- 1 if if_cache_gpu17 == True else 0,
851
- 1 if if_save_every_weights18 == True else 0,
852
- version19,
853
- log_interval,
854
- )
855
- )
856
- print(cmd)
857
- p = Popen(cmd, shell=True, cwd=now_dir)
858
- global PID
859
- PID = p.pid
860
- p.wait()
861
- return ("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"})
862
-
863
-
864
- # but4.click(train_index, [exp_dir1], info3)
865
- def train_index(exp_dir1, version19):
866
- exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
867
- os.makedirs(exp_dir, exist_ok=True)
868
- feature_dir = (
869
- "%s/3_feature256" % (exp_dir)
870
- if version19 == "v1"
871
- else "%s/3_feature768" % (exp_dir)
872
- )
873
- if os.path.exists(feature_dir) == False:
874
- return "请先进行特征提取!"
875
- listdir_res = list(os.listdir(feature_dir))
876
- if len(listdir_res) == 0:
877
- return "请先进行特征提取!"
878
- npys = []
879
- for name in sorted(listdir_res):
880
- phone = np.load("%s/%s" % (feature_dir, name))
881
- npys.append(phone)
882
- big_npy = np.concatenate(npys, 0)
883
- big_npy_idx = np.arange(big_npy.shape[0])
884
- np.random.shuffle(big_npy_idx)
885
- big_npy = big_npy[big_npy_idx]
886
- np.save("%s/total_fea.npy" % exp_dir, big_npy)
887
- # n_ivf = big_npy.shape[0] // 39
888
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
889
- infos = []
890
- infos.append("%s,%s" % (big_npy.shape, n_ivf))
891
- yield "\n".join(infos)
892
- index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
893
- # index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
894
- infos.append("training")
895
- yield "\n".join(infos)
896
- index_ivf = faiss.extract_index_ivf(index) #
897
- index_ivf.nprobe = 1
898
- index.train(big_npy)
899
- faiss.write_index(
900
- index,
901
- "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
902
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
903
- )
904
- # faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
905
- infos.append("adding")
906
- yield "\n".join(infos)
907
- batch_size_add = 8192
908
- for i in range(0, big_npy.shape[0], batch_size_add):
909
- index.add(big_npy[i : i + batch_size_add])
910
- faiss.write_index(
911
- index,
912
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
913
- % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
914
- )
915
- infos.append(
916
- "成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index"
917
- % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
918
- )
919
- # faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
920
- # infos.append("成功构建索引,added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
921
- yield "\n".join(infos)
922
-
923
 
924
  # but5.click(train1key, [exp_dir1, sr2, if_f0_3, trainset_dir4, spk_id5, gpus6, np7, f0method8, save_epoch10, total_epoch11, batch_size12, if_save_latest13, pretrained_G14, pretrained_D15, gpus16, if_cache_gpu17], info3)
925
- def train1key(
926
- exp_dir1,
927
- sr2,
928
- if_f0_3,
929
- trainset_dir4,
930
- spk_id5,
931
- np7,
932
- f0method8,
933
- save_epoch10,
934
- total_epoch11,
935
- batch_size12,
936
- if_save_latest13,
937
- pretrained_G14,
938
- pretrained_D15,
939
- gpus16,
940
- if_cache_gpu17,
941
- if_save_every_weights18,
942
- version19,
943
- echl
944
- ):
945
- infos = []
946
-
947
- def get_info_str(strr):
948
- infos.append(strr)
949
- return "\n".join(infos)
950
-
951
- model_log_dir = "%s/logs/%s" % (now_dir, exp_dir1)
952
- preprocess_log_path = "%s/preprocess.log" % model_log_dir
953
- extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir
954
- gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir
955
- feature_dir = (
956
- "%s/3_feature256" % model_log_dir
957
- if version19 == "v1"
958
- else "%s/3_feature768" % model_log_dir
959
- )
960
 
961
- os.makedirs(model_log_dir, exist_ok=True)
962
- ######### step 1: process the data
963
- open(preprocess_log_path, "w").close()
964
- cmd = (
965
- config.python_cmd
966
- + " trainset_preprocess_pipeline_print.py %s %s %s %s "
967
- % (trainset_dir4, sr_dict[sr2], np7, model_log_dir)
968
- + str(config.noparallel)
969
- )
970
- yield get_info_str(i18n("step1:正在处理数据"))
971
- yield get_info_str(cmd)
972
- p = Popen(cmd, shell=True)
973
- p.wait()
974
- with open(preprocess_log_path, "r") as f:
975
- print(f.read())
976
- ######### step 2a: extract pitch (f0)
977
- open(extract_f0_feature_log_path, "w")
978
- if if_f0_3:
979
- yield get_info_str("step2a:正在提取音高")
980
- cmd = config.python_cmd + " extract_f0_print.py %s %s %s %s" % (
981
- model_log_dir,
982
- np7,
983
- f0method8,
984
- echl
985
- )
986
- yield get_info_str(cmd)
987
- p = Popen(cmd, shell=True, cwd=now_dir)
988
- p.wait()
989
- with open(extract_f0_feature_log_path, "r") as f:
990
- print(f.read())
991
- else:
992
- yield get_info_str(i18n("step2a:无需提取音高"))
993
- ####### step 2b: extract features
994
- yield get_info_str(i18n("step2b:正在提取特征"))
995
- gpus = gpus16.split("-")
996
- leng = len(gpus)
997
- ps = []
998
- for idx, n_g in enumerate(gpus):
999
- cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s %s" % (
1000
- config.device,
1001
- leng,
1002
- idx,
1003
- n_g,
1004
- model_log_dir,
1005
- version19,
1006
- )
1007
- yield get_info_str(cmd)
1008
- p = Popen(
1009
- cmd, shell=True, cwd=now_dir
1010
- ) # , shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=now_dir
1011
- ps.append(p)
1012
- for p in ps:
1013
- p.wait()
1014
- with open(extract_f0_feature_log_path, "r") as f:
1015
- print(f.read())
1016
- ####### step 3a: train the model
1017
- yield get_info_str(i18n("step3a:正在训练模型"))
1018
- # generate the filelist
1019
- if if_f0_3:
1020
- f0_dir = "%s/2a_f0" % model_log_dir
1021
- f0nsf_dir = "%s/2b-f0nsf" % model_log_dir
1022
- names = (
1023
- set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
1024
- & set([name.split(".")[0] for name in os.listdir(feature_dir)])
1025
- & set([name.split(".")[0] for name in os.listdir(f0_dir)])
1026
- & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
1027
- )
1028
- else:
1029
- names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
1030
- [name.split(".")[0] for name in os.listdir(feature_dir)]
1031
- )
1032
- opt = []
1033
- for name in names:
1034
- if if_f0_3:
1035
- opt.append(
1036
- "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
1037
- % (
1038
- gt_wavs_dir.replace("\\", "\\\\"),
1039
- name,
1040
- feature_dir.replace("\\", "\\\\"),
1041
- name,
1042
- f0_dir.replace("\\", "\\\\"),
1043
- name,
1044
- f0nsf_dir.replace("\\", "\\\\"),
1045
- name,
1046
- spk_id5,
1047
- )
1048
- )
1049
- else:
1050
- opt.append(
1051
- "%s/%s.wav|%s/%s.npy|%s"
1052
- % (
1053
- gt_wavs_dir.replace("\\", "\\\\"),
1054
- name,
1055
- feature_dir.replace("\\", "\\\\"),
1056
- name,
1057
- spk_id5,
1058
- )
1059
- )
1060
- fea_dim = 256 if version19 == "v1" else 768
1061
- if if_f0_3:
1062
- for _ in range(2):
1063
- opt.append(
1064
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
1065
- % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
1066
- )
1067
- else:
1068
- for _ in range(2):
1069
- opt.append(
1070
- "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
1071
- % (now_dir, sr2, now_dir, fea_dim, spk_id5)
1072
- )
1073
- shuffle(opt)
1074
- with open("%s/filelist.txt" % model_log_dir, "w") as f:
1075
- f.write("\n".join(opt))
1076
- yield get_info_str("write filelist done")
1077
- if gpus16:
1078
- cmd = (
1079
- config.python_cmd
1080
- +" train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
1081
- % (
1082
- exp_dir1,
1083
- sr2,
1084
- 1 if if_f0_3 else 0,
1085
- batch_size12,
1086
- gpus16,
1087
- total_epoch11,
1088
- save_epoch10,
1089
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
1090
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
1091
- 1 if if_save_latest13 == True else 0,
1092
- 1 if if_cache_gpu17 == True else 0,
1093
- 1 if if_save_every_weights18 == True else 0,
1094
- version19,
1095
- )
1096
- )
1097
- else:
1098
- cmd = (
1099
- config.python_cmd
1100
- + " train_nsf_sim_cache_sid_load_pretrain.py -e %s -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s"
1101
- % (
1102
- exp_dir1,
1103
- sr2,
1104
- 1 if if_f0_3 else 0,
1105
- batch_size12,
1106
- total_epoch11,
1107
- save_epoch10,
1108
- ("-pg %s" % pretrained_G14) if pretrained_G14 != "" else "",
1109
- ("-pd %s" % pretrained_D15) if pretrained_D15 != "" else "",
1110
- 1 if if_save_latest13 == True else 0,
1111
- 1 if if_cache_gpu17 == True else 0,
1112
- 1 if if_save_every_weights18 == True else 0,
1113
- version19,
1114
- )
1115
- )
1116
- yield get_info_str(cmd)
1117
- p = Popen(cmd, shell=True, cwd=now_dir)
1118
- p.wait()
1119
- yield get_info_str(i18n("训练结束, 您可查看控制台训练日志或实验文件夹下的train.log"))
1120
- ####### step 3b: train the index
1121
- npys = []
1122
- listdir_res = list(os.listdir(feature_dir))
1123
- for name in sorted(listdir_res):
1124
- phone = np.load("%s/%s" % (feature_dir, name))
1125
- npys.append(phone)
1126
- big_npy = np.concatenate(npys, 0)
1127
-
1128
- big_npy_idx = np.arange(big_npy.shape[0])
1129
- np.random.shuffle(big_npy_idx)
1130
- big_npy = big_npy[big_npy_idx]
1131
- np.save("%s/total_fea.npy" % model_log_dir, big_npy)
1132
-
1133
- # n_ivf = big_npy.shape[0] // 39
1134
- n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
1135
- yield get_info_str("%s,%s" % (big_npy.shape, n_ivf))
1136
- index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
1137
- yield get_info_str("training index")
1138
- index_ivf = faiss.extract_index_ivf(index) #
1139
- index_ivf.nprobe = 1
1140
- index.train(big_npy)
1141
- faiss.write_index(
1142
- index,
1143
- "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
1144
- % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
1145
- )
1146
- yield get_info_str("adding index")
1147
- batch_size_add = 8192
1148
- for i in range(0, big_npy.shape[0], batch_size_add):
1149
- index.add(big_npy[i : i + batch_size_add])
1150
- faiss.write_index(
1151
- index,
1152
- "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index"
1153
- % (model_log_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
1154
- )
1155
- yield get_info_str(
1156
- "成功构建索引, added_IVF%s_Flat_nprobe_%s_%s_%s.index"
1157
- % (n_ivf, index_ivf.nprobe, exp_dir1, version19)
1158
- )
1159
- yield get_info_str(i18n("全流程结束!"))
1160
 
1161
 
1162
  def whethercrepeornah(radio):
@@ -1183,57 +576,6 @@ def change_info_(ckpt_path):
1183
  return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
1184
 
1185
 
1186
- from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
1187
-
1188
-
1189
- def export_onnx(ModelPath, ExportedPath, MoeVS=True):
1190
- cpt = torch.load(ModelPath, map_location="cpu")
1191
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
1192
- hidden_channels = 256 if cpt.get("version","v1")=="v1"else 768#cpt["config"][-2] # hidden_channels, prepared for the 768-dim feature (Vec) version
1193
-
1194
- test_phone = torch.rand(1, 200, hidden_channels) # hidden unit
1195
- test_phone_lengths = torch.tensor([200]).long() # hidden unit length (apparently unused)
1196
- test_pitch = torch.randint(size=(1, 200), low=5, high=255) # fundamental frequency (in Hz)
1197
- test_pitchf = torch.rand(1, 200) # NSF fundamental frequency
1198
- test_ds = torch.LongTensor([0]) # speaker ID
1199
- test_rnd = torch.rand(1, 192, 200) # noise (adds a random factor)
1200
-
1201
- device = "cpu" # device used for export (does not affect running the model)
1202
-
1203
-
1204
- net_g = SynthesizerTrnMsNSFsidM(
1205
- *cpt["config"], is_half=False,version=cpt.get("version","v1")
1206
- ) # fp32 export (fp16 support in C++ would require manually rearranging memory, so fp16 is not used for now)
1207
- net_g.load_state_dict(cpt["weight"], strict=False)
1208
- input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
1209
- output_names = [
1210
- "audio",
1211
- ]
1212
- # net_g.construct_spkmixmap(n_speaker)  # export with a multi-speaker mix track
1213
- torch.onnx.export(
1214
- net_g,
1215
- (
1216
- test_phone.to(device),
1217
- test_phone_lengths.to(device),
1218
- test_pitch.to(device),
1219
- test_pitchf.to(device),
1220
- test_ds.to(device),
1221
- test_rnd.to(device),
1222
- ),
1223
- ExportedPath,
1224
- dynamic_axes={
1225
- "phone": [1],
1226
- "pitch": [1],
1227
- "pitchf": [1],
1228
- "rnd": [2],
1229
- },
1230
- do_constant_folding=False,
1231
- opset_version=16,
1232
- verbose=False,
1233
- input_names=input_names,
1234
- output_names=output_names,
1235
- )
1236
- return "Finished"
1237
 
1238
  #region RVC WebUI App
1239
 
@@ -1706,123 +1048,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1706
  [vc_output1, vc_output2],
1707
  )
1708
 
1709
- with gr.Accordion("Batch Conversion",open=False):
1710
- with gr.Row():
1711
- with gr.Column():
1712
- vc_transform1 = gr.Number(
1713
- label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0
1714
- )
1715
- opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
1716
- f0method1 = gr.Radio(
1717
- label=i18n(
1718
- "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"
1719
- ),
1720
- choices=["pm", "harvest", "crepe", "rmvpe"],
1721
- value="rmvpe",
1722
- interactive=True,
1723
- )
1724
- filter_radius1 = gr.Slider(
1725
- minimum=0,
1726
- maximum=7,
1727
- label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"),
1728
- value=3,
1729
- step=1,
1730
- interactive=True,
1731
- )
1732
- with gr.Column():
1733
- file_index3 = gr.Textbox(
1734
- label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"),
1735
- value="",
1736
- interactive=True,
1737
- )
1738
- file_index4 = gr.Dropdown(
1739
- label=i18n("自动检测index路径,下拉式选择(dropdown)"),
1740
- choices=sorted(index_paths),
1741
- interactive=True,
1742
- )
1743
- refresh_button.click(
1744
- fn=lambda: change_choices()[1],
1745
- inputs=[],
1746
- outputs=file_index4,
1747
- )
1748
- # file_big_npy2 = gr.Textbox(
1749
- # label=i18n("特征文件路径"),
1750
- # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
1751
- # interactive=True,
1752
- # )
1753
- index_rate2 = gr.Slider(
1754
- minimum=0,
1755
- maximum=1,
1756
- label=i18n("检索特征占比"),
1757
- value=1,
1758
- interactive=True,
1759
- )
1760
- with gr.Column():
1761
- resample_sr1 = gr.Slider(
1762
- minimum=0,
1763
- maximum=48000,
1764
- label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
1765
- value=0,
1766
- step=1,
1767
- interactive=True,
1768
- )
1769
- rms_mix_rate1 = gr.Slider(
1770
- minimum=0,
1771
- maximum=1,
1772
- label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
1773
- value=1,
1774
- interactive=True,
1775
- )
1776
- protect1 = gr.Slider(
1777
- minimum=0,
1778
- maximum=0.5,
1779
- label=i18n(
1780
- "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
1781
- ),
1782
- value=0.33,
1783
- step=0.01,
1784
- interactive=True,
1785
- )
1786
- with gr.Column():
1787
- dir_input = gr.Textbox(
1788
- label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
1789
- value="E:\codes\py39\\test-20230416b\\todo-songs",
1790
- )
1791
- inputs = gr.File(
1792
- file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
1793
- )
1794
- with gr.Row():
1795
- format1 = gr.Radio(
1796
- label=i18n("导出文件格式"),
1797
- choices=["wav", "flac", "mp3", "m4a"],
1798
- value="flac",
1799
- interactive=True,
1800
- )
1801
- but1 = gr.Button(i18n("转换"), variant="primary")
1802
- vc_output3 = gr.Textbox(label=i18n("输出信息"))
1803
- but1.click(
1804
- vc_multi,
1805
- [
1806
- spk_item,
1807
- dir_input,
1808
- opt_input,
1809
- inputs,
1810
- vc_transform1,
1811
- f0method1,
1812
- file_index3,
1813
- file_index4,
1814
- # file_big_npy2,
1815
- index_rate2,
1816
- filter_radius1,
1817
- resample_sr1,
1818
- rms_mix_rate1,
1819
- protect1,
1820
- format1,
1821
- crepe_hop_length,
1822
- ],
1823
- [vc_output3],
1824
- )
1825
- but1.click(fn=lambda: easy_uploader.clear())
1826
  with gr.TabItem("Download Model"):
1827
  with gr.Row():
1828
  url=gr.Textbox(label="Enter the URL to the Model:")
@@ -1839,252 +1065,8 @@ with gr.Blocks(theme=gr.themes.Base(), title='Mangio-RVC-Web 💻') as app:
1839
  """
1840
  )
1841
 
1842
- def has_two_files_in_pretrained_folder():
1843
- pretrained_folder = "./pretrained/"
1844
- if not os.path.exists(pretrained_folder):
1845
- return False
1846
-
1847
- files_in_folder = os.listdir(pretrained_folder)
1848
- num_files = len(files_in_folder)
1849
- return num_files >= 2
1850
-
1851
- if has_two_files_in_pretrained_folder():
1852
- print("Pretrained weights are downloaded. Training tab enabled!\n-------------------------------")
1853
- with gr.TabItem("Train", visible=False):
1854
- with gr.Row():
1855
- with gr.Column():
1856
- exp_dir1 = gr.Textbox(label="Voice Name:", value="My-Voice")
1857
- sr2 = gr.Radio(
1858
- label=i18n("目标采样率"),
1859
- choices=["40k", "48k"],
1860
- value="40k",
1861
- interactive=True,
1862
- visible=False
1863
- )
1864
- if_f0_3 = gr.Radio(
1865
- label=i18n("模型是否带音高指导(唱歌一定要, 语音可以不要)"),
1866
- choices=[True, False],
1867
- value=True,
1868
- interactive=True,
1869
- visible=False
1870
- )
1871
- version19 = gr.Radio(
1872
- label="RVC version",
1873
- choices=["v1", "v2"],
1874
- value="v2",
1875
- interactive=True,
1876
- visible=False,
1877
- )
1878
- np7 = gr.Slider(
1879
- minimum=0,
1880
- maximum=config.n_cpu,
1881
- step=1,
1882
- label="# of CPUs for data processing (Leave as it is)",
1883
- value=config.n_cpu,
1884
- interactive=True,
1885
- visible=True
1886
- )
1887
- trainset_dir4 = gr.Textbox(label="Path to your dataset (audios, not zip):", value="./dataset")
1888
- easy_uploader = gr.Files(label='OR Drop your audios here. They will be uploaded in your dataset path above.',file_types=['audio'])
1889
- but1 = gr.Button("1. Process The Dataset", variant="primary")
1890
- info1 = gr.Textbox(label="Status (wait until it says 'end preprocess'):", value="")
1891
- easy_uploader.upload(fn=upload_to_dataset, inputs=[easy_uploader, trainset_dir4], outputs=[info1])
1892
- but1.click(
1893
- preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
1894
- )
1895
- with gr.Column():
1896
- spk_id5 = gr.Slider(
1897
- minimum=0,
1898
- maximum=4,
1899
- step=1,
1900
- label=i18n("请指定说话人id"),
1901
- value=0,
1902
- interactive=True,
1903
- visible=False
1904
- )
1905
- with gr.Accordion('GPU Settings', open=False, visible=False):
1906
- gpus6 = gr.Textbox(
1907
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
1908
- value=gpus,
1909
- interactive=True,
1910
- visible=False
1911
- )
1912
- gpu_info9 = gr.Textbox(label=i18n("显卡信息"), value=gpu_info)
1913
- f0method8 = gr.Radio(
1914
- label=i18n(
1915
- "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢"
1916
- ),
1917
- choices=["harvest","crepe", "mangio-crepe", "rmvpe"], # Fork feature: Crepe on f0 extraction for training.
1918
- value="rmvpe",
1919
- interactive=True,
1920
- )
1921
-
1922
- extraction_crepe_hop_length = gr.Slider(
1923
- minimum=1,
1924
- maximum=512,
1925
- step=1,
1926
- label=i18n("crepe_hop_length"),
1927
- value=128,
1928
- interactive=True,
1929
- visible=False,
1930
- )
1931
- f0method8.change(fn=whethercrepeornah, inputs=[f0method8], outputs=[extraction_crepe_hop_length])
1932
- but2 = gr.Button("2. Pitch Extraction", variant="primary")
1933
- info2 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=8)
1934
- but2.click(
1935
- extract_f0_feature,
1936
- [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
1937
- [info2],
1938
- )
1939
- with gr.Row():
1940
- with gr.Column():
1941
- total_epoch11 = gr.Slider(
1942
- minimum=1,
1943
- maximum=5000,
1944
- step=10,
1945
- label="Total # of training epochs (IF you choose a value too high, your model will sound horribly overtrained.):",
1946
- value=250,
1947
- interactive=True,
1948
- )
1949
- butstop = gr.Button(
1950
- "Stop Training",
1951
- variant='primary',
1952
- visible=False,
1953
- )
1954
- but3 = gr.Button("3. Train Model", variant="primary", visible=True)
1955
-
1956
- but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop])
1957
- butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[butstop, but3])
1958
-
1959
-
1960
- but4 = gr.Button("4.Train Index", variant="primary")
1961
- info3 = gr.Textbox(label="Status(Check the Colab Notebook's cell output):", value="", max_lines=10)
1962
- with gr.Accordion("Training Preferences (You can leave these as they are)", open=False):
1963
- #gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引"))
1964
- with gr.Column():
1965
- save_epoch10 = gr.Slider(
1966
- minimum=1,
1967
- maximum=200,
1968
- step=1,
1969
- label="Backup every X amount of epochs:",
1970
- value=10,
1971
- interactive=True,
1972
- )
1973
- batch_size12 = gr.Slider(
1974
- minimum=1,
1975
- maximum=40,
1976
- step=1,
1977
- label="Batch Size (LEAVE IT unless you know what you're doing!):",
1978
- value=default_batch_size,
1979
- interactive=True,
1980
- )
1981
- if_save_latest13 = gr.Checkbox(
1982
- label="Save only the latest '.ckpt' file to save disk space.",
1983
- value=True,
1984
- interactive=True,
1985
- )
1986
- if_cache_gpu17 = gr.Checkbox(
1987
- label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement.",
1988
- value=False,
1989
- interactive=True,
1990
- )
1991
- if_save_every_weights18 = gr.Checkbox(
1992
- label="Save a small final model to the 'weights' folder at each save point.",
1993
- value=True,
1994
- interactive=True,
1995
- )
1996
- zip_model = gr.Button('5. Download Model')
1997
- zipped_model = gr.Files(label='Your Model and Index file can be downloaded here:')
1998
- zip_model.click(fn=zip_downloader, inputs=[exp_dir1], outputs=[zipped_model, info3])
1999
- with gr.Group():
2000
- with gr.Accordion("Base Model Locations:", open=False, visible=False):
2001
- pretrained_G14 = gr.Textbox(
2002
- label=i18n("加载预训练底模G路径"),
2003
- value="pretrained_v2/f0G40k.pth",
2004
- interactive=True,
2005
- )
2006
- pretrained_D15 = gr.Textbox(
2007
- label=i18n("加载预训练底模D路径"),
2008
- value="pretrained_v2/f0D40k.pth",
2009
- interactive=True,
2010
- )
2011
- gpus16 = gr.Textbox(
2012
- label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"),
2013
- value=gpus,
2014
- interactive=True,
2015
- )
2016
- sr2.change(
2017
- change_sr2,
2018
- [sr2, if_f0_3, version19],
2019
- [pretrained_G14, pretrained_D15, version19],
2020
- )
2021
- version19.change(
2022
- change_version19,
2023
- [sr2, if_f0_3, version19],
2024
- [pretrained_G14, pretrained_D15],
2025
- )
2026
- if_f0_3.change(
2027
- change_f0,
2028
- [if_f0_3, sr2, version19],
2029
- [f0method8, pretrained_G14, pretrained_D15],
2030
- )
2031
- but5 = gr.Button(i18n("一键训练"), variant="primary", visible=False)
2032
- but3.click(
2033
- click_train,
2034
- [
2035
- exp_dir1,
2036
- sr2,
2037
- if_f0_3,
2038
- spk_id5,
2039
- save_epoch10,
2040
- total_epoch11,
2041
- batch_size12,
2042
- if_save_latest13,
2043
- pretrained_G14,
2044
- pretrained_D15,
2045
- gpus16,
2046
- if_cache_gpu17,
2047
- if_save_every_weights18,
2048
- version19,
2049
- ],
2050
- [
2051
- info3,
2052
- butstop,
2053
- but3,
2054
- ],
2055
- )
2056
- but4.click(train_index, [exp_dir1, version19], info3)
2057
- but5.click(
2058
- train1key,
2059
- [
2060
- exp_dir1,
2061
- sr2,
2062
- if_f0_3,
2063
- trainset_dir4,
2064
- spk_id5,
2065
- np7,
2066
- f0method8,
2067
- save_epoch10,
2068
- total_epoch11,
2069
- batch_size12,
2070
- if_save_latest13,
2071
- pretrained_G14,
2072
- pretrained_D15,
2073
- gpus16,
2074
- if_cache_gpu17,
2075
- if_save_every_weights18,
2076
- version19,
2077
- extraction_crepe_hop_length
2078
- ],
2079
- info3,
2080
- )
2081
-
2082
- else:
2083
- print(
2084
- "Pretrained weights not downloaded. Disabling training tab.\n"
2085
- "Wondering how to train a voice? Visit here for the RVC model training guide: https://t.ly/RVC_Training_Guide\n"
2086
- "-------------------------------\n"
2087
- )
2088
 
2089
  app.queue(concurrency_count=511, max_size=1022).launch(share=False, quiet=True)
2090
  #endregion
 