shibing624 committed
Commit 2adceb8 · verified · 1 Parent(s): 0bbd7f2

Delete prepare_datasets

prepare_datasets/1-get-text.py DELETED
@@ -1,131 +0,0 @@
- # -*- coding: utf-8 -*-
-
- import os
-
- inp_text = os.environ.get("inp_text")
- inp_wav_dir = os.environ.get("inp_wav_dir")
- exp_name = os.environ.get("exp_name")
- i_part = os.environ.get("i_part")
- all_parts = os.environ.get("all_parts")
- os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
- opt_dir = os.environ.get("opt_dir")
- bert_pretrained_dir = os.environ.get("bert_pretrained_dir")
- is_half = eval(os.environ.get("is_half", "True"))
- import sys, numpy as np, traceback, pdb
- import os.path
- from glob import glob
- from tqdm import tqdm
- from text.cleaner import clean_text
- import torch
- from transformers import AutoModelForMaskedLM, AutoTokenizer
- import numpy as np
-
- # inp_text=sys.argv[1]
- # inp_wav_dir=sys.argv[2]
- # exp_name=sys.argv[3]
- # i_part=sys.argv[4]
- # all_parts=sys.argv[5]
- # os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6]#i_gpu
- # opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
- # bert_pretrained_dir="/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large"
-
- from time import time as ttime
- import shutil
-
-
- def my_save(fea, path):  #####fix issue: torch.save doesn't support chinese path
-     dir = os.path.dirname(path)
-     name = os.path.basename(path)
-     tmp_path = "%s/%s%s.pth" % (dir, ttime(), i_part)
-     torch.save(fea, tmp_path)
-     shutil.move(tmp_path, "%s/%s" % (dir, name))
-
-
- txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
- if os.path.exists(txt_path) == False:
-     bert_dir = "%s/3-bert" % (opt_dir)
-     os.makedirs(opt_dir, exist_ok=True)
-     os.makedirs(bert_dir, exist_ok=True)
-     if torch.cuda.is_available():
-         device = "cuda:0"
-     elif torch.backends.mps.is_available():
-         device = "mps"
-     else:
-         device = "cpu"
-     tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
-     bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir)
-     if is_half == True:
-         bert_model = bert_model.half().to(device)
-     else:
-         bert_model = bert_model.to(device)
-
-     def get_bert_feature(text, word2ph):
-         with torch.no_grad():
-             inputs = tokenizer(text, return_tensors="pt")
-             for i in inputs:
-                 inputs[i] = inputs[i].to(device)
-             res = bert_model(**inputs, output_hidden_states=True)
-             res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
-
-         assert len(word2ph) == len(text)
-         phone_level_feature = []
-         for i in range(len(word2ph)):
-             repeat_feature = res[i].repeat(word2ph[i], 1)
-             phone_level_feature.append(repeat_feature)
-
-         phone_level_feature = torch.cat(phone_level_feature, dim=0)
-
-         return phone_level_feature.T
-
-     def process(data, res):
-         for name, text, lan in data:
-             try:
-                 name = os.path.basename(name)
-                 phones, word2ph, norm_text = clean_text(
-                     text.replace("%", "-").replace("¥", ","), lan
-                 )
-                 path_bert = "%s/%s.pt" % (bert_dir, name)
-                 if os.path.exists(path_bert) == False and lan == "zh":
-                     bert_feature = get_bert_feature(norm_text, word2ph)
-                     assert bert_feature.shape[-1] == len(phones)
-                     # torch.save(bert_feature, path_bert)
-                     my_save(bert_feature, path_bert)
-                 phones = " ".join(phones)
-                 # res.append([name,phones])
-                 res.append([name, phones, word2ph, norm_text])
-             except:
-                 print(name, text, traceback.format_exc())
-
-     todo = []
-     res = []
-     with open(inp_text, "r", encoding="utf8") as f:
-         lines = f.read().strip("\n").split("\n")
-
-     language_v1_to_language_v2 = {
-         "ZH": "zh",
-         "zh": "zh",
-         "JP": "ja",
-         "jp": "ja",
-         "JA": "ja",
-         "ja": "ja",
-         "EN": "en",
-         "en": "en",
-         "En": "en",
-     }
-     for line in lines[int(i_part) :: int(all_parts)]:
-         try:
-             wav_name, spk_name, language, text = line.split("|")
-             # todo.append([name,text,"zh"])
-             todo.append(
-                 [wav_name, text, language_v1_to_language_v2.get(language, language)]
-             )
-         except:
-             print(line, traceback.format_exc())
-
-     process(todo, res)
-     opt = []
-     for name, phones, word2ph, norm_text in res:
-         opt.append("%s\t%s\t%s\t%s" % (name, phones, word2ph, norm_text))
-     with open(txt_path, "w", encoding="utf8") as f:
-         f.write("\n".join(opt) + "\n")
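All three prepare_datasets scripts read their configuration from environment variables rather than argv (the commented-out sys.argv block above is the older convention). A minimal driver sketch, assuming only the env-var contract visible at the top of 1-get-text.py; the paths, experiment name, and shard count below are placeholders, not repo defaults:

    import os
    import subprocess

    n_parts = 2  # number of parallel shards; worker i processes lines[i::n_parts]
    procs = []
    for i_part in range(n_parts):
        env = dict(
            os.environ,
            inp_text="output/train.list",       # "wav|speaker|language|text" per line
            inp_wav_dir="output/wavs",
            exp_name="my_exp",
            i_part=str(i_part),
            all_parts=str(n_parts),
            _CUDA_VISIBLE_DEVICES=str(i_part),  # remapped to CUDA_VISIBLE_DEVICES by the worker
            opt_dir="output/exp",
            bert_pretrained_dir="pretrained/chinese-roberta-wwm-ext-large",
            is_half="True",
        )
        procs.append(subprocess.Popen(["python", "prepare_datasets/1-get-text.py"], env=env))
    for p in procs:
        p.wait()

Each shard then writes its own 2-name2text-{i_part}.txt under opt_dir.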
prepare_datasets/2-get-hubert-wav32k.py DELETED
@@ -1,114 +0,0 @@
- # -*- coding: utf-8 -*-
-
- import sys, os
-
- inp_text = os.environ.get("inp_text")
- inp_wav_dir = os.environ.get("inp_wav_dir")
- exp_name = os.environ.get("exp_name")
- i_part = os.environ.get("i_part")
- all_parts = os.environ.get("all_parts")
- os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
- from feature_extractor import cnhubert
-
- opt_dir = os.environ.get("opt_dir")
- cnhubert.cnhubert_base_path = os.environ.get("cnhubert_base_dir")
- is_half = eval(os.environ.get("is_half", "True"))
-
- import pdb, traceback, numpy as np, logging
- from scipy.io import wavfile
- import librosa, torch
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from my_utils import load_audio
-
- # from config import cnhubert_base_path
- # cnhubert.cnhubert_base_path=cnhubert_base_path
- # inp_text=sys.argv[1]
- # inp_wav_dir=sys.argv[2]
- # exp_name=sys.argv[3]
- # i_part=sys.argv[4]
- # all_parts=sys.argv[5]
- # os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6]
- # cnhubert.cnhubert_base_path=sys.argv[7]
- # opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
-
- from time import time as ttime
- import shutil
-
-
- def my_save(fea, path):  #####fix issue: torch.save doesn't support chinese path
-     dir = os.path.dirname(path)
-     name = os.path.basename(path)
-     tmp_path = "%s/%s%s.pth" % (dir, ttime(), i_part)
-     torch.save(fea, tmp_path)
-     shutil.move(tmp_path, "%s/%s" % (dir, name))
-
-
- hubert_dir = "%s/4-cnhubert" % (opt_dir)
- wav32dir = "%s/5-wav32k" % (opt_dir)
- os.makedirs(opt_dir, exist_ok=True)
- os.makedirs(hubert_dir, exist_ok=True)
- os.makedirs(wav32dir, exist_ok=True)
-
- maxx = 0.95
- alpha = 0.5
- if torch.cuda.is_available():
-     device = "cuda:0"
- elif torch.backends.mps.is_available():
-     device = "mps"
- else:
-     device = "cpu"
- model = cnhubert.get_model()
- # is_half=False
- if is_half == True:
-     model = model.half().to(device)
- else:
-     model = model.to(device)
-
- nan_fails = []
-
-
- def name2go(wav_name):
-     hubert_path = "%s/%s.pt" % (hubert_dir, wav_name)
-     if os.path.exists(hubert_path):
-         return
-     wav_path = "%s/%s" % (inp_wav_dir, wav_name)
-     tmp_audio = load_audio(wav_path, 32000)
-     tmp_max = np.abs(tmp_audio).max()
-     if tmp_max > 2.2:
-         print("%s-filtered,%s" % (wav_name, tmp_max))
-         return
-     tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha * 32768)) + ((1 - alpha) * 32768) * tmp_audio
-     tmp_audio32b = (tmp_audio / tmp_max * (maxx * alpha * 1145.14)) + ((1 - alpha) * 1145.14) * tmp_audio
-     tmp_audio = librosa.resample(
-         tmp_audio32b, orig_sr=32000, target_sr=16000
-     )  # not a resampling issue
-     tensor_wav16 = torch.from_numpy(tmp_audio)
-     if is_half == True:
-         tensor_wav16 = tensor_wav16.half().to(device)
-     else:
-         tensor_wav16 = tensor_wav16.to(device)
-     ssl = model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1, 2).cpu()  # torch.Size([1, 768, 215])
-     if np.isnan(ssl.detach().numpy()).sum() != 0:
-         nan_fails.append(wav_name)
-         print("nan filtered:%s" % wav_name)
-         return
-     wavfile.write(
-         "%s/%s" % (wav32dir, wav_name),
-         32000,
-         tmp_audio32.astype("int16"),
-     )
-     my_save(ssl, hubert_path)
-
-
- with open(inp_text, "r", encoding="utf8") as f:
-     lines = f.read().strip("\n").split("\n")
-
- for line in lines[int(i_part) :: int(all_parts)]:
-     try:
-         # wav_name,text=line.split("\t")
-         wav_name, spk_name, language, text = line.split("|")
-         wav_name = os.path.basename(wav_name)
-         name2go(wav_name)
-     except:
-         print(line, traceback.format_exc())
-
- if len(nan_fails) > 0 and is_half == True:
-     is_half = False
-     model = model.float()
-     for wav_name in nan_fails:
-         try:
-             name2go(wav_name)
-         except:
-             print(wav_name, traceback.format_exc())
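The gain handling in name2go blends a peak-normalized copy of the signal with the untouched signal: with maxx = 0.95 and alpha = 0.5, the 32 kHz wav written to disk is x / max|x| * (maxx * alpha * 32768) + (1 - alpha) * 32768 * x. A standalone sketch of that blend (the helper name is mine, not the script's):

    import numpy as np

    def blend_normalize(x, maxx=0.95, alpha=0.5, scale=32768.0):
        # Mix a peak-normalized copy of x with x itself, scaled toward int16
        # range: alpha controls how much loudness normalization is applied,
        # while (1 - alpha) passes the original dynamics through unchanged.
        peak = np.abs(x).max()
        return (x / peak) * (maxx * alpha * scale) + (1 - alpha) * scale * x

The script computes the same blend a second time with scale 1145.14 (tmp_audio32b), resamples that copy to 16 kHz, and feeds it to the HuBERT model, so the SSL features see the same relative shaping at a much smaller amplitude.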
prepare_datasets/3-get-semantic.py DELETED
@@ -1,95 +0,0 @@
- import os
-
- inp_text = os.environ.get("inp_text")
- exp_name = os.environ.get("exp_name")
- i_part = os.environ.get("i_part")
- all_parts = os.environ.get("all_parts")
- os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
- opt_dir = os.environ.get("opt_dir")
- pretrained_s2G = os.environ.get("pretrained_s2G")
- s2config_path = os.environ.get("s2config_path")
- is_half = eval(os.environ.get("is_half", "True"))
- import math, traceback
- import multiprocessing
- import sys, pdb
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from random import shuffle
- import torch.multiprocessing as mp
- from glob import glob
- from tqdm import tqdm
- import logging, librosa, utils, torch
- from module.models import SynthesizerTrn
-
- logging.getLogger("numba").setLevel(logging.WARNING)
- # from config import pretrained_s2G
-
- # inp_text=sys.argv[1]
- # exp_name=sys.argv[2]
- # i_part=sys.argv[3]
- # all_parts=sys.argv[4]
- # os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[5]
- # opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
-
-
- hubert_dir = "%s/4-cnhubert" % (opt_dir)
- semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
- if os.path.exists(semantic_path) == False:
-     os.makedirs(opt_dir, exist_ok=True)
-
-     if torch.cuda.is_available():
-         device = "cuda"
-     elif torch.backends.mps.is_available():
-         device = "mps"
-     else:
-         device = "cpu"
-     hps = utils.get_hparams_from_file(s2config_path)
-     vq_model = SynthesizerTrn(
-         hps.data.filter_length // 2 + 1,
-         hps.train.segment_size // hps.data.hop_length,
-         n_speakers=hps.data.n_speakers,
-         **hps.model
-     )
-     if is_half == True:
-         vq_model = vq_model.half().to(device)
-     else:
-         vq_model = vq_model.to(device)
-     vq_model.eval()
-     # utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "G_*.pth"), vq_model, None, True)
-     # utils.load_checkpoint(pretrained_s2G, vq_model, None, True)
-     print(
-         vq_model.load_state_dict(
-             torch.load(pretrained_s2G, map_location="cpu")["weight"], strict=False
-         )
-     )
-
-     def name2go(wav_name, lines):
-         hubert_path = "%s/%s.pt" % (hubert_dir, wav_name)
-         if os.path.exists(hubert_path) == False:
-             return
-         ssl_content = torch.load(hubert_path, map_location="cpu")
-         if is_half == True:
-             ssl_content = ssl_content.half().to(device)
-         else:
-             ssl_content = ssl_content.to(device)
-         codes = vq_model.extract_latent(ssl_content)
-         semantic = " ".join([str(i) for i in codes[0, 0, :].tolist()])
-         lines.append("%s\t%s" % (wav_name, semantic))
-
-     with open(inp_text, "r", encoding="utf8") as f:
-         lines = f.read().strip("\n").split("\n")
-
-     lines1 = []
-     for line in lines[int(i_part) :: int(all_parts)]:
-         # print(line)
-         try:
-             # wav_name,text=line.split("\t")
-             wav_name, spk_name, language, text = line.split("|")
-             wav_name = os.path.basename(wav_name)
-             # name2go(name,lines1)
-             name2go(wav_name, lines1)
-         except:
-             print(line, traceback.format_exc())
-     with open(semantic_path, "w", encoding="utf8") as f:
-         f.write("\n".join(lines1))
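Each of the three scripts shards its input as lines[int(i_part)::int(all_parts)] and writes one file per shard (2-name2text-{i}.txt, 6-name2semantic-{i}.tsv). A minimal merge sketch, assuming the shard names above and the same opt_dir the workers were given; the merged file names are an assumption, not something these scripts produce:

    import os

    opt_dir = "output/exp"  # placeholder: same value the workers were given
    all_parts = 2

    def merge_shards(pattern, out_name):
        # Concatenate per-shard text files in shard order into one file.
        chunks = []
        for i_part in range(all_parts):
            shard = os.path.join(opt_dir, pattern % i_part)
            with open(shard, "r", encoding="utf8") as f:
                chunks.append(f.read().strip("\n"))
        with open(os.path.join(opt_dir, out_name), "w", encoding="utf8") as f:
            f.write("\n".join(chunks) + "\n")

    merge_shards("2-name2text-%s.txt", "2-name2text.txt")
    merge_shards("6-name2semantic-%s.tsv", "6-name2semantic.tsv")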