Spaces: Sleeping
Mahiruoshi committed
Commit 7e680fd
Parent(s): 9f9d041
Upload 22 files
Browse files
- .gitattributes +2 -0
- app.py +241 -0
- attentions.py +300 -0
- checkpoints/default/config.json +1 -0
- checkpoints/default/model.pth +3 -0
- cleaners/JapaneseCleaner.dll +3 -0
- cleaners/char.bin +3 -0
- cleaners/matrix.bin +3 -0
- cleaners/sys.dic +3 -0
- cleaners/unk.dic +0 -0
- commons.py +97 -0
- jieba/dict.txt +0 -0
- mel_processing.py +112 -0
- models.py +498 -0
- modules.py +387 -0
- requirements.txt +22 -0
- text/__init__.py +32 -0
- text/cleaners.py +95 -0
- text/mandarin.py +328 -0
- text/symbols.py +67 -0
- transforms.py +193 -0
- utils.py +76 -0
- 目前的环境.txt +143 -0
.gitattributes
CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+cleaners/JapaneseCleaner.dll filter=lfs diff=lfs merge=lfs -text
+cleaners/sys.dic filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,241 @@
import re
import gradio as gr
import torch
import unicodedata
import commons
import utils
import pathlib
from models import SynthesizerTrn
from text import text_to_sequence
import time
import os
import io
from scipy.io.wavfile import write
from flask import Flask, request
from threading import Thread
import openai
import requests
import json
import soundfile as sf
from scipy import signal

class VitsGradio:
    def __init__(self):
        self.lan = ["中文","日文","自动"]
        self.chatapi = ["gpt-3.5-turbo","gpt3"]
        self.modelPaths = []
        for root,dirs,files in os.walk("checkpoints"):
            for dir in dirs:
                self.modelPaths.append(dir)
        with gr.Blocks() as self.Vits:
            with gr.Tab("调试用"):
                with gr.Row():
                    with gr.Column():
                        with gr.Row():
                            with gr.Column():
                                self.text = gr.TextArea(label="Text", value="你好")
                                with gr.Accordion(label="测试api", open=False):
                                    self.local_chat1 = gr.Checkbox(value=False, label="使用网址+文本进行模拟")
                                    self.url_input = gr.TextArea(label="键入测试", value="http://127.0.0.1:8080/chat?Text=")
                                    butto = gr.Button("模拟前端抓取语音文件")
                                btnVC = gr.Button("测试tts+对话程序")
                            with gr.Column():
                                output2 = gr.TextArea(label="回复")
                                output1 = gr.Audio(label="采样率22050")
                                output3 = gr.outputs.File(label="44100hz: output.wav")
                        butto.click(self.Simul, inputs=[self.text, self.url_input], outputs=[output2,output3])
                        btnVC.click(self.tts_fn, inputs=[self.text], outputs=[output1,output2])
            with gr.Tab("控制面板"):
                with gr.Row():
                    with gr.Column():
                        with gr.Row():
                            with gr.Column():
                                self.api_input1 = gr.TextArea(label="输入api-key或本地存储说话模型的路径", value="https://platform.openai.com/account/api-keys")
                                with gr.Accordion(label="chatbot选择", open=False):
                                    self.api_input2 = gr.Checkbox(value=True, label="采用gpt3.5")
                                    self.local_chat1 = gr.Checkbox(value=False, label="启动本地chatbot")
                                    self.local_chat2 = gr.Checkbox(value=True, label="是否量化")
                                res = gr.TextArea()
                                Botselection = gr.Button("完成chatbot设定")
                                Botselection.click(self.check_bot, inputs=[self.api_input1,self.api_input2,self.local_chat1,self.local_chat2], outputs = [res])
                                self.input1 = gr.Dropdown(label = "模型", choices = self.modelPaths, value = self.modelPaths[0], type = "value")
                                self.input2 = gr.Dropdown(label="Language", choices=self.lan, value="自动", interactive=True)
                            with gr.Column():
                                btnVC = gr.Button("完成vits TTS端设定")
                                self.input3 = gr.Dropdown(label="Speaker", choices=list(range(101)), value=0, interactive=True)
                                self.input4 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声比例(noise scale),以控制情感", value=0.267)
                                self.input5 = gr.Slider(minimum=0, maximum=1.0, label="更改噪声偏差(noise scale w),以控制音素长短", value=0.7)
                                self.input6 = gr.Slider(minimum=0.1, maximum=10, label="duration", value=1)
                                statusa = gr.TextArea()
                                btnVC.click(self.create_tts_fn, inputs=[self.input1, self.input2, self.input3, self.input4, self.input5, self.input6], outputs = [statusa])

    def Simul(self,text,url_input):
        web = url_input + text
        res = requests.get(web)
        music = res.content
        with open('output.wav', 'wb') as code:
            code.write(music)
        file_path = "output.wav"
        return web,file_path

    def chatgpt(self,text):
        self.messages.append({"role": "user", "content": text},)
        chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages= self.messages)
        reply = chat.choices[0].message.content
        return reply

    def ChATGLM(self,text):
        if text == 'clear':
            self.history = []
        response, new_history = self.model.chat(self.tokenizer, text, self.history)
        response = response.replace(" ",'').replace("\n",'.')
        self.history = new_history
        return response

    def gpt3_chat(self,text):
        call_name = "Waifu"
        openai.api_key = args.key  # NOTE: `args` is not defined in this file; check_bot already sets openai.api_key
        identity = ""
        start_sequence = '\n'+str(call_name)+':'
        restart_sequence = "\nYou: "
        if 1 == 1:
            prompt0 = text  # current prompt
            if text == 'quit':
                return prompt0
            prompt = identity + prompt0 + start_sequence
            response = openai.Completion.create(
                model="text-davinci-003",
                prompt=prompt,
                temperature=0.5,
                max_tokens=1000,
                top_p=1.0,
                frequency_penalty=0.5,
                presence_penalty=0.0,
                stop=["\nYou:"]
            )
            return response['choices'][0]['text'].strip()

    def check_bot(self,api_input1,api_input2,local_chat1,local_chat2):
        if local_chat1:
            from transformers import AutoTokenizer, AutoModel
            self.tokenizer = AutoTokenizer.from_pretrained(api_input1, trust_remote_code=True)
            if local_chat2:
                self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True).half().quantize(4).cuda()
            else:
                self.model = AutoModel.from_pretrained(api_input1, trust_remote_code=True)
            self.history = []
        else:
            self.messages = []
            openai.api_key = api_input1
        return "Finished"

    def is_japanese(self,string):
        for ch in string:
            if ord(ch) > 0x3040 and ord(ch) < 0x30FF:
                return True
        return False

    def is_english(self,string):
        import re
        pattern = re.compile('^[A-Za-z0-9.,:;!?()_*"\' ]+$')
        if pattern.fullmatch(string):
            return True
        else:
            return False

    def get_text(self,text, hps, cleaned=False):
        if cleaned:
            text_norm = text_to_sequence(text, self.hps_ms.symbols, [])
        else:
            text_norm = text_to_sequence(text, self.hps_ms.symbols, self.hps_ms.data.text_cleaners)
        if self.hps_ms.data.add_blank:
            text_norm = commons.intersperse(text_norm, 0)
        text_norm = torch.LongTensor(text_norm)
        return text_norm

    def get_label(self,text, label):
        if f'[{label}]' in text:
            return True, text.replace(f'[{label}]', '')
        else:
            return False, text

    def sle(self,language,text):
        text = text.replace('\n','。').replace(' ',',')
        if language == "中文":
            tts_input1 = "[ZH]" + text + "[ZH]"
            return tts_input1
        elif language == "自动":
            tts_input1 = f"[JA]{text}[JA]" if self.is_japanese(text) else f"[ZH]{text}[ZH]"
            return tts_input1
        elif language == "日文":
            tts_input1 = "[JA]" + text + "[JA]"
            return tts_input1

    def create_tts_fn(self,path, input2, input3, n_scale= 0.667,n_scale_w = 0.8, l_scale = 1 ):
        self.language = input2
        self.speaker_id = int(input3)
        self.n_scale = n_scale
        self.n_scale_w = n_scale_w
        self.l_scale = l_scale
        self.dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.hps_ms = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
        self.n_speakers = self.hps_ms.data.n_speakers if 'n_speakers' in self.hps_ms.data.keys() else 0
        self.n_symbols = len(self.hps_ms.symbols) if 'symbols' in self.hps_ms.keys() else 0
        self.net_g_ms = SynthesizerTrn(
            self.n_symbols,
            self.hps_ms.data.filter_length // 2 + 1,
            self.hps_ms.train.segment_size // self.hps_ms.data.hop_length,
            n_speakers=self.n_speakers,
            **self.hps_ms.model).to(self.dev)
        _ = self.net_g_ms.eval()
        _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.net_g_ms)
        return 'success'

    def tts_fn(self,text):
        # NOTE: these attributes are the Gradio components created in __init__, not the submitted values
        if self.local_chat1:
            text = self.chatgpt(text)
        elif self.api_input2:
            text = self.ChATGLM(text)
        else:
            text = self.gpt3_chat(text)
        print(text)
        text = self.sle(self.language,text)
        with torch.no_grad():
            stn_tst = self.get_text(text, self.hps_ms, cleaned=False)
            x_tst = stn_tst.unsqueeze(0).to(self.dev)
            x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(self.dev)
            sid = torch.LongTensor([self.speaker_id]).to(self.dev)
            audio = self.net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=self.n_scale, noise_scale_w=self.n_scale_w, length_scale=self.l_scale)[0][0, 0].data.cpu().float().numpy()
        resampled_audio_data = signal.resample(audio, len(audio) * 2)
        sf.write('temp.wav', resampled_audio_data, 44100, 'PCM_24')
        return (self.hps_ms.data.sampling_rate, audio), text.replace('[JA]','').replace('[ZH]','')

app = Flask(__name__)
print("开始部署")
grVits = VitsGradio()

@app.route('/chat')
def text_api():
    message = request.args.get('Text','')
    audio,text = grVits.tts_fn(message)
    text = text.replace('[JA]','').replace('[ZH]','')
    with open('temp.wav','rb') as bit:
        wav_bytes = bit.read()
    headers = {
        'Content-Type': 'audio/wav',
        'Text': text.encode('utf-8')}
    return wav_bytes, 200, headers

def gradio_interface():
    return grVits.Vits.launch()

if __name__ == '__main__':
    api_thread = Thread(target=app.run, args=("0.0.0.0", 8080))
    gradio_thread = Thread(target=gradio_interface)
    api_thread.start()
    gradio_thread.start()
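For illustration only (not part of the commit): a minimal client sketch against the /chat route defined above. It assumes the Flask thread started in __main__ is reachable at 127.0.0.1:8080 and that a model has already been configured through the control-panel tab.

import requests

# Query /chat the same way Simul() does: the chatbot reply comes back in the
# 'Text' response header and the body is the 44100 Hz WAV written to temp.wav.
resp = requests.get("http://127.0.0.1:8080/chat", params={"Text": "你好"})
print(resp.headers.get("Text"))   # reply text (non-ASCII may need UTF-8 re-decoding)
with open("reply.wav", "wb") as f:
    f.write(resp.content)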
attentions.py
ADDED
@@ -0,0 +1,300 @@
import math
import torch
from torch import nn
from torch.nn import functional as F

import commons
from modules import LayerNorm


class Encoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.attn_layers[i](x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class Decoder(nn.Module):
    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for i in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            y = self.self_attn_layers[i](x, x, self_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_0[i](x + y)

            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
        x = x * x_mask
        return x


class MultiHeadAttention(nn.Module):
    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None

        self.k_channels = channels // n_heads
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels**-0.5
            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        ret = torch.matmul(x, y.unsqueeze(0))
        return ret

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
        return ret

    def _get_relative_embeddings(self, relative_embeddings, length):
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
        return used_relative_embeddings

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # pad along column
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
        # add 0's in the beginning that will skew the elements after reshape
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.
        Args:
          length: an integer scalar.
        Returns:
          a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = self.kernel_size - 1
        pad_r = 0
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

    def _same_padding(self, x):
        if self.kernel_size == 1:
            return x
        pad_l = (self.kernel_size - 1) // 2
        pad_r = self.kernel_size // 2
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x
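For illustration only (not part of the commit): a shape check for the relative-position Encoder above, using the transformer dimensions from checkpoints/default/config.json (hidden 192, filter 768, 2 heads, 6 layers).

import torch
from attentions import Encoder

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6,
              kernel_size=3, p_dropout=0.1, window_size=4)
x = torch.randn(1, 192, 50)    # [batch, hidden_channels, time]
x_mask = torch.ones(1, 1, 50)  # every position valid
y = enc(x, x_mask)
print(y.shape)                 # torch.Size([1, 192, 50]); the encoder preserves shape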
checkpoints/default/config.json
ADDED
@@ -0,0 +1 @@
{"data":{"add_blank":true,"cleaned_text":true,"cleaners":["custom_cleaners"],"filter_length":1024,"hop_length":256,"max_wav_value":32768.0,"mel_fmax":null,"mel_fmin":0.0,"n_mel_channels":80,"n_speakers":12,"sampling_rate":22050,"text_cleaners":["zh_ja_mixture_cleaners"],"training_files":"/root/content/vits/filelists/muse_tricolor_train.txt.cleaned","validation_files":"/root/content/vits/filelists/muse_tricolor_val.txt.cleaned","win_length":1024},"model":{"filter_channels":768,"gin_channels":256,"hidden_channels":192,"inter_channels":192,"kernel_size":3,"n_heads":2,"n_layers":6,"n_layers_q":3,"p_dropout":0.1,"resblock":"1","resblock_dilation_sizes":[[1,3,5],[1,3,5],[1,3,5]],"resblock_kernel_sizes":[3,7,11],"upsample_initial_channel":512,"upsample_kernel_sizes":[16,16,4,4],"upsample_rates":[8,8,2,2],"use_spectral_norm":false},"speakers":["Minami Kotori","Sonoda Umi","Koizumi Hanayo","Hoshizora Rin","Tojo Nozomi","Yazawa Nico","Ayase Eli","Nishikino Maki","Kosaka Honoka","WenZhi","MoXiaoju","Biaobei"],"symbols":["_",",",".","!","?","\u2026","~","_",".","!","?","-","~","\u2026","A","E","I","N","O","Q","U","a","b","d","e","f","g","h","i","j","k","l","m","n","o","p","r","s","t","u","v","w","y","z","\u0283","\u02a7","\u02a6","\u026f","\u0279","\u0259","\u0265","\u207c","\u02b0","`","\u2192","\u2193","\u2191"," "],"train":{"batch_size":32,"betas":[0.8,0.99],"c_kl":1.0,"c_mel":45,"epochs":1200,"eps":1e-09,"eval_interval":10000,"fp16_run":true,"init_lr_ratio":1,"learning_rate":0.0002,"log_interval":200,"lr_decay":0.999875,"seed":1234,"segment_size":8192,"warmup_epochs":0}}
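For illustration only (not part of the commit): create_tts_fn in app.py reads this file with utils.get_hparams_from_file (utils.py is included in this upload), which exposes the JSON as attribute-style hyperparameters.

import utils

hps = utils.get_hparams_from_file("checkpoints/default/config.json")
print(hps.data.sampling_rate)  # 22050
print(hps.data.n_speakers)     # 12, matching the "speakers" list
print(len(hps.symbols))        # size of the symbol table passed to SynthesizerTrn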
checkpoints/default/model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:81fc0ab91e340a14198453055bb36797c85b74abde79e3c6507150118902f45c
size 476700333
cleaners/JapaneseCleaner.dll
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a659eb68d12d4a88ef7dfde6086b9974cd4d43634f7e4bfe710d5537cdd61a75
size 3097600
cleaners/char.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:888ee94c5a8a7a26d24ab3f1b7155441351954fd51ea06b4a2f78bd742492b2f
size 262496
cleaners/matrix.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:62fd16b4f64c851d5dc352ef0d5740c5fc83ddc7c203b2b0b1fc5271969a14ce
size 3792262
cleaners/sys.dic
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca57d9029691a70a5dfb99afc2844180256161d7130da65b1a867510e129b9a6
size 103073776
cleaners/unk.dic
ADDED
Binary file (5.69 kB).
commons.py
ADDED
@@ -0,0 +1,97 @@
import math
import torch
from torch.nn import functional as F
import torch.jit


def script_method(fn, _rcb=None):
    return fn


def script(obj, optimize=True, _frames_up=0, _rcb=None):
    return obj


# Disable TorchScript compilation by replacing torch.jit entry points with no-ops.
torch.jit.script_method = script_method
torch.jit.script = script


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def intersperse(lst, item):
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path
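For illustration only (not part of the commit): two of the helpers above drive text preprocessing at inference time — intersperse, which app.py applies when add_blank is set in the config, and sequence_mask.

import torch
from commons import intersperse, sequence_mask

print(intersperse([5, 6, 7], 0))   # [0, 5, 0, 6, 0, 7, 0]: a blank token around every symbol
print(sequence_mask(torch.tensor([2, 4])))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])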
jieba/dict.txt
ADDED
The diff for this file is too large to render.
mel_processing.py
ADDED
@@ -0,0 +1,112 @@
import math
import os
import random
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
import librosa
import librosa.util as librosa_util
from librosa.util import normalize, pad_center, tiny
from scipy.signal import get_window
from scipy.io.wavfile import read
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + '_' + str(spec.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
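For illustration only (not part of the commit): computing a linear spectrogram with the STFT settings from checkpoints/default/config.json. This file relies on the pre-2.0 real-valued torch.stft output, so the sketch assumes the torch version pinned in requirements.txt.

import torch
from mel_processing import spectrogram_torch

y = torch.randn(1, 22050).clamp(-1., 1.)   # one second of audio in [-1, 1]
spec = spectrogram_torch(y, n_fft=1024, sampling_rate=22050,
                         hop_size=256, win_size=1024, center=False)
print(spec.shape)  # [1, 513, frames]; 513 = filter_length // 2 + 1 = SynthesizerTrn's spec_channels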
models.py
ADDED
@@ -0,0 +1,498 @@
import math

import torch
from torch import nn
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
from torch.nn import functional as F
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm

import attentions
import commons
import modules
from commons import init_weights, get_padding


class StochasticDurationPredictor(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
        super().__init__()
        filter_channels = in_channels  # it needs to be removed from future version.
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.log_flow = modules.Log()
        self.flows = nn.ModuleList()
        self.flows.append(modules.ElementwiseAffine(2))
        for i in range(n_flows):
            self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.flows.append(modules.Flip())

        self.post_pre = nn.Conv1d(1, filter_channels, 1)
        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        self.post_flows = nn.ModuleList()
        self.post_flows.append(modules.ElementwiseAffine(2))
        for i in range(4):
            self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
            self.post_flows.append(modules.Flip())

        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
        self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)

    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
        x = torch.detach(x)
        x = self.pre(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.convs(x, x_mask)
        x = self.proj(x) * x_mask

        if not reverse:
            flows = self.flows
            assert w is not None

            logdet_tot_q = 0
            h_w = self.post_pre(w)
            h_w = self.post_convs(h_w, x_mask)
            h_w = self.post_proj(h_w) * x_mask
            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
            z_q = e_q
            for flow in self.post_flows:
                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
                logdet_tot_q += logdet_q
            z_u, z1 = torch.split(z_q, [1, 1], 1)
            u = torch.sigmoid(z_u) * x_mask
            z0 = (w - u) * x_mask
            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q

            logdet_tot = 0
            z0, logdet = self.log_flow(z0, x_mask)
            logdet_tot += logdet
            z = torch.cat([z0, z1], 1)
            for flow in flows:
                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
                logdet_tot = logdet_tot + logdet
            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
            return nll + logq  # [b]
        else:
            flows = list(reversed(self.flows))
            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
            for flow in flows:
                z = flow(z, x_mask, g=x, reverse=reverse)
            z0, z1 = torch.split(z, [1, 1], 1)
            logw = z0
            return logw


class DurationPredictor(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
        super().__init__()

        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.gin_channels = gin_channels

        self.drop = nn.Dropout(p_dropout)
        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_1 = modules.LayerNorm(filter_channels)
        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
        self.norm_2 = modules.LayerNorm(filter_channels)
        self.proj = nn.Conv1d(filter_channels, 1, 1)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, in_channels, 1)

    def forward(self, x, x_mask, g=None):
        x = torch.detach(x)
        if g is not None:
            g = torch.detach(g)
            x = x + self.cond(g)
        x = self.conv_1(x * x_mask)
        x = torch.relu(x)
        x = self.norm_1(x)
        x = self.drop(x)
        x = self.conv_2(x * x_mask)
        x = torch.relu(x)
        x = self.norm_2(x)
        x = self.drop(x)
        x = self.proj(x * x_mask)
        return x * x_mask


class TextEncoder(nn.Module):
    def __init__(self,
                 n_vocab,
                 out_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout):
        super().__init__()
        self.n_vocab = n_vocab
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout

        if self.n_vocab != 0:
            self.emb = nn.Embedding(n_vocab, hidden_channels)
            nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)

        self.encoder = attentions.Encoder(
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths):
        if self.n_vocab != 0:
            x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)

        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return x, m, logs, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 n_flows=4,
                 gin_channels=0):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
                                              gin_channels=gin_channels, mean_only=True))
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x


class PosteriorEncoder(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 gin_channels=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask


class Generator(torch.nn.Module):
    def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
                 upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
        resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(weight_norm(
                ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
                                k, u, padding=(k - u) // 2)))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        print('Removing weight norm...')
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
            norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
        ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList([
            norm_f(Conv1d(1, 16, 15, 1, padding=7)),
            norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
            norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
            norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
            norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
            norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
        ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class SynthesizerTrn(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(self,
                 n_vocab,
                 spec_channels,
                 segment_size,
                 inter_channels,
                 hidden_channels,
                 filter_channels,
                 n_heads,
                 n_layers,
                 kernel_size,
                 p_dropout,
                 resblock,
                 resblock_kernel_sizes,
                 resblock_dilation_sizes,
                 upsample_rates,
                 upsample_initial_channel,
                 upsample_kernel_sizes,
                 n_speakers=0,
                 gin_channels=0,
                 use_sdp=True,
                 **kwargs):

        super().__init__()
        self.n_vocab = n_vocab
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels

        self.use_sdp = use_sdp

        self.enc_p = TextEncoder(n_vocab,
                                 inter_channels,
                                 hidden_channels,
                                 filter_channels,
                                 n_heads,
                                 n_layers,
                                 kernel_size,
                                 p_dropout)
        self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
                             upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
                                      gin_channels=gin_channels)
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)

        if use_sdp:
            self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
        else:
            self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)

        if n_speakers > 1:
            self.emb_g = nn.Embedding(n_speakers, gin_channels)

    def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
        x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
        if self.n_speakers > 0:
            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
        else:
            g = None

        if self.use_sdp:
            logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
        else:
            logw = self.dp(x, x_mask, g=g)
        w = torch.exp(logw) * x_mask * length_scale
        w_ceil = torch.ceil(w)
        y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
        y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
        attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
        attn = commons.generate_path(w_ceil, attn_mask)

        m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']
        logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)  # [b, t', t], [b, t, d] -> [b, d, t']

        z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
        z = self.flow(z_p, y_mask, g=g, reverse=True)
        o = self.dec((z * y_mask)[:, :, :max_len], g=g)
        return o, attn, y_mask, (z, z_p, m_p, logs_p)

    def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
        assert self.n_speakers > 0, "n_speakers have to be larger than 0."
        g_src = self.emb_g(sid_src).unsqueeze(-1)
        g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
        z_p = self.flow(z, y_mask, g=g_src)
        z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
        o_hat = self.dec(z_hat * y_mask, g=g_tgt)
        return o_hat, y_mask, (z, z_p, z_hat)
|
497 |
+
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
|
498 |
+
return o_hat, y_mask, (z, z_p, z_hat)
|
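Usage sketch (editorial, not part of the upload): wiring SynthesizerTrn.infer together for a single utterance. The config field names (hps.data.filter_length, hps.data.hop_length, hps.data.text_cleaners, hps.data.n_speakers, hps.train.segment_size, hps.model) follow the usual VITS config layout and are an assumption about checkpoints/default/config.json; importing text also loads cleaners/JapaneseCleaner.dll, so this only runs where that DLL can be loaded.

import torch
import utils
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

hps = utils.get_hparams_from_file("checkpoints/default/config.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    n_speakers=hps.data.n_speakers,
    **hps.model)
net_g.eval()
utils.load_checkpoint("checkpoints/default/model.pth", net_g)

seq = text_to_sequence("你好", symbols, hps.data.text_cleaners)  # assumed config field
x = torch.LongTensor(seq).unsqueeze(0)       # [1, T] symbol IDs
x_lengths = torch.LongTensor([x.size(1)])
sid = torch.LongTensor([0])                  # speaker id, only used when n_speakers > 0
with torch.no_grad():
    audio = net_g.infer(x, x_lengths, sid=sid, noise_scale=.667,
                        noise_scale_w=0.8, length_scale=1)[0][0, 0].cpu().numpy()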
modules.py
ADDED
@@ -0,0 +1,387 @@
import math
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm

import commons
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers-1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding
                                            ))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert kernel_size % 2 == 1
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels]*2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class ConvFlow(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound
                                                               )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
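Self-check sketch (editorial, not part of the upload): every flow module above is a bijection, so running a ResidualCouplingLayer forward and then with reverse=True should reproduce its input; this is a quick way to sanity-check the file after edits. The channel sizes below are arbitrary.

import torch
from modules import ResidualCouplingLayer

layer = ResidualCouplingLayer(channels=192, hidden_channels=192,
                              kernel_size=5, dilation_rate=1, n_layers=4)
x = torch.randn(2, 192, 50)
x_mask = torch.ones(2, 1, 50)
y, logdet = layer(x, x_mask)            # forward pass: (output, log-determinant)
x_rec = layer(y, x_mask, reverse=True)  # reverse pass undoes the transform
print(torch.max(torch.abs(x - x_rec)))  # ~0 (the zero-initialized post conv even makes the layer start as the identity)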
requirements.txt
ADDED
@@ -0,0 +1,22 @@
Flask
Cython==0.29.21
librosa==0.8.0
matplotlib==3.3.1
numpy==1.21.6
phonemizer==2.2.1
scipy==1.5.2
tensorboard==2.3.0
torch
torchvision
Unidecode==1.1.1
jamo==0.4.1
pypinyin==0.44.0
jieba==0.42.1
cn2an==0.5.17
ipython==7.34.0
gradio==3.4.1
openai
pydub
inflect
eng_to_ipa
text/__init__.py
ADDED
@@ -0,0 +1,32 @@
""" from https://github.com/keithito/tacotron """
from text import cleaners


def text_to_sequence(text, symbols, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Args:
      text: string to convert to a sequence
      symbols: list of symbols the model was trained with
      cleaner_names: names of the cleaner functions to run the text through
    Returns:
      List of integers corresponding to the symbols in the text
    '''
    _symbol_to_id = {s: i for i, s in enumerate(symbols)}

    sequence = []

    clean_text = _clean_text(text, cleaner_names)
    for symbol in clean_text:
        if symbol not in _symbol_to_id.keys():
            continue
        symbol_id = _symbol_to_id[symbol]
        sequence += [symbol_id]
    return sequence


def _clean_text(text, cleaner_names):
    for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
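Usage sketch (editorial): symbols is the list from text/symbols.py, and characters outside it are silently dropped. Note that importing text runs text/cleaners.py, which loads cleaners/JapaneseCleaner.dll at import time, so this only works where that DLL can be loaded.

from text import text_to_sequence
from text.symbols import symbols

ids = text_to_sequence("你好。", symbols, ["chinese_cleaners"])
print(ids)  # list of integer symbol IDs for the cleaned text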
text/cleaners.py
ADDED
@@ -0,0 +1,95 @@
import re
import ctypes
from unidecode import unidecode
from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2

dll = ctypes.cdll.LoadLibrary('cleaners/JapaneseCleaner.dll')
dll.CreateOjt.restype = ctypes.c_uint64
dll.PluginMain.restype = ctypes.c_uint64
folder = ctypes.create_unicode_buffer("cleaners")
dll.CreateOjt(folder)

def clean_japanese(text):
    input_wchar_pointer = ctypes.create_unicode_buffer(text)
    result = ctypes.wstring_at(dll.PluginMain(input_wchar_pointer))
    return result

def none_cleaner(text):
    return text

def japanese_cleaners(text):
    text = clean_japanese(text)
    text = re.sub(r'([A-Za-z])$', r'\1.', text)
    return text

def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')

def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    if re.match('[ˉˊˇˋ˙]', text[-1]):
        text += '。'
    return text

def zh_ja_mixture_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_romaji(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_cleaners(
            japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
        text += '.'
    return text

def cjke_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        cleaned_text = cleaned_text.replace(
            'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_cleaners(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text

def cjks_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_cleaners(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text

def cjke_cleaners2(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
        text = text.replace(chinese_text, cleaned_text+' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_cleaners(japanese_text[4:-4])
        text = text.replace(japanese_text, cleaned_text+' ', 1)
    text = text[:-1]
    if re.match(r'[^\.,!\?\-…~]', text[-1]):
        text += '.'
    return text
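Sketch of the language-tag convention (editorial): the mixture cleaners expect segments wrapped in [ZH]...[ZH] or [JA]...[JA] and convert each one in place; like everything in this file, it needs the Windows DLL above to load.

from text.cleaners import zh_ja_mixture_cleaners

mixed = "[ZH]你好[ZH][JA]こんにちは[JA]"
print(zh_ja_mixture_cleaners(mixed))  # romaji-style string ready for text_to_sequence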
text/mandarin.py
ADDED
@@ -0,0 +1,328 @@
import os
import sys
import re
from pypinyin import lazy_pinyin, BOPOMOFO
import jieba
import cn2an


# List of (Latin alphabet, bopomofo) pairs:
_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('a', 'ㄟˉ'),
    ('b', 'ㄅㄧˋ'),
    ('c', 'ㄙㄧˉ'),
    ('d', 'ㄉㄧˋ'),
    ('e', 'ㄧˋ'),
    ('f', 'ㄝˊㄈㄨˋ'),
    ('g', 'ㄐㄧˋ'),
    ('h', 'ㄝˇㄑㄩˋ'),
    ('i', 'ㄞˋ'),
    ('j', 'ㄐㄟˋ'),
    ('k', 'ㄎㄟˋ'),
    ('l', 'ㄝˊㄛˋ'),
    ('m', 'ㄝˊㄇㄨˋ'),
    ('n', 'ㄣˉ'),
    ('o', 'ㄡˉ'),
    ('p', 'ㄆㄧˉ'),
    ('q', 'ㄎㄧㄡˉ'),
    ('r', 'ㄚˋ'),
    ('s', 'ㄝˊㄙˋ'),
    ('t', 'ㄊㄧˋ'),
    ('u', 'ㄧㄡˉ'),
    ('v', 'ㄨㄧˉ'),
    ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
    ('x', 'ㄝˉㄎㄨˋㄙˋ'),
    ('y', 'ㄨㄞˋ'),
    ('z', 'ㄗㄟˋ')
]]

# List of (bopomofo, romaji) pairs:
_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('ㄅㄛ', 'p⁼wo'),
    ('ㄆㄛ', 'pʰwo'),
    ('ㄇㄛ', 'mwo'),
    ('ㄈㄛ', 'fwo'),
    ('ㄅ', 'p⁼'),
    ('ㄆ', 'pʰ'),
    ('ㄇ', 'm'),
    ('ㄈ', 'f'),
    ('ㄉ', 't⁼'),
    ('ㄊ', 'tʰ'),
    ('ㄋ', 'n'),
    ('ㄌ', 'l'),
    ('ㄍ', 'k⁼'),
    ('ㄎ', 'kʰ'),
    ('ㄏ', 'h'),
    ('ㄐ', 'ʧ⁼'),
    ('ㄑ', 'ʧʰ'),
    ('ㄒ', 'ʃ'),
    ('ㄓ', 'ʦ`⁼'),
    ('ㄔ', 'ʦ`ʰ'),
    ('ㄕ', 's`'),
    ('ㄖ', 'ɹ`'),
    ('ㄗ', 'ʦ⁼'),
    ('ㄘ', 'ʦʰ'),
    ('ㄙ', 's'),
    ('ㄚ', 'a'),
    ('ㄛ', 'o'),
    ('ㄜ', 'ə'),
    ('ㄝ', 'e'),
    ('ㄞ', 'ai'),
    ('ㄟ', 'ei'),
    ('ㄠ', 'au'),
    ('ㄡ', 'ou'),
    ('ㄧㄢ', 'yeNN'),
    ('ㄢ', 'aNN'),
    ('ㄧㄣ', 'iNN'),
    ('ㄣ', 'əNN'),
    ('ㄤ', 'aNg'),
    ('ㄧㄥ', 'iNg'),
    ('ㄨㄥ', 'uNg'),
    ('ㄩㄥ', 'yuNg'),
    ('ㄥ', 'əNg'),
    ('ㄦ', 'əɻ'),
    ('ㄧ', 'i'),
    ('ㄨ', 'u'),
    ('ㄩ', 'ɥ'),
    ('ˉ', '→'),
    ('ˊ', '↑'),
    ('ˇ', '↓↑'),
    ('ˋ', '↓'),
    ('˙', ''),
    (',', ','),
    ('。', '.'),
    ('!', '!'),
    ('?', '?'),
    ('—', '-')
]]

# List of (romaji, ipa) pairs:
_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('ʃy', 'ʃ'),
    ('ʧʰy', 'ʧʰ'),
    ('ʧ⁼y', 'ʧ⁼'),
    ('NN', 'n'),
    ('Ng', 'ŋ'),
    ('y', 'j'),
    ('h', 'x')
]]

# List of (bopomofo, ipa) pairs:
_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('ㄅㄛ', 'p⁼wo'),
    ('ㄆㄛ', 'pʰwo'),
    ('ㄇㄛ', 'mwo'),
    ('ㄈㄛ', 'fwo'),
    ('ㄅ', 'p⁼'),
    ('ㄆ', 'pʰ'),
    ('ㄇ', 'm'),
    ('ㄈ', 'f'),
    ('ㄉ', 't⁼'),
    ('ㄊ', 'tʰ'),
    ('ㄋ', 'n'),
    ('ㄌ', 'l'),
    ('ㄍ', 'k⁼'),
    ('ㄎ', 'kʰ'),
    ('ㄏ', 'x'),
    ('ㄐ', 'tʃ⁼'),
    ('ㄑ', 'tʃʰ'),
    ('ㄒ', 'ʃ'),
    ('ㄓ', 'ts`⁼'),
    ('ㄔ', 'ts`ʰ'),
    ('ㄕ', 's`'),
    ('ㄖ', 'ɹ`'),
    ('ㄗ', 'ts⁼'),
    ('ㄘ', 'tsʰ'),
    ('ㄙ', 's'),
    ('ㄚ', 'a'),
    ('ㄛ', 'o'),
    ('ㄜ', 'ə'),
    ('ㄝ', 'ɛ'),
    ('ㄞ', 'aɪ'),
    ('ㄟ', 'eɪ'),
    ('ㄠ', 'ɑʊ'),
    ('ㄡ', 'oʊ'),
    ('ㄧㄢ', 'jɛn'),
    ('ㄩㄢ', 'ɥæn'),
    ('ㄢ', 'an'),
    ('ㄧㄣ', 'in'),
    ('ㄩㄣ', 'ɥn'),
    ('ㄣ', 'ən'),
    ('ㄤ', 'ɑŋ'),
    ('ㄧㄥ', 'iŋ'),
    ('ㄨㄥ', 'ʊŋ'),
    ('ㄩㄥ', 'jʊŋ'),
    ('ㄥ', 'əŋ'),
    ('ㄦ', 'əɻ'),
    ('ㄧ', 'i'),
    ('ㄨ', 'u'),
    ('ㄩ', 'ɥ'),
    ('ˉ', '→'),
    ('ˊ', '↑'),
    ('ˇ', '↓↑'),
    ('ˋ', '↓'),
    ('˙', ''),
    (',', ','),
    ('。', '.'),
    ('!', '!'),
    ('?', '?'),
    ('—', '-')
]]

# List of (bopomofo, ipa2) pairs:
_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('ㄅㄛ', 'pwo'),
    ('ㄆㄛ', 'pʰwo'),
    ('ㄇㄛ', 'mwo'),
    ('ㄈㄛ', 'fwo'),
    ('ㄅ', 'p'),
    ('ㄆ', 'pʰ'),
    ('ㄇ', 'm'),
    ('ㄈ', 'f'),
    ('ㄉ', 't'),
    ('ㄊ', 'tʰ'),
    ('ㄋ', 'n'),
    ('ㄌ', 'l'),
    ('ㄍ', 'k'),
    ('ㄎ', 'kʰ'),
    ('ㄏ', 'h'),
    ('ㄐ', 'tɕ'),
    ('ㄑ', 'tɕʰ'),
    ('ㄒ', 'ɕ'),
    ('ㄓ', 'tʂ'),
    ('ㄔ', 'tʂʰ'),
    ('ㄕ', 'ʂ'),
    ('ㄖ', 'ɻ'),
    ('ㄗ', 'ts'),
    ('ㄘ', 'tsʰ'),
    ('ㄙ', 's'),
    ('ㄚ', 'a'),
    ('ㄛ', 'o'),
    ('ㄜ', 'ɤ'),
    ('ㄝ', 'ɛ'),
    ('ㄞ', 'aɪ'),
    ('ㄟ', 'eɪ'),
    ('ㄠ', 'ɑʊ'),
    ('ㄡ', 'oʊ'),
    ('ㄧㄢ', 'jɛn'),
    ('ㄩㄢ', 'yæn'),
    ('ㄢ', 'an'),
    ('ㄧㄣ', 'in'),
    ('ㄩㄣ', 'yn'),
    ('ㄣ', 'ən'),
    ('ㄤ', 'ɑŋ'),
    ('ㄧㄥ', 'iŋ'),
    ('ㄨㄥ', 'ʊŋ'),
    ('ㄩㄥ', 'jʊŋ'),
    ('ㄥ', 'ɤŋ'),
    ('ㄦ', 'əɻ'),
    ('ㄧ', 'i'),
    ('ㄨ', 'u'),
    ('ㄩ', 'y'),
    ('ˉ', '˥'),
    ('ˊ', '˧˥'),
    ('ˇ', '˨˩˦'),
    ('ˋ', '˥˩'),
    ('˙', ''),
    (',', ','),
    ('。', '.'),
    ('!', '!'),
    ('?', '?'),
    ('—', '-')
]]


def number_to_chinese(text):
    numbers = re.findall(r'\d+(?:\.?\d+)?', text)
    for number in numbers:
        text = text.replace(number, cn2an.an2cn(number), 1)
    return text


def chinese_to_bopomofo(text, taiwanese=False):
    text = text.replace('、', ',').replace(';', ',').replace(':', ',')
    words = jieba.lcut(text, cut_all=False)
    text = ''
    for word in words:
        bopomofos = lazy_pinyin(word, BOPOMOFO)
        if not re.search('[\u4e00-\u9fff]', word):
            text += word
            continue
        for i in range(len(bopomofos)):
            bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
        if text != '':
            text += ' '
        if taiwanese:
            text += '#'+'#'.join(bopomofos)
        else:
            text += ''.join(bopomofos)
    return text


def latin_to_bopomofo(text):
    for regex, replacement in _latin_to_bopomofo:
        text = re.sub(regex, replacement, text)
    return text


def bopomofo_to_romaji(text):
    for regex, replacement in _bopomofo_to_romaji:
        text = re.sub(regex, replacement, text)
    return text


def bopomofo_to_ipa(text):
    for regex, replacement in _bopomofo_to_ipa:
        text = re.sub(regex, replacement, text)
    return text


def bopomofo_to_ipa2(text):
    for regex, replacement in _bopomofo_to_ipa2:
        text = re.sub(regex, replacement, text)
    return text


def chinese_to_romaji(text):
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    text = bopomofo_to_romaji(text)
    text = re.sub('i([aoe])', r'y\1', text)
    text = re.sub('u([aoəe])', r'w\1', text)
    text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
                  r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
    text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
    return text


def chinese_to_lazy_ipa(text):
    text = chinese_to_romaji(text)
    for regex, replacement in _romaji_to_ipa:
        text = re.sub(regex, replacement, text)
    return text


def chinese_to_ipa(text):
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    text = bopomofo_to_ipa(text)
    text = re.sub('i([aoe])', r'j\1', text)
    text = re.sub('u([aoəe])', r'w\1', text)
    text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
                  r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
    text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
    return text


def chinese_to_ipa2(text, taiwanese=False):
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text, taiwanese)
    text = latin_to_bopomofo(text)
    text = bopomofo_to_ipa2(text)
    text = re.sub(r'i([aoe])', r'j\1', text)
    text = re.sub(r'u([aoəe])', r'w\1', text)
    text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
    text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
    return text
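Pipeline sketch (editorial): the Mandarin front end expands digits with cn2an, segments with jieba, converts characters to bopomofo via pypinyin, then maps them to romaji or IPA. The functions themselves need no DLL, but importing them through the text package still runs text/__init__.py and hence cleaners.py; exact output also depends on the pinned jieba/pypinyin versions.

from text.mandarin import chinese_to_romaji, chinese_to_ipa

print(chinese_to_romaji("我有2只猫"))  # the digit becomes 二, characters become toned romaji
print(chinese_to_ipa("我有2只猫"))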
text/symbols.py
ADDED
@@ -0,0 +1,67 @@
'''
Defines the set of symbols used in text input to the model.
'''
_pad = '_'
_punctuation = ',.!?-~…'
_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
'''
# japanese_cleaners2
_pad = '_'
_punctuation = ',.!?-~…'
_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
'''

'''# korean_cleaners
_pad = '_'
_punctuation = ',.!?…~'
_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
'''

'''# chinese_cleaners
_pad = '_'
_punctuation = ',。!?—…'
_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
'''


'''# sanskrit_cleaners
_pad = '_'
_punctuation = '।'
_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
'''

'''# cjks_cleaners
_pad = '_'
_punctuation = ',.!?-~…'
_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
'''

'''# thai_cleaners
_pad = '_'
_punctuation = '.!? '
_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
'''

'''# cjke_cleaners2
_pad = '_'
_punctuation = ',.!?-~…'
_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
'''

'''# shanghainese_cleaners
_pad = '_'
_punctuation = ',.!?…'
_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
'''

'''# chinese_dialect_cleaners
_pad = '_'
_punctuation = ',.!?~…─'
_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚αᴀᴇ↑↓∅ⱼ '
'''

# Export all symbols:
symbols = [_pad] + list(_punctuation) + list(_letters)

# Special symbol ids
SPACE_ID = symbols.index(" ")
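Sketch (editorial): the model consumes indices into this symbols list; the mapping below is the same one text_to_sequence builds internally. Importing text.symbols also runs text/__init__.py, with the DLL caveat noted earlier.

from text.symbols import symbols, SPACE_ID

_symbol_to_id = {s: i for i, s in enumerate(symbols)}
print(len(symbols), SPACE_ID, _symbol_to_id['a'])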
transforms.py
ADDED
@@ -0,0 +1,193 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):

    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet


def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
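Numerical sketch (editorial): the spline is invertible, so applying the transform and then its inverse recovers the input, and the elementwise log-determinants cancel. Shapes follow ConvFlow: with 10 bins, the derivative tensor has num_bins - 1 entries before the linear-tail padding.

import torch
from transforms import piecewise_rational_quadratic_transform

x = torch.rand(4, 10) * 8 - 4   # points inside the tail bound
w = torch.randn(4, 10, 10)      # unnormalized widths (10 bins)
h = torch.randn(4, 10, 10)      # unnormalized heights
d = torch.randn(4, 10, 9)       # unnormalized derivatives (num_bins - 1)
y, logdet = piecewise_rational_quadratic_transform(x, w, h, d, tails='linear', tail_bound=5.0)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(y, w, h, d, inverse=True,
                                                           tails='linear', tail_bound=5.0)
print(torch.max(torch.abs(x - x_rec)))            # ≈ 0: round trip recovers the input
print(torch.max(torch.abs(logdet + inv_logdet)))  # ≈ 0: log-determinants cancel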
utils.py
ADDED
@@ -0,0 +1,76 @@
import logging
from json import loads
from torch import load, FloatTensor
from numpy import float32
import librosa


class HParams():
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if type(v) == dict:
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()


def load_checkpoint(checkpoint_path, model):
    checkpoint_dict = load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    saved_state_dict = checkpoint_dict['model']
    if hasattr(model, 'module'):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    new_state_dict = {}
    for k, v in state_dict.items():
        try:
            new_state_dict[k] = saved_state_dict[k]
        except KeyError:
            logging.info("%s is not in the checkpoint" % k)
            new_state_dict[k] = v
    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)
    logging.info("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return


def get_hparams_from_file(config_path):
    with open(config_path, "r") as f:
        data = f.read()
    config = loads(data)

    hparams = HParams(**config)
    return hparams


def load_audio_to_torch(full_path, target_sampling_rate):
    audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
    return FloatTensor(audio.astype(float32))
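Sketch of HParams behaviour (editorial): nested dicts from the JSON config become nested HParams objects, readable both as attributes and by key, which is what get_hparams_from_file returns. The field names here are made up for illustration.

from utils import HParams

hps = HParams(model={"hidden_channels": 192}, train={"segment_size": 8192})
print(hps.model.hidden_channels)     # 192
print(hps["train"]["segment_size"])  # 8192
print("model" in hps, len(hps))      # True 2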
目前的环境.txt
ADDED
@@ -0,0 +1,143 @@
absl-py==1.4.0
aiohttp==3.8.4
aiosignal==1.3.1
anyio==3.6.2
async-timeout==4.0.2
attrs==22.2.0
audioread==3.0.0
Babel==2.12.1
backcall==0.2.0
bcrypt==4.0.1
blinker==1.6.2
cachetools==4.2.4
cffi==1.15.1
charset-normalizer==3.1.0
click==8.1.3
clldutils==3.19.0
cn2an==0.5.17
colorama==0.4.6
coloredlogs==15.0.1
colorlog==6.7.0
cryptography==40.0.1
csvw==3.1.3
cycler==0.11.0
Cython==0.29.21
decorator==5.1.1
eng-to-ipa==0.0.2
fastapi==0.95.0
ffmpy==0.3.0
Flask==2.3.1
flatbuffers==23.1.21
frozenlist==1.3.3
fsspec==2023.3.0
google-auth==1.35.0
google-auth-oauthlib==0.4.6
gradio==3.4.1
grpcio==1.53.0
h11==0.12.0
httpcore==0.15.0
httpx==0.23.3
humanfriendly==10.0
idna==3.4
importlib-metadata==6.1.0
importlib-resources==5.12.0
inflect==6.0.2
ipython==7.34.0
isodate==0.6.1
itsdangerous==2.1.2
jamo==0.4.1
jedi==0.18.2
jieba==0.42.1
Jinja2==3.1.2
joblib==1.2.0
jsonschema==4.17.3
kiwisolver==1.4.4
language-tags==1.2.0
librosa==0.8.0
linkify-it-py==2.0.0
llvmlite==0.39.1
lxml==4.9.2
Markdown==3.4.3
markdown-it-py==2.2.0
MarkupSafe==2.1.2
matplotlib==3.3.1
matplotlib-inline==0.1.6
mdit-py-plugins==0.3.5
mdurl==0.1.2
mpmath==1.2.1
multidict==6.0.4
numba==0.56.4
numpy==1.21.6
oauthlib==3.2.2
onnxruntime==1.14.1
openai==0.27.2
opencv-contrib-python==4.7.0.68
orjson==3.8.8
packaging==23.0
pandas==1.5.3
paramiko==3.1.0
parso==0.8.3
phonemizer==2.2.1
pickleshare==0.7.5
Pillow==9.4.0
pkgutil_resolve_name==1.3.10
platformdirs==3.2.0
pooch==1.7.0
proces==0.1.4
prompt-toolkit==3.0.38
protobuf==4.22.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.21
pycryptodome==3.17
pydantic==1.10.7
pydub==0.25.1
pyglet==2.0.5
Pygments==2.14.0
pylatexenc==2.10
PyNaCl==1.5.0
pyparsing==3.0.9
pypinyin==0.44.0
pyreadline3==3.4.1
pyrsistent==0.19.3
python-dateutil==2.8.2
python-multipart==0.0.6
pytz==2023.2
PyYAML==6.0
rdflib==6.3.2
regex==2023.3.23
requests==2.28.2
requests-oauthlib==1.3.1
resampy==0.4.2
rfc3986==1.5.0
rsa==4.9
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
scikit-learn==1.2.2
scipy==1.5.2
segments==2.2.1
six==1.16.0
sniffio==1.3.0
soundfile==0.12.1
starlette==0.26.1
sympy==1.11.1
tabulate==0.9.0
tensorboard==2.3.0
tensorboard-plugin-wit==1.8.1
threadpoolctl==3.1.0
torch==1.13.1
torchvision==0.14.1
tqdm==4.65.0
traitlets==5.9.0
typing_extensions==4.5.0
uc-micro-py==1.0.1
Unidecode==1.1.1
uritemplate==4.1.1
urllib3==1.26.15
uvicorn==0.21.1
wcwidth==0.2.6
websockets==10.4
Werkzeug==2.3.0
wincertstore==0.2
yarl==1.8.2
zipp==3.15.0