qqq
#1
by
changanguli
- opened
- Docker/Dockerfile +0 -12
- Docker/vits.sh +0 -20
- app.py +123 -124
- models.py +2 -3
Docker/Dockerfile
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
# Container image that clones and serves the vits-uma-genshin-honkai TTS space.
FROM python:3.9-bullseye
# /app is a mount point so the cloned repo / model can persist outside the container.
VOLUME ["/app"]
WORKDIR /app
# Set apt to Chinese mirror
RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
# cmake and git are needed to clone the space and build its native deps.
RUN apt-get update && apt-get -y install cmake git
RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai
WORKDIR /app/vits-uma-genshin-honkai
# Patch the app so Gradio binds 0.0.0.0 and is reachable from outside the container.
RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py
# Entrypoint script installs deps / downloads the model on first run, then starts the app.
ADD vits.sh /app/vits.sh
EXPOSE 7860
ENTRYPOINT [ "/app/vits.sh" ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Docker/vits.sh
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
#!/bin/bash
# Bootstrap script: install python deps and download the model checkpoint on
# first run (or when the checkpoint looks truncated), then start the app.

# Start the Gradio app (message: "initialization done, starting service").
run() {
    echo -e "\033[32m已完成初始化,启动服务...\033[0m"
    python3 /app/vits-uma-genshin-honkai/app.py
}

# One-time setup: pip deps (USTC mirror) + model checkpoint download, then run.
install() {
    echo -e "\033[33m正在初始化:安装依赖....\033[0m"
    pip install -r /app/vits-uma-genshin-honkai/requirements.txt -i https://mirrors.ustc.edu.cn/pypi/web/simple
    echo -e "\033[33m正在下载模型....\033[0m"
    # Remove any partial/placeholder file before re-downloading.
    rm -f /app/vits-uma-genshin-honkai/model/G_953000.pth
    wget -O /app/vits-uma-genshin-honkai/model/G_953000.pth https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai/resolve/main/model/G_953000.pth
    echo -e "\033[32m初始化完成!\033[0m"
    run
}

# Re-install if the checkpoint is missing or suspiciously small (<10000 bytes,
# e.g. a git-lfs pointer file instead of the real weights).
if [ ! -f "/app/vits-uma-genshin-honkai/model/G_953000.pth" ] || [ "$(stat -c%s "/app/vits-uma-genshin-honkai/model/G_953000.pth")" -lt 10000 ]; then
    install
else
    run
fi
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
CHANGED
@@ -1,124 +1,123 @@
|
|
1 |
-
|
2 |
-
import
|
3 |
-
import
|
4 |
-
import
|
5 |
-
|
6 |
-
from
|
7 |
-
from
|
8 |
-
import
|
9 |
-
|
10 |
-
hps_ms = utils.get_hparams_from_file(r'./model/config.json')
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
hps_ms.
|
15 |
-
hps_ms.
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
text_norm
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
()
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
let
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
text
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
oA.
|
81 |
-
oA
|
82 |
-
|
83 |
-
oA.
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
"
|
92 |
-
|
93 |
-
'<div align="center"><a><font color="#dd0000"
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
app.queue(concurrency_count=1).launch()
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
import time
|
3 |
+
import gradio as gr
|
4 |
+
import utils
|
5 |
+
import commons
|
6 |
+
from models import SynthesizerTrn
|
7 |
+
from text import text_to_sequence
|
8 |
+
from torch import no_grad, LongTensor
|
9 |
+
|
10 |
+
# Load hyper-parameters and build the multi-speaker VITS synthesizer.
hps_ms = utils.get_hparams_from_file(r'./model/config.json')
net_g_ms = SynthesizerTrn(
    len(hps_ms.symbols),
    hps_ms.data.filter_length // 2 + 1,
    hps_ms.train.segment_size // hps_ms.data.hop_length,
    n_speakers=hps_ms.data.n_speakers,
    **hps_ms.model)
_ = net_g_ms.eval()  # inference mode only; no training in this app
speakers = hps_ms.speakers  # list of speaker display names, indexed by speaker id
# Restore generator weights from the checkpoint; no optimizer state is needed (None).
model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
|
20 |
+
|
21 |
+
def get_text(text, hps):
    """Convert raw text into a LongTensor of symbol ids plus the cleaned text.

    When ``hps.data.add_blank`` is set, a blank symbol (id 0) is interleaved
    between every pair of ids, as the model was trained with.
    """
    sequence, cleaned = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
    if hps.data.add_blank:
        sequence = commons.intersperse(sequence, 0)
    return LongTensor(sequence), cleaned
|
27 |
+
|
28 |
+
def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale):
    """Synthesize speech for *text* with the global VITS model.

    Args:
        text: input text; newlines/CRs and full-width spaces are stripped.
        language: 0 = Chinese ([ZH] markers), 1 = Japanese ([JA] markers),
            anything else = text passed through as-is (caller supplies markers).
        speaker_id: integer index into the model's speaker table.
        noise_scale, noise_scale_w, length_scale: VITS sampling knobs.

    Returns:
        (status message, (sample_rate, waveform ndarray) or None, timing string or None)
    """
    start = time.perf_counter()
    if not text:
        return "输入文本不能为空!", None, None
    text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
    if len(text) > 300:
        # Fix: the message previously claimed ">100" although the enforced limit is 300.
        return f"输入文字过长!{len(text)}>300", None, None
    if language == 0:
        text = f"[ZH]{text}[ZH]"
    elif language == 1:
        text = f"[JA]{text}[JA]"
    else:
        text = f"{text}"
    stn_tst, clean_text = get_text(text, hps_ms)
    with no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = LongTensor([stn_tst.size(0)])
        speaker_id = LongTensor([speaker_id])
        audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
                               length_scale=length_scale)[0][0, 0].data.float().numpy()

    # 22050 Hz is the model's output sample rate.
    return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s"
|
50 |
+
|
51 |
+
def search_speaker(search_value):
    """Return the speaker exactly matching *search_value*, else the first one
    containing it as a substring, else None (implicitly)."""
    exact = next((name for name in speakers if name == search_value), None)
    if exact is not None:
        return exact
    return next((name for name in speakers if search_value in name), None)
|
58 |
+
|
59 |
+
def change_lang(language):
    """Return default (noise_scale, noise_scale_w, length_scale) for a language index."""
    # Chinese (index 0) uses a slightly slower default speech rate than the rest.
    length_scale = 1.2 if language == 0 else 1.1
    return 0.6, 0.668, length_scale
|
64 |
+
|
65 |
+
download_audio_js = """
|
66 |
+
() =>{{
|
67 |
+
let root = document.querySelector("body > gradio-app");
|
68 |
+
if (root.shadowRoot != null)
|
69 |
+
root = root.shadowRoot;
|
70 |
+
let audio = root.querySelector("#tts-audio").querySelector("audio");
|
71 |
+
let text = root.querySelector("#input-text").querySelector("textarea");
|
72 |
+
if (audio == undefined)
|
73 |
+
return;
|
74 |
+
text = text.value;
|
75 |
+
if (text == undefined)
|
76 |
+
text = Math.floor(Math.random()*100000000);
|
77 |
+
audio = audio.src;
|
78 |
+
let oA = document.createElement("a");
|
79 |
+
oA.download = text.substr(0, 20)+'.wav';
|
80 |
+
oA.href = audio;
|
81 |
+
document.body.appendChild(oA);
|
82 |
+
oA.click();
|
83 |
+
oA.remove();
|
84 |
+
}}
|
85 |
+
"""
|
86 |
+
|
87 |
+
if __name__ == '__main__':
    # Build the Gradio UI: a "vits" synthesis tab and a speaker-list tab.
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> VITS语音在线合成demo\n"
            "<div align='center'>主要有赛马娘,原神中文,原神日语,崩坏3的音色</div>"
            '<div align="center"><a><font color="#dd0000">结果有随机性,语调可能很奇怪,可多次生成取最佳效果</font></a></div>'
            '<div align="center"><a><font color="#dd0000">标点符号会影响生成的结果</font></a></div>'
        )

        with gr.Tabs():
            with gr.TabItem("vits"):
                with gr.Row():
                    with gr.Column():
                        # NOTE(review): label says "100 words limitation" but vits()
                        # enforces 300 chars — confirm which limit is intended.
                        # f-strings below have no placeholders (plain strings would do).
                        input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text")
                        lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
                                           type="index", value="中文")
                        btn = gr.Button(value="Submit")
                        with gr.Row():
                            search = gr.Textbox(label="Search Speaker", lines=1)
                            btn2 = gr.Button(value="Search")
                        # Default to speaker index 228; type="index" feeds the int id to vits().
                        sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228])
                        with gr.Row():
                            ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
                            nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
                            ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True)
                    with gr.Column():
                        o1 = gr.Textbox(label="Output Message")
                        o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio")
                        o3 = gr.Textbox(label="Extra Info")
                        download = gr.Button("Download Audio")
                        # Wire the controls: synthesis, client-side download, speaker
                        # search, and per-language slider defaults.
                        btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate")
                        # .format() collapses the doubled braces in the JS template.
                        download.click(None, [], [], _js=download_audio_js.format())
                        btn2.click(search_speaker, inputs=[search], outputs=[sid])
                        lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
            with gr.TabItem("可用人物一览"):
                # Read-only roster of all speakers.
                gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index")
        # Single-request queue; launch() binds per Gradio defaults (patched in Docker).
        app.queue(concurrency_count=1).launch()
|
|
models.py
CHANGED
@@ -496,10 +496,9 @@ class SynthesizerTrn(nn.Module):
|
|
496 |
return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
497 |
|
498 |
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
499 |
-
|
500 |
-
x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device))
|
501 |
if self.n_speakers > 0:
|
502 |
-
g = self.emb_g(sid
|
503 |
else:
|
504 |
g = None
|
505 |
|
|
|
496 |
return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
497 |
|
498 |
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
499 |
+
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
|
|
500 |
if self.n_speakers > 0:
|
501 |
+
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
502 |
else:
|
503 |
g = None
|
504 |
|