Upload 76 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- LICENSE +201 -0
- READMEE.md +232 -0
- assets/languages/en-US.json +788 -0
- assets/languages/vi-VN.json +788 -0
- assets/logs/mute/f0/mute.wav.npy +3 -0
- assets/logs/mute/f0_voiced/mute.wav.npy +3 -0
- assets/logs/mute/sliced_audios/mute32000.wav +0 -0
- assets/logs/mute/sliced_audios/mute40000.wav +0 -0
- assets/logs/mute/sliced_audios/mute441000.wav +0 -0
- assets/logs/mute/sliced_audios/mute48000.wav +0 -0
- assets/logs/mute/sliced_audios_16k/mute.wav +0 -0
- assets/logs/mute/v1_extracted/mute.npy +3 -0
- assets/logs/mute/v2_extracted/mute.npy +3 -0
- assets/models/embedders/.gitattributes +0 -0
- assets/models/predictors/.gitattributes +0 -0
- assets/models/pretrained_custom/.gitattributes +0 -0
- assets/models/pretrained_v1/.gitattributes +0 -0
- assets/models/pretrained_v2/.gitattributes +0 -0
- assets/models/uvr5/.gitattributes +0 -0
- assets/presets/.gitattributes +0 -0
- assets/weights/.gitattributes +0 -0
- audios/.gitattributes +0 -0
- dataset/.gitattributes +0 -0
- main/app/app.py +0 -0
- main/app/tensorboard.py +30 -0
- main/configs/config.json +26 -0
- main/configs/config.py +70 -0
- main/configs/v1/32000.json +46 -0
- main/configs/v1/40000.json +46 -0
- main/configs/v1/44100.json +46 -0
- main/configs/v1/48000.json +46 -0
- main/configs/v2/32000.json +42 -0
- main/configs/v2/40000.json +42 -0
- main/configs/v2/44100.json +42 -0
- main/configs/v2/48000.json +42 -0
- main/inference/audio_effects.py +170 -0
- main/inference/convert.py +650 -0
- main/inference/create_dataset.py +240 -0
- main/inference/create_index.py +100 -0
- main/inference/extract.py +450 -0
- main/inference/preprocess.py +290 -0
- main/inference/separator_music.py +290 -0
- main/inference/train.py +1000 -0
- main/library/algorithm/commons.py +50 -0
- main/library/algorithm/modules.py +70 -0
- main/library/algorithm/mrf_hifigan.py +160 -0
- main/library/algorithm/refinegan.py +180 -0
- main/library/algorithm/residuals.py +140 -0
- main/library/algorithm/separator.py +330 -0
- main/library/algorithm/synthesizers.py +450 -0
LICENSE
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Apache License
|
2 |
+
Version 2.0, January 2004
|
3 |
+
http://www.apache.org/licenses/
|
4 |
+
|
5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
6 |
+
|
7 |
+
1. Definitions.
|
8 |
+
|
9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
11 |
+
|
12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
13 |
+
the copyright owner that is granting the License.
|
14 |
+
|
15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
16 |
+
other entities that control, are controlled by, or are under common
|
17 |
+
control with that entity. For the purposes of this definition,
|
18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
19 |
+
direction or management of such entity, whether by contract or
|
20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
22 |
+
|
23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
24 |
+
exercising permissions granted by this License.
|
25 |
+
|
26 |
+
"Source" form shall mean the preferred form for making modifications,
|
27 |
+
including but not limited to software source code, documentation
|
28 |
+
source, and configuration files.
|
29 |
+
|
30 |
+
"Object" form shall mean any form resulting from mechanical
|
31 |
+
transformation or translation of a Source form, including but
|
32 |
+
not limited to compiled object code, generated documentation,
|
33 |
+
and conversions to other media types.
|
34 |
+
|
35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
36 |
+
Object form, made available under the License, as indicated by a
|
37 |
+
copyright notice that is included in or attached to the work
|
38 |
+
(an example is provided in the Appendix below).
|
39 |
+
|
40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
41 |
+
form, that is based on (or derived from) the Work and for which the
|
42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
44 |
+
of this License, Derivative Works shall not include works that remain
|
45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
46 |
+
the Work and Derivative Works thereof.
|
47 |
+
|
48 |
+
"Contribution" shall mean any work of authorship, including
|
49 |
+
the original version of the Work and any modifications or additions
|
50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
54 |
+
means any form of electronic, verbal, or written communication sent
|
55 |
+
to the Licensor or its representatives, including but not limited to
|
56 |
+
communication on electronic mailing lists, source code control systems,
|
57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
59 |
+
excluding communication that is conspicuously marked or otherwise
|
60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
61 |
+
|
62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
64 |
+
subsequently incorporated within the Work.
|
65 |
+
|
66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
71 |
+
Work and such Derivative Works in Source or Object form.
|
72 |
+
|
73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
76 |
+
(except as stated in this section) patent license to make, have made,
|
77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
78 |
+
where such license applies only to those patent claims licensable
|
79 |
+
by such Contributor that are necessarily infringed by their
|
80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
82 |
+
institute patent litigation against any entity (including a
|
83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
84 |
+
or a Contribution incorporated within the Work constitutes direct
|
85 |
+
or contributory patent infringement, then any patent licenses
|
86 |
+
granted to You under this License for that Work shall terminate
|
87 |
+
as of the date such litigation is filed.
|
88 |
+
|
89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
90 |
+
Work or Derivative Works thereof in any medium, with or without
|
91 |
+
modifications, and in Source or Object form, provided that You
|
92 |
+
meet the following conditions:
|
93 |
+
|
94 |
+
(a) You must give any other recipients of the Work or
|
95 |
+
Derivative Works a copy of this License; and
|
96 |
+
|
97 |
+
(b) You must cause any modified files to carry prominent notices
|
98 |
+
stating that You changed the files; and
|
99 |
+
|
100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
101 |
+
that You distribute, all copyright, patent, trademark, and
|
102 |
+
attribution notices from the Source form of the Work,
|
103 |
+
excluding those notices that do not pertain to any part of
|
104 |
+
the Derivative Works; and
|
105 |
+
|
106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
107 |
+
distribution, then any Derivative Works that You distribute must
|
108 |
+
include a readable copy of the attribution notices contained
|
109 |
+
within such NOTICE file, excluding those notices that do not
|
110 |
+
pertain to any part of the Derivative Works, in at least one
|
111 |
+
of the following places: within a NOTICE text file distributed
|
112 |
+
as part of the Derivative Works; within the Source form or
|
113 |
+
documentation, if provided along with the Derivative Works; or,
|
114 |
+
within a display generated by the Derivative Works, if and
|
115 |
+
wherever such third-party notices normally appear. The contents
|
116 |
+
of the NOTICE file are for informational purposes only and
|
117 |
+
do not modify the License. You may add Your own attribution
|
118 |
+
notices within Derivative Works that You distribute, alongside
|
119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
120 |
+
that such additional attribution notices cannot be construed
|
121 |
+
as modifying the License.
|
122 |
+
|
123 |
+
You may add Your own copyright statement to Your modifications and
|
124 |
+
may provide additional or different license terms and conditions
|
125 |
+
for use, reproduction, or distribution of Your modifications, or
|
126 |
+
for any such Derivative Works as a whole, provided Your use,
|
127 |
+
reproduction, and distribution of the Work otherwise complies with
|
128 |
+
the conditions stated in this License.
|
129 |
+
|
130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
132 |
+
by You to the Licensor shall be under the terms and conditions of
|
133 |
+
this License, without any additional terms or conditions.
|
134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
135 |
+
the terms of any separate license agreement you may have executed
|
136 |
+
with Licensor regarding such Contributions.
|
137 |
+
|
138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
140 |
+
except as required for reasonable and customary use in describing the
|
141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
142 |
+
|
143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
144 |
+
agreed to in writing, Licensor provides the Work (and each
|
145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
147 |
+
implied, including, without limitation, any warranties or conditions
|
148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
150 |
+
appropriateness of using or redistributing the Work and assume any
|
151 |
+
risks associated with Your exercise of permissions under this License.
|
152 |
+
|
153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
154 |
+
whether in tort (including negligence), contract, or otherwise,
|
155 |
+
unless required by applicable law (such as deliberate and grossly
|
156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
157 |
+
liable to You for damages, including any direct, indirect, special,
|
158 |
+
incidental, or consequential damages of any character arising as a
|
159 |
+
result of this License or out of the use or inability to use the
|
160 |
+
Work (including but not limited to damages for loss of goodwill,
|
161 |
+
work stoppage, computer failure or malfunction, or any and all
|
162 |
+
other commercial damages or losses), even if such Contributor
|
163 |
+
has been advised of the possibility of such damages.
|
164 |
+
|
165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
168 |
+
or other liability obligations and/or rights consistent with this
|
169 |
+
License. However, in accepting such obligations, You may act only
|
170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
171 |
+
of any other Contributor, and only if You agree to indemnify,
|
172 |
+
defend, and hold each Contributor harmless for any liability
|
173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
174 |
+
of your accepting any such warranty or additional liability.
|
175 |
+
|
176 |
+
END OF TERMS AND CONDITIONS
|
177 |
+
|
178 |
+
APPENDIX: How to apply the Apache License to your work.
|
179 |
+
|
180 |
+
To apply the Apache License to your work, attach the following
|
181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
182 |
+
replaced with your own identifying information. (Don't include
|
183 |
+
the brackets!) The text should be enclosed in the appropriate
|
184 |
+
comment syntax for the file format. We also recommend that a
|
185 |
+
file or class name and description of purpose be included on the
|
186 |
+
same "printed page" as the copyright notice for easier
|
187 |
+
identification within third-party archives.
|
188 |
+
|
189 |
+
Copyright 2025 Phạm Huỳnh Anh
|
190 |
+
|
191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
192 |
+
you may not use this file except in compliance with the License.
|
193 |
+
You may obtain a copy of the License at
|
194 |
+
|
195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
196 |
+
|
197 |
+
Unless required by applicable law or agreed to in writing, software
|
198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
200 |
+
See the License for the specific language governing permissions and
|
201 |
+
limitations under the License.
|
READMEE.md
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div align="center">
|
2 |
+
|
3 |
+
# Vietnamese RVC BY ANH
|
4 |
+
Công cụ chuyển đổi giọng nói chất lượng và hiệu suất cao đơn giản dành cho người Việt.
|
5 |
+
|
6 |
+
[](https://github.com/PhamHuynhAnh16/Vietnamese-RVC)
|
7 |
+
[](https://colab.research.google.com/drive/18Ed5HbwcX0di6aJymX0EaUNz-xXU5uUc?hl=vi#scrollTo=ers351v_CMGN)
|
8 |
+
[](https://github.com/PhamHuynhAnh16/Vietnamese-RVC/blob/main/LICENSE)
|
9 |
+
|
10 |
+
</div>
|
11 |
+
|
12 |
+
<div align="center">
|
13 |
+
|
14 |
+
[](https://huggingface.co/spaces/AnhP/RVC-GUI)
|
15 |
+
[](https://huggingface.co/AnhP/Vietnamese-RVC-Project)
|
16 |
+
|
17 |
+
</div>
|
18 |
+
|
19 |
+
# Mô tả
|
20 |
+
Dự án này là một công cụ chuyển đổi giọng nói đơn giản, dễ sử dụng, được thiết kế cho người Việt Nam. Với mục tiêu tạo ra các sản phẩm chuyển đổi giọng nói chất lượng cao và hiệu suất tối ưu, dự án cho phép người dùng thay đổi giọng nói một cách mượt mà, tự nhiên.
|
21 |
+
|
22 |
+
# Các tính năng của dự án
|
23 |
+
|
24 |
+
- Tách nhạc (MDX-Net/Demucs)
|
25 |
+
|
26 |
+
- Chuyển đổi giọng nói (Chuyển đổi tệp/Chuyển đổi hàng loạt/Chuyển đổi văn bản)
|
27 |
+
|
28 |
+
- Áp dụng hiệu ứng cho âm thanh
|
29 |
+
|
30 |
+
- Tạo dữ liệu huấn luyện (Từ đường dẫn liên kết)
|
31 |
+
|
32 |
+
- Huấn luyện mô hình (v1/v2, bộ mã hóa chất lượng cao)
|
33 |
+
|
34 |
+
- Dung hợp mô hình
|
35 |
+
|
36 |
+
- Đọc thông tin mô hình
|
37 |
+
|
38 |
+
- Tải xuống từ kho mô hình có sẳn
|
39 |
+
|
40 |
+
- Tìm kiếm mô hình từ web
|
41 |
+
|
42 |
+
# Hướng dẫn sử dụng
|
43 |
+
|
44 |
+
**Sẽ có nếu tôi thực sự rảnh...**
|
45 |
+
|
46 |
+
# Cách cài đặt và sử dụng
|
47 |
+
|
48 |
+
- B1: **Cài đặt python từ trang chủ hoặc [python](https://www.python.org/ftp/python/3.10.7/python-3.10.7-amd64.exe)**
|
49 |
+
- B2: **Cài đặt ffmpeg từ [FFMPEG](https://github.com/BtbN/FFmpeg-Builds/releases) giải nén và thêm vào PATH**
|
50 |
+
- B2: **Tải mã nguồn về và giải nén ra**
|
51 |
+
- B3: **Vào thư mục mã nguồn và mở Command Prompt hoặc Terminal**
|
52 |
+
- B4: **Nhập lệnh để cài đặt thư viện cần thiết để hoạt động**
|
53 |
+
```
|
54 |
+
python -m venv env
|
55 |
+
env\\Scripts\\activate
|
56 |
+
python -m pip install pip==23.3
|
57 |
+
python -m pip install -r requirements.txt
|
58 |
+
```
|
59 |
+
- B5: **Chạy tệp run_app để mở giao diện sử dụng(Lưu ý: không tắt Command Prompt hoặc Terminal của giao diện)**
|
60 |
+
- Hoặc sử dụng cửa sổ Command Prompt hoặc cửa sổ Terminal trong thư mục mã nguồn
|
61 |
+
```
|
62 |
+
env\\Scripts\\python.exe main\\app\\app.py
|
63 |
+
```
|
64 |
+
|
65 |
+
**Với trường hợp bạn sử dụng Tensorboard để kiểm tra huấn luyện**
|
66 |
+
```
|
67 |
+
Chạy tệp: tensorboard hoặc lệnh env\\Scripts\\python.exe main/app/tensorboard.py
|
68 |
+
```
|
69 |
+
|
70 |
+
# Các đường dẫn thư mục chính của mã nguồn:
|
71 |
+
|
72 |
+
`assets\\languages`: **Thư mục chứa các tệp ngôn ngữ**
|
73 |
+
|
74 |
+
`assets\\logs`: **Thư mục chứa các tệp nhật ký và tệp chỉ mục mô hình**
|
75 |
+
|
76 |
+
`assets\\models\\embedders`: **Thư mục chứa các tệp mô hình nhúng**
|
77 |
+
|
78 |
+
`assets\\models\\predictors`: **Thư mục chứa một số tệp mô hình trích xuất dữ liệu của crepe, crepe-tiny, harvest, dio, rmvpe, fcpe**
|
79 |
+
|
80 |
+
`assets\\models\\pretrained_custom`: **Thư mục chứa các tệp mô hình huấn luyện trước tùy chỉnh**
|
81 |
+
|
82 |
+
`assets\\models\\pretrained_v1`: **Thư mục chứa các tệp mô hình huấn luyện trước v1**
|
83 |
+
|
84 |
+
`assets\\models\\pretrained_v2`: **Thư mục chứa các tệp mô hình huấn luyện trước v2**
|
85 |
+
|
86 |
+
`assets\\models\\uvr5`: **Thư mục chứa các tệp mô hình tách nhạc của Demucs và MDX**
|
87 |
+
|
88 |
+
`assets\\presets`: **Thư mục chứa các tệp cài đặt của chuyển đổi âm thanh**
|
89 |
+
|
90 |
+
`assets\\weights`: **Thư mục chứa các tệp mô hình**
|
91 |
+
|
92 |
+
`audios`: **Thư mục sẽ chứa các tệp âm thanh của bạn**
|
93 |
+
|
94 |
+
`dataset`: **Thư mục sẽ chứa các tệp dữ liệu âm thanh dùng cho việc huấn luyện mô hình**
|
95 |
+
|
96 |
+
# Các tệp tin cốt lỗi của mã nguồn
|
97 |
+
|
98 |
+
`main\\app\\app.py`: **Tệp tin hệ thống, giao diện của mã nguồn**
|
99 |
+
|
100 |
+
`main\\app\\tensorboard.py`: **Tệp tin hệ thống tensorboard**
|
101 |
+
|
102 |
+
`main\\configs\\v1`: **thư mục chứa các tệp cài đặt tốc độ lấy mẫu huấn luyện v1**
|
103 |
+
|
104 |
+
`main\\configs\\v2`: **thư mục chứa các tệp cài đặt tốc độ lấy mẫu huấn luyện v2**
|
105 |
+
|
106 |
+
`main\\configs\\config.json`: **Tệp tin cài đặt của giao diện**
|
107 |
+
|
108 |
+
`main\\configs\\config.py`: **Tệp khởi chạy các cài đặt**
|
109 |
+
|
110 |
+
`main\\inference\\audio_effects.py`: **Tệp tin thực hiện việc áp dụng hiệu ứng cho âm thanh**
|
111 |
+
|
112 |
+
`main\\inference\\convert.py`: **Tệp tin thực hiện xử lý và chuyển đổi âm thanh RVC**
|
113 |
+
|
114 |
+
`main\\inference\\create_dataset.py`: **Tệp tin thực hiện xử lý và tạo dữ liệu huấn luyện từ đường dẫn Youtube**
|
115 |
+
|
116 |
+
`main\\inference\\create_index.py`: **Tệp tin thực hiện việc tạo ra tệp tin chỉ mục**
|
117 |
+
|
118 |
+
`main\\inference\\extract.py`: **Tệp tin thực hiện việc trích xuất cao độ và trích xuất nhúng**
|
119 |
+
|
120 |
+
`main\\inference\\preprocess.py`: **Tệp tin thực hiện việc xử lý trước âm thanh dữ liệu huấn luyện trước khi trích xuất**
|
121 |
+
|
122 |
+
`main\\inference\\separator_music.py`: **Tệp tin thực hiện việc tách nhạc**
|
123 |
+
|
124 |
+
`main\\inference\\train.py`: **Tệp tin thực hiện việc huấn luyện mô hình RVC**
|
125 |
+
|
126 |
+
`main\\library\\algorithm\\commons.py`: **Tệp tin chức năng chung của RVC**
|
127 |
+
|
128 |
+
`main\\library\\algorithm\\modules.py`: **Tệp tin mô đun thuật toán sóng của RVC**
|
129 |
+
|
130 |
+
`main\\library\\algorithm\\mrf_hifigan.py`: **Tệp tin thuật toán của bộ mã hóa âm thanh MRF HIFIGAN**
|
131 |
+
|
132 |
+
`main\\library\\algorithm\\refinegan.py`: **Tệp tin thuật toán của bộ mã hóa âm thanh REFINEGAN**
|
133 |
+
|
134 |
+
`main\\library\\algorithm\\residuals.py`: **Tệp tin chứa các lớp thuật toán như ResBlock,...**
|
135 |
+
|
136 |
+
`main\\library\\algorithm\\separator.py`: **Tệp tin thuật toán tách nhạc chính của DEMUCS\MDX**
|
137 |
+
|
138 |
+
`main\\library\\algorithm\\synthesizers.py`: **Tệp tin thuật toán tổng hợp**
|
139 |
+
|
140 |
+
`main\\library\\architectures\\demucs_separator.py`: **Tệp tin cấu trúc của bộ tách nhạc Demucs**
|
141 |
+
|
142 |
+
`main\\library\\architectures\\mdx_separator.py`: **Tệp tin cấu trúc của bộ tách nhạc MDX**
|
143 |
+
|
144 |
+
`main\\library\\predictors\\CREPE.py`: **Tệp tin bộ trích xuất cao độ F0 CREPE và CREPE-TINY**
|
145 |
+
|
146 |
+
`main\\library\\predictors\\FCPE.py`: **Tệp tin bộ trích xuất cao độ F0 FCPE**
|
147 |
+
|
148 |
+
`main\\library\\predictors\\RMVPE.py`: **Tệp tin bộ trích xuất cao độ F0 RMVPE**
|
149 |
+
|
150 |
+
`main\\library\\predictors\\WORLD.py`: **Tệp tin bộ trích xuất cao độ F0 HARVEST VÀ DIO**
|
151 |
+
|
152 |
+
`main\\library\\uvr5_separator\\demucs\\apply.py`: **Tệp tin áp dụng dành riêng cho DEMUCS**
|
153 |
+
|
154 |
+
`main\\library\\uvr5_separator\\demucs\\demucs.py`: **Tệp tin thư viện tách nhạc cho mô hình DEMUCS**
|
155 |
+
|
156 |
+
`main\\library\\uvr5_separator\\demucs\\hdemucs.py`: **Tệp tin thư viện tách nhạc cho mô hình HDEMUCS**
|
157 |
+
|
158 |
+
`main\\library\\uvr5_separator\\demucs\\htdemucs.py`: **Tệp tin thư viện tách nhạc cho mô hình HTDEMUCS**
|
159 |
+
|
160 |
+
`main\\library\\uvr5_separator\\demucs\\states.py`: **Tệp tin trạng thái dành riêng cho DEMUCS**
|
161 |
+
|
162 |
+
`main\\library\\uvr5_separator\\demucs\\utils.py`: **Tệp tin tiện ích dành riêng cho DEMUCS**
|
163 |
+
|
164 |
+
`main\\library\\uvr5_separator\\common_separator.py`: **Tệp tin chức năng chung của hệ thống tách nhạc MDX và DEMUCS**
|
165 |
+
|
166 |
+
`main\\library\\uvr5_separator\\spec_utils.py`: **Tệp tin thông số kỷ thuật của hệ thống tách nhạc**
|
167 |
+
|
168 |
+
`main\\library\\utils.py`: **Tệp tin chứa các tiện ích như: xử lý, tải âm thanh, kiểm tra và tải xuống mô hình thiếu**
|
169 |
+
|
170 |
+
`main\\tools\\edge_tts.py`: **Tệp tin công cụ chuyển đổi văn bản thành giọng nói của EDGE**
|
171 |
+
|
172 |
+
`main\\tools\\gdown.py`: **Tệp tin tải xuống tệp tin từ google drive**
|
173 |
+
|
174 |
+
`main\\tools\\google_tts.py`: **Tệp tin công cụ chuyển đổi văn bản thành giọng nói của google**
|
175 |
+
|
176 |
+
`main\\tools\\huggingface.py`: **Tệp tin tải xuống tệp tin từ huggingface**
|
177 |
+
|
178 |
+
`main\\tools\\mediafire.py`: **Tệp tin tải xuống tệp từ mediafire**
|
179 |
+
|
180 |
+
`main\\tools\\meganz.py`: **Tệp tin tải xuống tệp từ MegaNZ**
|
181 |
+
|
182 |
+
`main\\tools\\noisereduce.py`: **Tệp tin công cụ giảm tiếng ồn âm thanh**
|
183 |
+
|
184 |
+
`main\\tools\\pixeldrain.py`: **Tệp tin tải xuống tệp từ pixeldrain**
|
185 |
+
|
186 |
+
# LƯU Ý
|
187 |
+
|
188 |
+
- **Hiện tại các bộ mã hóa mới như MRF HIFIGAN và REFINEGAN vẫn chưa đầy đủ các bộ huấn luyện trước**
|
189 |
+
- **Bộ mã hóa MRF HIFIGAN và REFINEGAN không hỗ trợ huấn luyện khi không không huấn luyện cao độ**
|
190 |
+
- **MRF HIFIGAN và REFINEGAN chưa có bất cứ huấn luyện trước nào**
|
191 |
+
|
192 |
+
# Điều khoản sử dụng
|
193 |
+
|
194 |
+
- Bạn phải đảm bảo rằng các nội dung âm thanh bạn tải lên và chuyển đổi qua dự án này không vi phạm quyền sở hữu trí tuệ của bên thứ ba.
|
195 |
+
|
196 |
+
- Không được phép sử dụng dự án này cho bất kỳ hoạt động nào bất hợp pháp, bao gồm nhưng không giới hạn ở việc sử dụng để lừa đảo, quấy rối, hay gây tổn hại đến người khác.
|
197 |
+
|
198 |
+
- Bạn chịu trách nhiệm hoàn toàn đối với bất kỳ thiệt hại nào phát sinh từ việc sử dụng sản phẩm không đúng cách.
|
199 |
+
|
200 |
+
- Tôi sẽ không chịu trách nhiệm với bất kỳ thiệt hại trực tiếp hoặc gián tiếp nào phát sinh từ việc sử dụng dự án này.
|
201 |
+
|
202 |
+
# Dự án này dựa trên một số dự án chính như
|
203 |
+
|
204 |
+
- **[Applio](https://github.com/IAHispano/Applio/tree/main)**
|
205 |
+
- **[Python-audio-separator](https://github.com/nomadkaraoke/python-audio-separator/tree/main)**
|
206 |
+
- **[Retrieval-based-Voice-Conversion-WebUI](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/tree/main)**
|
207 |
+
|
208 |
+
**và một số dự án khác**
|
209 |
+
|
210 |
+
- **[Torch-Onnx-Crepe](https://github.com/PhamHuynhAnh16/TORCH-ONNX-CREPE)**
|
211 |
+
- **[Local-attention](https://github.com/lucidrains/local-attention)**
|
212 |
+
- **[FcpeONNX](https://huggingface.co/deiteris/weights/tree/main)**
|
213 |
+
- **[ContentVec](https://github.com/auspicious3000/contentvec)**
|
214 |
+
- **[Mediafiredl](https://github.com/Gann4Life/mediafiredl)**
|
215 |
+
- **[Noisereduce](https://github.com/timsainb/noisereduce)**
|
216 |
+
- **[World.py](https://github.com/PhamHuynhAnh16/world.py)**
|
217 |
+
- **[Mega.py](https://github.com/odwyersoftware/mega.py)**
|
218 |
+
- **[Edge-TTS](https://github.com/rany2/edge-tts)**
|
219 |
+
- **[Gdown](https://github.com/wkentaro/gdown)**
|
220 |
+
|
221 |
+
# Kho mô hình của công cụ tìm kiếm mô hình
|
222 |
+
|
223 |
+
- **[VOICE-MODELS.COM](https://voice-models.com/)**
|
224 |
+
|
225 |
+
# Báo cáo lỗi
|
226 |
+
|
227 |
+
- **Với trường hợp gặp lỗi khi sử dụng mã nguồn này tôi thực sự xin lỗi bạn vì trải nghiệm không tốt này, bạn có thể gửi báo cáo lỗi thông qua cách phía dưới**
|
228 |
+
- **Bạn có thể báo cáo lỗi cho tôi thông qua hệ thống báo cáo lỗi webhook trong giao diện sử dụng**
|
229 |
+
- **Với trường hợp hệ thống báo cáo lỗi không hoạt động bạn có thể báo cáo lỗi cho tôi thông qua Discord `pham_huynh_anh` Hoặc [ISSUE](https://github.com/PhamHuynhAnh16/Vietnamese-RVC/issues)**
|
230 |
+
|
231 |
+
# ☎️ Liên hệ tôi
|
232 |
+
- Discord: **pham_huynh_anh**
|
assets/languages/en-US.json
ADDED
@@ -0,0 +1,788 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"set_lang": "Display language set to {lang}.",
|
3 |
+
"no_support_gpu": "Unfortunately, no compatible GPU is available to support your training.",
|
4 |
+
"text": "text",
|
5 |
+
"upload_success": "File {name} uploaded successfully.",
|
6 |
+
"download_url": "Download from the link",
|
7 |
+
"download_from_csv": "Download from the CSV model repository",
|
8 |
+
"search_models": "Search models",
|
9 |
+
"upload": "Upload",
|
10 |
+
"option_not_valid": "Invalid option!",
|
11 |
+
"list_model": "Model list",
|
12 |
+
"success": "Completed!",
|
13 |
+
"index": "index",
|
14 |
+
"model": "model",
|
15 |
+
"zip": "compress",
|
16 |
+
"search": "search",
|
17 |
+
"provide_file": "Please provide a valid {filename} file!",
|
18 |
+
"start": "Starting {start}...",
|
19 |
+
"not_found": "Not found {name}.",
|
20 |
+
"found": "Found {results} results!",
|
21 |
+
"download_music": "download music",
|
22 |
+
"download": "download",
|
23 |
+
"provide_url": "Please provide a url.",
|
24 |
+
"provide_name_is_save": "Please provide a model name to save.",
|
25 |
+
"not_support_url": "Your model url is not supported.",
|
26 |
+
"error_occurred": "An error occurred: {e}.",
|
27 |
+
"not_model": "The file you uploaded is not a model file!",
|
28 |
+
"unable_analyze_model": "Unable to analyze the model!",
|
29 |
+
"download_pretrain": "Downloading pre-trained model...",
|
30 |
+
"provide_pretrain": "Please provide a pre-trained model url {dg}.",
|
31 |
+
"provide_hubert": "Please provide a url to the embedding model.",
|
32 |
+
"sr_not_same": "The sample rates of the two models are not the same.",
|
33 |
+
"architectures_not_same": "Cannot merge models. The architectures are not the same.",
|
34 |
+
"fushion_model": "model fusion",
|
35 |
+
"model_fushion_info": "The model {name} is fused from {pth_1} and {pth_2} with a ratio of {ratio}.",
|
36 |
+
"not_found_create_time": "Creation time not found.",
|
37 |
+
"format_not_valid": "Invalid format.",
|
38 |
+
"read_info": "Models trained on different applications may produce different information or may not be readable!",
|
39 |
+
"epoch": "epoch.",
|
40 |
+
"step": "step",
|
41 |
+
"sr": "Sample rate",
|
42 |
+
"f0": "pitch training",
|
43 |
+
"version": "version.",
|
44 |
+
"not_f0": "Pitch training not performed",
|
45 |
+
"trained_f0": "Pitch training performed",
|
46 |
+
"model_info": "Model Name: {model_name}\n\n Model Creator: {model_author}\n\nEpoch: {epochs}\n\nSteps: {steps}\n\nVersion: {version}\n\nSample Rate: {sr}\n\nPitch Training: {pitch_guidance}\n\nHash (ID): {model_hash}\n\nCreation Time: {creation_date_str}\n",
|
47 |
+
"input_not_valid": "Please provide valid input!",
|
48 |
+
"output_not_valid": "Please provide valid output!",
|
49 |
+
"apply_effect": "apply effect",
|
50 |
+
"enter_the_text": "Please enter the text to speech!",
|
51 |
+
"choose_voice": "Please choose a voice!",
|
52 |
+
"convert": "Converting {name}...",
|
53 |
+
"separator_music": "music separation",
|
54 |
+
"notfound": "Not found",
|
55 |
+
"turn_on_use_audio": "Please enable using separated audio to proceed",
|
56 |
+
"turn_off_convert_backup": "Disable backup voice conversion to use the original voice",
|
57 |
+
"turn_off_merge_backup": "Disable merging backup voice to use the original voice",
|
58 |
+
"not_found_original_vocal": "Original vocal not found!",
|
59 |
+
"convert_vocal": "Converting voice...",
|
60 |
+
"convert_success": "Voice conversion completed!",
|
61 |
+
"convert_backup": "Converting backup voice...",
|
62 |
+
"convert_backup_success": "Backup voice conversion completed!",
|
63 |
+
"merge_backup": "Merging main voice with backup voice...",
|
64 |
+
"merge_success": "Merge completed.",
|
65 |
+
"is_folder": "Input is a folder: Converting all audio files in the folder...",
|
66 |
+
"not_found_in_folder": "No audio files found in the folder!",
|
67 |
+
"batch_convert": "Batch conversion in progress...",
|
68 |
+
"batch_convert_success": "Batch conversion successful!",
|
69 |
+
"create": "create",
|
70 |
+
"provide_name": "Please provide a model name.",
|
71 |
+
"not_found_data": "Data not found",
|
72 |
+
"not_found_data_preprocess": "Processed audio data not found, please reprocess.",
|
73 |
+
"not_found_data_extract": "Extracted audio data not found, please re-extract.",
|
74 |
+
"provide_pretrained": "Please provide pre-trained {dg}.",
|
75 |
+
"download_pretrained": "Download pre-trained {dg}{rvc_version} original",
|
76 |
+
"not_found_pretrain": "Pre-trained {dg} not found",
|
77 |
+
"not_use_pretrain": "No pre-trained model will be used",
|
78 |
+
"training": "training",
|
79 |
+
"display_title": "<h1> 🎵 Voice conversion and training interface created by Anh 🎵 <h1>",
|
80 |
+
"rick_roll": "Click here if you want to be Rick Roll :) ---> [RickRoll]({rickroll})",
|
81 |
+
"terms_of_use": "**Please do not use the project for any unethical, illegal, or harmful purposes to individuals or organizations...**",
|
82 |
+
"exemption": "**In cases where users do not comply with the terms or violate them, I will not be responsible for any claims, damages, or liabilities, whether in contract, negligence, or other causes arising from, outside of, or related to the software, its use, or other transactions associated with it.**",
|
83 |
+
"separator_tab": "Music Separation",
|
84 |
+
"4_part": "A simple music separation system can separate into 4 parts: Instruments, Vocals, Main vocals, Backup vocals",
|
85 |
+
"clear_audio": "Clean audio",
|
86 |
+
"separator_backing": "Separate backup vocals",
|
87 |
+
"denoise_mdx": "Denoise MDX separation",
|
88 |
+
"use_mdx": "Use MDX",
|
89 |
+
"dereveb_audio": "Remove vocal reverb",
|
90 |
+
"dereveb_backing": "Remove backup reverb",
|
91 |
+
"separator_model": "Music separation model",
|
92 |
+
"separator_backing_model": "Backup separation model",
|
93 |
+
"shift": "Shift",
|
94 |
+
"shift_info": "Higher is better quality but slower and uses more resources",
|
95 |
+
"segments_size": "Segments Size",
|
96 |
+
"segments_size_info": "Higher is better quality but uses more resources",
|
97 |
+
"batch_size": "Batch size",
|
98 |
+
"batch_size_info": "Number of samples processed simultaneously in one training cycle. Higher can cause memory overflow",
|
99 |
+
"mdx_batch_size_info": "Number of samples processed at a time. Batch processing optimizes calculations. Large batches can cause memory overflow; small batches reduce resource efficiency",
|
100 |
+
"overlap": "Overlap",
|
101 |
+
"overlap_info": "Overlap amount between prediction windows",
|
102 |
+
"export_format": "Export format",
|
103 |
+
"export_info": "The export format to export the audio file in",
|
104 |
+
"output_separator": "Separated output",
|
105 |
+
"hop_length_info": "Analyzing the time transfer window when performing transformations is allowed. The detailed value is compact but requires more calculation",
|
106 |
+
"drop_audio": "Drop audio here",
|
107 |
+
"drop_text": "Drop text file here",
|
108 |
+
"use_url": "YouTube link",
|
109 |
+
"url_audio": "Link audio",
|
110 |
+
"downloads": "Downloads",
|
111 |
+
"clean_strength": "Audio cleaning strength",
|
112 |
+
"clean_strength_info": "Strength of the audio cleaner for filtering vocals during export",
|
113 |
+
"input_output": "Audio input, output",
|
114 |
+
"audio_path": "Input audio path",
|
115 |
+
"refesh": "Refresh",
|
116 |
+
"output_folder": "Output audio folder path",
|
117 |
+
"output_folder_info": "Enter the folder path where the audio will be exported",
|
118 |
+
"input_audio": "Audio input",
|
119 |
+
"instruments": "Instruments",
|
120 |
+
"original_vocal": "Original vocal",
|
121 |
+
"main_vocal": "Main vocal",
|
122 |
+
"backing_vocal": "Backup vocal",
|
123 |
+
"convert_audio": "Convert Audio",
|
124 |
+
"convert_info": "Convert audio using a trained voice model",
|
125 |
+
"autotune": "Auto-tune",
|
126 |
+
"use_audio": "Use separated audio",
|
127 |
+
"convert_original": "Convert original voice",
|
128 |
+
"convert_backing": "Convert backup voice",
|
129 |
+
"not_merge_backing": "Do not merge backup voice",
|
130 |
+
"merge_instruments": "Merge instruments",
|
131 |
+
"pitch": "Pitch",
|
132 |
+
"pitch_info": "Recommendation: set to 12 to change male voice to female and vice versa",
|
133 |
+
"model_accordion": "Model and index",
|
134 |
+
"model_name": "Model file",
|
135 |
+
"index_path": "Index file",
|
136 |
+
"index_strength": "Index strength",
|
137 |
+
"index_strength_info": "Higher values increase strength. However, lower values may reduce artificial effects in the audio",
|
138 |
+
"output_path": "Audio output path",
|
139 |
+
"output_path_info": "Enter the output path (leave it as .wav format; it will auto-correct during conversion)",
|
140 |
+
"setting": "General settings",
|
141 |
+
"f0_method": "Extraction method",
|
142 |
+
"f0_method_info": "Method used for data extraction",
|
143 |
+
"f0_method_hybrid": "HYBRID extraction method",
|
144 |
+
"f0_method_hybrid_info": "Combination of two or more different types of extracts",
|
145 |
+
"hubert_model": "Embedding model",
|
146 |
+
"hubert_info": "Pre-trained model to assist embedding",
|
147 |
+
"modelname": "Model name",
|
148 |
+
"modelname_info": "If you have your own model, just upload it and input the name here",
|
149 |
+
"split_audio": "Split audio",
|
150 |
+
"autotune_rate": "Auto-tune rate",
|
151 |
+
"autotune_rate_info": "Level of auto-tuning adjustment",
|
152 |
+
"resample": "Resample",
|
153 |
+
"resample_info": "Resample post-processing to the final sample rate; 0 means no resampling, NOTE: SOME FORMATS DO NOT SUPPORT SPEEDS OVER 48000",
|
154 |
+
"filter_radius": "Filter radius",
|
155 |
+
"filter_radius_info": "If greater than three, median filtering is applied. The value represents the filter radius and can reduce breathiness or noise.",
|
156 |
+
"volume_envelope": "Volume envelope",
|
157 |
+
"volume_envelope_info": "Use the input volume envelope to replace or mix with the output volume envelope. The closer to 1, the more the output envelope is used",
|
158 |
+
"protect": "Consonant protection",
|
159 |
+
"protect_info": "Protect distinct consonants and breathing sounds to prevent audio tearing and other artifacts. Increasing this value provides comprehensive protection. Reducing it may reduce protection but also minimize indexing effects",
|
160 |
+
"output_convert": "Converted audio",
|
161 |
+
"main_convert": "Convert main voice",
|
162 |
+
"main_or_backing": "Main voice + Backup voice",
|
163 |
+
"voice_or_instruments": "Voice + Instruments",
|
164 |
+
"convert_text": "Convert Text",
|
165 |
+
"convert_text_markdown": "## Convert Text to Speech",
|
166 |
+
"convert_text_markdown_2": "Convert text to speech and read aloud using the trained voice model",
|
167 |
+
"input_txt": "Input data from a text file (.txt)",
|
168 |
+
"text_to_speech": "Text to read",
|
169 |
+
"voice_speed": "Reading speed",
|
170 |
+
"voice_speed_info": "Speed of the voice",
|
171 |
+
"tts_1": "1. Convert Text to Speech",
|
172 |
+
"tts_2": "2. Convert Speech",
|
173 |
+
"voice": "Voices by country",
|
174 |
+
"output_tts": "Output speech path",
|
175 |
+
"output_tts_convert": "Converted speech output path",
|
176 |
+
"tts_output": "Enter the output path",
|
177 |
+
"output_tts_markdown": "Unconverted and converted audio",
|
178 |
+
"output_text_to_speech": "Generated speech from text-to-speech conversion",
|
179 |
+
"output_file_tts_convert": "Speech converted using the model",
|
180 |
+
"output_audio": "Audio output",
|
181 |
+
"provide_output": "Enter the output path",
|
182 |
+
"audio_effects": "Audio Effects",
|
183 |
+
"apply_audio_effects": "## Add Additional Audio Effects",
|
184 |
+
"audio_effects_edit": "Add effects to audio",
|
185 |
+
"reverb": "Reverb effect",
|
186 |
+
"chorus": "Chorus effect",
|
187 |
+
"delay": "Delay effect",
|
188 |
+
"more_option": "Additional options",
|
189 |
+
"phaser": "Phaser effect",
|
190 |
+
"compressor": "Compressor effect",
|
191 |
+
"apply": "Apply",
|
192 |
+
"reverb_freeze": "Freeze mode",
|
193 |
+
"reverb_freeze_info": "Create a continuous echo effect when this mode is enabled",
|
194 |
+
"room_size": "Room size",
|
195 |
+
"room_size_info": "Adjust the room space to create reverberation",
|
196 |
+
"damping": "Damping",
|
197 |
+
"damping_info": "Adjust the level of absorption to control the amount of reverberation",
|
198 |
+
"wet_level": "Reverb signal level",
|
199 |
+
"wet_level_info": "Adjust the level of the reverb signal effect",
|
200 |
+
"dry_level": "Original signal level",
|
201 |
+
"dry_level_info": "Adjust the level of the signal without effects",
|
202 |
+
"width": "Audio width",
|
203 |
+
"width_info": "Adjust the width of the audio space",
|
204 |
+
"chorus_depth": "Chorus depth",
|
205 |
+
"chorus_depth_info": "Adjust the intensity of the chorus to create a wider sound",
|
206 |
+
"chorus_rate_hz": "Frequency",
|
207 |
+
"chorus_rate_hz_info": "Adjust the oscillation speed of the chorus effect",
|
208 |
+
"chorus_mix": "Mix signals",
|
209 |
+
"chorus_mix_info": "Adjust the mix level between the original and the processed signal",
|
210 |
+
"chorus_centre_delay_ms": "Center delay (ms)",
|
211 |
+
"chorus_centre_delay_ms_info": "The delay time between stereo channels to create the chorus effect",
|
212 |
+
"chorus_feedback": "Feedback",
|
213 |
+
"chorus_feedback_info": "Adjust the amount of the effect signal fed back into the original signal",
|
214 |
+
"delay_seconds": "Delay time",
|
215 |
+
"delay_seconds_info": "Adjust the delay time between the original and the processed signal",
|
216 |
+
"delay_feedback": "Delay feedback",
|
217 |
+
"delay_feedback_info": "Adjust the amount of feedback signal, creating a repeating effect",
|
218 |
+
"delay_mix": "Delay signal mix",
|
219 |
+
"delay_mix_info": "Adjust the mix level between the original and delayed signal",
|
220 |
+
"fade": "Fade effect",
|
221 |
+
"bass_or_treble": "Bass and treble",
|
222 |
+
"limiter": "Threshold limiter",
|
223 |
+
"distortion": "Distortion effect",
|
224 |
+
"gain": "Audio gain",
|
225 |
+
"bitcrush": "Bit reduction effect",
|
226 |
+
"clipping": "Clipping effect",
|
227 |
+
"fade_in": "Fade-in effect (ms)",
|
228 |
+
"fade_in_info": "Time for the audio to gradually increase from 0 to normal level",
|
229 |
+
"fade_out": "Fade-out effect (ms)",
|
230 |
+
"fade_out_info": "the time it takes for the sound to fade from normal to zero",
|
231 |
+
"bass_boost": "Bass boost level (dB)",
|
232 |
+
"bass_boost_info": "amount of bass boost in audio track",
|
233 |
+
"bass_frequency": "Low-pass filter cutoff frequency (Hz)",
|
234 |
+
"bass_frequency_info": "frequencies are reduced. Low frequencies make the bass clearer",
|
235 |
+
"treble_boost": "Treble boost level (dB)",
|
236 |
+
"treble_boost_info": "high level of sound reinforcement in the audio track",
|
237 |
+
"treble_frequency": "High-pass filter cutoff frequency (Hz)",
|
238 |
+
"treble_frequency_info": "The frequency will be filtered out. The higher the frequency, the higher the sound will be retained.",
|
239 |
+
"limiter_threashold_db": "Limiter threshold",
|
240 |
+
"limiter_threashold_db_info": "Limit the maximum audio level to prevent it from exceeding the threshold",
|
241 |
+
"limiter_release_ms": "Release time",
|
242 |
+
"limiter_release_ms_info": "Time for the audio to return after being limited (Mili Seconds)",
|
243 |
+
"distortion_info": "Adjust the level of distortion to create a noisy effect",
|
244 |
+
"gain_info": "Adjust the volume level of the signal",
|
245 |
+
"clipping_threashold_db": "Clipping threshold",
|
246 |
+
"clipping_threashold_db_info": "Trim signals exceeding the threshold, creating a distorted sound",
|
247 |
+
"bitcrush_bit_depth": "Bit depth",
|
248 |
+
"bitcrush_bit_depth_info": "Reduce audio quality by decreasing bit depth, creating a distorted effect",
|
249 |
+
"phaser_depth": "Phaser depth",
|
250 |
+
"phaser_depth_info": "Adjust the depth of the effect, impacting its intensity",
|
251 |
+
"phaser_rate_hz": "Frequency",
|
252 |
+
"phaser_rate_hz_info": "Adjust the frequency of the phaser effect",
|
253 |
+
"phaser_mix": "Mix signal",
|
254 |
+
"phaser_mix_info": "Adjust the mix level between the original and processed signals",
|
255 |
+
"phaser_centre_frequency_hz": "Center frequency",
|
256 |
+
"phaser_centre_frequency_hz_info": "The center frequency of the phaser effect, affecting the adjusted frequencies",
|
257 |
+
"phaser_feedback": "Feedback",
|
258 |
+
"phaser_feedback_info": "Adjust the feedback level of the effect, creating a stronger or lighter phaser feel",
|
259 |
+
"compressor_threashold_db": "Compressor threshold",
|
260 |
+
"compressor_threashold_db_info": "The threshold level above which the audio will be compressed",
|
261 |
+
"compressor_ratio": "Compression ratio",
|
262 |
+
"compressor_ratio_info": "Adjust the level of audio compression when exceeding the threshold",
|
263 |
+
"compressor_attack_ms": "Attack time (ms)",
|
264 |
+
"compressor_attack_ms_info": "Time for compression to start taking effect after the audio exceeds the threshold",
|
265 |
+
"compressor_release_ms": "Release time",
|
266 |
+
"compressor_release_ms_info": "Time for the audio to return to normal after being compressed",
|
267 |
+
"create_dataset_url": "Link to audio (use commas for multiple links)",
|
268 |
+
"createdataset": "Create dataset",
|
269 |
+
"create_dataset_markdown": "## Create Dataset training from YouTube",
|
270 |
+
"create_dataset_markdown_2": "Process and create training datasets using YouTube links",
|
271 |
+
"denoise": "Denoise",
|
272 |
+
"skip": "Skip",
|
273 |
+
"model_ver": "Voice separation version",
|
274 |
+
"model_ver_info": "The model version for separating vocals",
|
275 |
+
"create_dataset_info": "Dataset creation information",
|
276 |
+
"output_data": "Dataset output",
|
277 |
+
"output_data_info": "Output data after creation",
|
278 |
+
"skip_start": "Skip beginning",
|
279 |
+
"skip_start_info": "Skip the initial seconds of the audio; use commas for multiple audios",
|
280 |
+
"skip_end": "Skip end",
|
281 |
+
"skip_end_info": "Skip the final seconds of the audio; use commas for multiple audios",
|
282 |
+
"training_model": "Train Model",
|
283 |
+
"training_markdown": "Train and build a voice model with a set of voice data",
|
284 |
+
"training_model_name": "Name of the model during training (avoid special characters or spaces)",
|
285 |
+
"sample_rate": "Sample rate",
|
286 |
+
"sample_rate_info": "Sample rate of the model",
|
287 |
+
"training_version": "Model version",
|
288 |
+
"training_version_info": "Version of the model during training",
|
289 |
+
"training_pitch": "Pitch Guidance",
|
290 |
+
"training_pitch_info": "Train the pitch for the model",
|
291 |
+
"upload_dataset": "Upload dataset",
|
292 |
+
"preprocess_split": "Should be disabled if data has already been processed",
|
293 |
+
"preprocess_effect": "Post processing",
|
294 |
+
"preprocess_effect_info": "Should be disabled if data has already been processed",
|
295 |
+
"clear_dataset": "Clean dataset",
|
296 |
+
"preprocess_info": "Preprocessing information",
|
297 |
+
"preprocess_button": "1. Processing",
|
298 |
+
"extract_button": "2. Extract",
|
299 |
+
"extract_info": "Data extraction information",
|
300 |
+
"total_epoch": "Total epochs",
|
301 |
+
"total_epoch_info": "Total training epochs",
|
302 |
+
"save_epoch": "Save frequency",
|
303 |
+
"save_epoch_info": "Frequency of saving the model during training to allow retraining",
|
304 |
+
"create_index": "Create index",
|
305 |
+
"index_algorithm": "Index algorithm",
|
306 |
+
"index_algorithm_info": "Algorithm for creating the index",
|
307 |
+
"custom_dataset": "Custom dataset folder",
|
308 |
+
"custom_dataset_info": "Custom dataset folder for training data",
|
309 |
+
"overtraining_detector": "Overtraining detector",
|
310 |
+
"overtraining_detector_info": "Check for overtraining during model training",
|
311 |
+
"cleanup_training": "Clean Up",
|
312 |
+
"cleanup_training_info": "Only enable if you need to retrain the model from scratch.",
|
313 |
+
"cache_in_gpu": "Cache in GPU",
|
314 |
+
"cache_in_gpu_info": "Store the model in GPU cache memory",
|
315 |
+
"dataset_folder": "Folder containing dataset",
|
316 |
+
"threshold": "Overtraining threshold",
|
317 |
+
"setting_cpu_gpu": "CPU/GPU settings",
|
318 |
+
"gpu_number": "Number of GPUs used",
|
319 |
+
"gpu_number_info": "Number of GPUs used during training",
|
320 |
+
"save_only_latest": "Save only the latest",
|
321 |
+
"save_only_latest_info": "Save only the latest D and G models",
|
322 |
+
"save_every_weights": "Save all models",
|
323 |
+
"save_every_weights_info": "Save all models after each epoch",
|
324 |
+
"gpu_info": "GPU information",
|
325 |
+
"gpu_info_2": "Information about the GPU used during training",
|
326 |
+
"cpu_core": "Number of CPU cores available",
|
327 |
+
"cpu_core_info": "Number of CPU cores used during training",
|
328 |
+
"not_use_pretrain_2": "Do not use pretraining",
|
329 |
+
"not_use_pretrain_info": "Do not use pre-trained models",
|
330 |
+
"custom_pretrain": "Custom pretraining",
|
331 |
+
"custom_pretrain_info": "Customize pre-training settings",
|
332 |
+
"pretrain_file": "Pre-trained model file {dg}",
|
333 |
+
"train_info": "Training information",
|
334 |
+
"export_model": "5. Export Model",
|
335 |
+
"zip_model": "2. Compress model",
|
336 |
+
"output_zip": "Output file after compression",
|
337 |
+
"model_path": "Model path",
|
338 |
+
"model_ratio": "Model ratio",
|
339 |
+
"model_ratio_info": "Adjusting towards one side will make the model more like that side",
|
340 |
+
"output_model_path": "Model output path",
|
341 |
+
"fushion": "Model Fusion",
|
342 |
+
"fushion_markdown": "## Fushion Two Models",
|
343 |
+
"fushion_markdown_2": "Combine two voice models into a single model",
|
344 |
+
"read_model": "Read Information",
|
345 |
+
"read_model_markdown": "## Read Model Information",
|
346 |
+
"read_model_markdown_2": "Retrieve recorded information within the model",
|
347 |
+
"drop_model": "Drop model here",
|
348 |
+
"readmodel": "Read model",
|
349 |
+
"model_path_info": "Enter the path to the model file",
|
350 |
+
"modelinfo": "Model Information",
|
351 |
+
"download_markdown": "## Download Model",
|
352 |
+
"download_markdown_2": "Download voice models, pre-trained models, and embedding models",
|
353 |
+
"model_download": "Download voice model",
|
354 |
+
"model_url": "Link to the model",
|
355 |
+
"15s": "Please wait about 15 seconds. The system will restart automatically!",
|
356 |
+
"model_download_select": "Choose a model download method",
|
357 |
+
"model_warehouse": "Model repository",
|
358 |
+
"get_model": "Retrieve model",
|
359 |
+
"name_to_search": "Name to search",
|
360 |
+
"search_2": "Search",
|
361 |
+
"select_download_model": "Choose a searched model (Click to select)",
|
362 |
+
"download_pretrained_2": "Download pre-trained model",
|
363 |
+
"only_huggingface": "Supports only huggingface.co",
|
364 |
+
"pretrained_url": "Pre-trained model link {dg}",
|
365 |
+
"select_pretrain": "Choose pre-trained model",
|
366 |
+
"select_pretrain_info": "Choose a pre-trained model to download",
|
367 |
+
"pretrain_sr": "Model sample rate",
|
368 |
+
"drop_pretrain": "Drop pre-trained model {dg} here",
|
369 |
+
"hubert_download": "Download embedding model",
|
370 |
+
"hubert_url": "Link to embedding model",
|
371 |
+
"drop_hubert": "Drop embedding model here",
|
372 |
+
"settings": "Settings",
|
373 |
+
"settings_markdown": "## Additional Settings",
|
374 |
+
"settings_markdown_2": "Customize additional features of the project",
|
375 |
+
"lang": "Language",
|
376 |
+
"lang_restart": "The display language in the project (When changing the language, the system will automatically restart after 15 seconds to update)",
|
377 |
+
"change_lang": "Change Language",
|
378 |
+
"theme": "Theme",
|
379 |
+
"theme_restart": "Theme type displayed in the interface (When changing the theme, the system will automatically restart after 15 seconds to update)",
|
380 |
+
"theme_button": "Change Theme",
|
381 |
+
"change_light_dark": "Switch Light/Dark Mode",
|
382 |
+
"tensorboard_url": "Tensorboard URL",
|
383 |
+
"errors_loading_audio": "Error loading audio: {e}",
|
384 |
+
"apply_error": "An error occurred while applying effects: {e}",
|
385 |
+
"indexpath": "Index path",
|
386 |
+
"skip_file": "Part {i} skipped because it is too short: {chunk}ms",
|
387 |
+
"split_total": "Total parts split",
|
388 |
+
"process_audio_error": "An error occurred while processing the audio",
|
389 |
+
"merge_error": "An error occurred while merging audio",
|
390 |
+
"not_found_convert_file": "Processed file not found",
|
391 |
+
"convert_batch": "Batch conversion...",
|
392 |
+
"found_audio": "Found {audio_files} audio files for conversion.",
|
393 |
+
"not_found_audio": "No audio files found!",
|
394 |
+
"error_convert": "An error occurred during audio conversion: {e}",
|
395 |
+
"error_convert_batch": "An error occurred during the conversion of audio segments: {e}",
|
396 |
+
"error_convert_batch_2": "An error occurred during batch audio conversion: {e}",
|
397 |
+
"convert_batch_success": "Batch conversion completed successfully in {elapsed_time} seconds. {output_path}",
|
398 |
+
"convert_audio_success": "File {input_path} converted successfully in {elapsed_time} seconds. {output_path}",
|
399 |
+
"hybrid_methods": "Estimating f0 pitch using methods {methods}",
|
400 |
+
"method_not_valid": "Invalid method",
|
401 |
+
"read_faiss_index_error": "An error occurred while reading the FAISS index: {e}",
|
402 |
+
"read_model_error": "Failed to load model: {e}",
|
403 |
+
"starting_download": "Starting download",
|
404 |
+
"version_not_valid": "Invalid vocal separation version",
|
405 |
+
"skip<audio": "Cannot skip as skip time is less than audio file length",
|
406 |
+
"skip>audio": "Cannot skip as skip time is greater than audio file length",
|
407 |
+
"=<0": "Skip time is less than or equal to 0 and has been skipped",
|
408 |
+
"skip_warning": "Skip duration ({seconds} seconds) exceeds audio length ({total_duration} seconds). Skipping.",
|
409 |
+
"download_success": "Download completed successfully",
|
410 |
+
"create_dataset_error": "An error occurred while creating the training dataset",
|
411 |
+
"create_dataset_success": "Training dataset creation completed in {elapsed_time} seconds",
|
412 |
+
"skip_start_audio": "Successfully skipped start of audio: {input_file}",
|
413 |
+
"skip_end_audio": "Successfully skipped end of audio: {input_file}",
|
414 |
+
"merge_audio": "Merged all parts containing audio",
|
415 |
+
"separator_process": "Separating vocals: {input}...",
|
416 |
+
"not_found_main_vocal": "Main vocal not found!",
|
417 |
+
"not_found_backing_vocal": "Backup vocal not found!",
|
418 |
+
"not_found_instruments": "Instruments not found",
|
419 |
+
"merge_instruments_process": "Merging vocals with instruments...",
|
420 |
+
"dereverb": "Removing vocal reverb",
|
421 |
+
"dereverb_success": "Successfully removed vocal reverb",
|
422 |
+
"save_index": "Index file saved",
|
423 |
+
"create_index_error": "An error occurred while creating the index",
|
424 |
+
"sr_not_16000": "Sample rate must be 16000",
|
425 |
+
"gpu_not_valid": "Invalid GPU index. Switching to CPU.",
|
426 |
+
"extract_file_error": "An error occurred while extracting the file",
|
427 |
+
"extract_f0_method": "Starting pitch extraction using {num_processes} cores with method {f0_method}...",
|
428 |
+
"extract_f0": "Pitch Extraction",
|
429 |
+
"extract_f0_success": "Pitch extraction completed in {elapsed_time} seconds.",
|
430 |
+
"NaN": "contains NaN values and will be ignored.",
|
431 |
+
"start_extract_hubert": "Starting Embedding extraction...",
|
432 |
+
"not_found_audio_file": "Audio file not found. Please ensure you provided the correct audio.",
|
433 |
+
"extract_hubert": "Extract Embeddings",
|
434 |
+
"process_error": "An error occurred during processing",
|
435 |
+
"extract_hubert_success": "Embedding extraction completed in {elapsed_time} seconds.",
|
436 |
+
"export_process": "Model path",
|
437 |
+
"extract_error": "An error occurred during data extraction",
|
438 |
+
"extract_success": "Data extraction successful",
|
439 |
+
"min_length>=min_interval>=hop_size": "min_length must be greater than or equal to min_interval and hop_size",
|
440 |
+
"max_sil_kept>=hop_size": "max_sil_kept must be greater than or equal to hop_size",
|
441 |
+
"start_preprocess": "Starting data preprocessing with {num_processes} cores...",
|
442 |
+
"not_integer": "Voice ID folder must be an integer; instead got",
|
443 |
+
"preprocess": "Preprocessing",
|
444 |
+
"preprocess_success": "Preprocessing completed in {elapsed_time} seconds.",
|
445 |
+
"preprocess_model_success": "Preprocessing data for the model completed successfully",
|
446 |
+
"turn_on_dereverb": "Reverb removal for backup vocals requires enabling reverb removal",
|
447 |
+
"turn_on_separator_backing": "Backup vocal separation requires enabling vocal separation",
|
448 |
+
"backing_model_ver": "Backup vocal separation model version",
|
449 |
+
"clean_audio_success": "Audio cleaned successfully!",
|
450 |
+
"separator_error": "An error occurred during music separation",
|
451 |
+
"separator_success": "Music separation completed in {elapsed_time} seconds",
|
452 |
+
"separator_process_2": "Processing music separation",
|
453 |
+
"separator_success_2": "Music separation successful!",
|
454 |
+
"separator_process_backing": "Processing backup vocal separation",
|
455 |
+
"separator_process_backing_success": "Backup vocal separation successful!",
|
456 |
+
"process_original": "Processing original vocal reverb removal...",
|
457 |
+
"process_original_success": "Original vocal reverb removal successful!",
|
458 |
+
"process_main": "Processing main vocal reverb removal...",
|
459 |
+
"process_main_success": "Main vocal reverb removal successful!",
|
460 |
+
"process_backing": "Processing backup vocal reverb removal...",
|
461 |
+
"process_backing_success": "Backup vocal reverb removal successful!",
|
462 |
+
"save_every_epoch": "Save model after: ",
|
463 |
+
"total_e": "Total epochs: ",
|
464 |
+
"dorg": "Pre-trained G: {pretrainG} | Pre-trained D: {pretrainD}",
|
465 |
+
"training_f0": "Pitch Guidance",
|
466 |
+
"not_gpu": "No GPU detected, reverting to CPU (not recommended)",
|
467 |
+
"not_found_checkpoint": "Checkpoint file not found: {checkpoint_path}",
|
468 |
+
"save_checkpoint": "Reloaded checkpoint '{checkpoint_path}' (epoch {checkpoint_dict})",
|
469 |
+
"save_model": "Saved model '{checkpoint_path}' (epoch {iteration})",
|
470 |
+
"sr_does_not_match": "{sample_rate} Sample rate does not match target {sample_rate2} Sample rate",
|
471 |
+
"spec_error": "An error occurred while retrieving specifications from {spec_filename}: {e}",
|
472 |
+
"time_or_speed_training": "time={current_time} | training speed={elapsed_time_str}",
|
473 |
+
"savemodel": "Saved model '{model_dir}' (epoch {epoch} and step {step})",
|
474 |
+
"model_author": "Credit model to {model_author}",
|
475 |
+
"unregistered": "Model unregistered",
|
476 |
+
"not_author": "Model not credited",
|
477 |
+
"training_author": "Model creator name",
|
478 |
+
"training_author_info": "To credit the model, enter your name here",
|
479 |
+
"extract_model_error": "An error occurred while extracting the model",
|
480 |
+
"start_training": "Starting training",
|
481 |
+
"import_pretrain": "Loaded pre-trained model ({dg}) '{pretrain}'",
|
482 |
+
"not_using_pretrain": "No pre-trained model ({dg}) will be used",
|
483 |
+
"training_warning": "WARNING: Generated loss is lower than the lower threshold loss for the next epoch.",
|
484 |
+
"overtraining_find": "Overtraining detected at epoch {epoch} with smoothed generator loss {smoothed_value_gen} and smoothed discriminator loss {smoothed_value_disc}",
|
485 |
+
"best_epoch": "New best epoch {epoch} with smoothed generator loss {smoothed_value_gen} and smoothed discriminator loss {smoothed_value_disc}",
|
486 |
+
"success_training": "Training completed with {epoch} epochs, {global_step} steps, and {loss_gen_all} total generator loss.",
|
487 |
+
"training_info": "Lowest generator loss: {lowest_value_rounded} at epoch {lowest_value_epoch}, step {lowest_value_step}",
|
488 |
+
"model_training_info": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder} | lowest value={lowest_value_rounded} (epoch {lowest_value_epoch} and step {lowest_value_step}) | remaining epochs for overtraining: g/total: {remaining_epochs_gen} d/total: {remaining_epochs_disc} | smoothed generator loss={smoothed_value_gen} | smoothed discriminator loss={smoothed_value_disc}",
|
489 |
+
"model_training_info_2": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder} | lowest value={lowest_value_rounded} (epoch {lowest_value_epoch} and step {lowest_value_step})",
|
490 |
+
"model_training_info_3": "{model_name} | epoch={epoch} | step={global_step} | {epoch_recorder}",
|
491 |
+
"training_error": "An error occurred while training the model:",
|
492 |
+
"separator_info": "Initializing with output path: {output_dir}, output format: {output_format}",
|
493 |
+
"output_dir_is_none": "Output folder not specified. Using current working directory.",
|
494 |
+
">0or=1": "Normalization threshold must be greater than 0 and less than or equal to 1.",
|
495 |
+
"output_single": "Single root output requested; only one file ({output_single_stem}) will be written",
|
496 |
+
"step2": "The second step will be reversed using spectrogram instead of waveform. This may improve quality but is slightly slower.",
|
497 |
+
"name_ver": "Version {name}",
|
498 |
+
"os": "Operating System",
|
499 |
+
"platform_info": "System: {system_info} Name: {node} Release: {release} Machine: {machine} Processor: {processor}",
|
500 |
+
"none_ffmpeg": "FFmpeg is not installed. Please install FFmpeg to use this package.",
|
501 |
+
"install_onnx": "ONNX Runtime package {pu} installed with version",
|
502 |
+
"running_in_cpu": "Unable to configure hardware acceleration, running in CPU mode",
|
503 |
+
"running_in_cuda": "CUDA available in Torch, setting Torch device to CUDA",
|
504 |
+
"onnx_have": "ONNXruntime available {have}, enabling acceleration",
|
505 |
+
"onnx_not_have": "{have} not available in ONNXruntime; acceleration will NOT be enabled",
|
506 |
+
"python_not_install": "Python package: {package_name} is not installed",
|
507 |
+
"hash": "Calculating hash for model file {model_path}",
|
508 |
+
"ioerror": "IOError while seeking -10 MB or reading model file to compute hash: {e}",
|
509 |
+
"cancel_download": "File already exists at {output_path}, skipping download",
|
510 |
+
"download_model": "Downloading file from {url} to {output_path} with a timeout of 300 seconds",
|
511 |
+
"download_error": "Failed to download file from {url}, response code: {status_code}",
|
512 |
+
"vip_model": "Model: '{model_friendly_name}' is a premium model intended by Anjok07 only for paid subscriber access.",
|
513 |
+
"vip_print": "Hey there, if you haven't subscribed, please consider supporting UVR's developer, Anjok07, by subscribing here: https://patreon.com/uvr",
|
514 |
+
"search_model": "Searching for model {model_filename} in the list of supported models in the group",
|
515 |
+
"load_download_json": "Downloaded model list loaded",
|
516 |
+
"single_model": "Identified single model file: {model_friendly_name}",
|
517 |
+
"not_found_model": "Model not found in the UVR repository, attempting to download from the audio model separation repository...",
|
518 |
+
"single_model_path": "Returning path for single model file: {model_path}",
|
519 |
+
"find_model": "Input file name {model_filename} found in multi-file model: {model_friendly_name}",
|
520 |
+
"find_models": "Identified multi-file model: {model_friendly_name}, iterating through files to download",
|
521 |
+
"find_path": "Attempting to determine download PATH for config pair",
|
522 |
+
"not_found_model_warehouse": "Model not found in the UVR repository, attempting to download from the audio model separation repository...",
|
523 |
+
"yaml_warning": "The model name you specified, {model_filename}, is actually a model config file rather than a model file.",
|
524 |
+
"yaml_warning_2": "We found a model matching this config file: {config_key}, so we'll use that model file for this run.",
|
525 |
+
"yaml_warning_3": "To avoid confusing/inconsistent behavior in the future, specify the actual model file name instead.",
|
526 |
+
"yaml_debug": "Config YAML model file not found in UVR repository, attempting to download from the audio model separation repository...",
|
527 |
+
"download_model_friendly": "All files downloaded for model {model_friendly_name}, returning original path {model_path}",
|
528 |
+
"not_found_model_2": "Model file {model_filename} not found in the supported files",
|
529 |
+
"load_yaml": "Loading model data from YAML at path {model_data_yaml_filepath}",
|
530 |
+
"load_yaml_2": "Model data loaded from YAML file: {model_data}",
|
531 |
+
"hash_md5": "Computing MD5 hash for model file to identify model parameters from UVR data...",
|
532 |
+
"model_hash": "Model {model_path} has hash {model_hash}",
|
533 |
+
"mdx_data": "MDX model data path set to {mdx_model_data_path}",
|
534 |
+
"load_mdx": "Loading MDX model parameters from UVR model data file...",
|
535 |
+
"model_not_support": "Unsupported model file: no parameters found for MD5 hash {model_hash} in UVR model data for MDX vault.",
|
536 |
+
"uvr_json": "Model data loaded from UVR JSON with hash {model_hash}: {model_data}",
|
537 |
+
"loading_model": "Loading model {model_filename}...",
|
538 |
+
"download_model_friendly_2": "Downloaded model, friendly name: {model_friendly_name}, Model path: {model_path}",
|
539 |
+
"model_type_not_support": "Unsupported model type: {model_type}",
|
540 |
+
"demucs_not_support_python<3.10": "Demucs models require Python version 3.10 or higher.",
|
541 |
+
"import_module": "Importing module for model type",
|
542 |
+
"initialization": "Initializing separator class for model type",
|
543 |
+
"loading_model_success": "Model loading completed.",
|
544 |
+
"loading_model_duration": "Model loading duration",
|
545 |
+
"starting_separator": "Starting separation process for audio file path",
|
546 |
+
"normalization": "Normalization threshold set to {normalization_threshold}, waveform will be scaled down to this maximum amplitude to prevent clipping.",
|
547 |
+
"loading_separator_model": "Downloading model {model_filename}...",
|
548 |
+
"separator_success_3": "Separation process completed.",
|
549 |
+
"separator_duration": "Separation duration",
|
550 |
+
"downloading_model": "Downloaded model, type: {model_type}, friendly name: {model_friendly_name}, Model path: {model_path}, Model data: {model_data_dict_size} items",
|
551 |
+
"demucs_info": "Demucs parameters: Segment size = {segment_size}, Segment size active = {segments_enabled}",
|
552 |
+
"demucs_info_2": "Demucs parameters: Number of predictions = {shifts}, Overlap = {overlap}",
|
553 |
+
"start_demucs": "Demucs Separator initialization completed",
|
554 |
+
"start_separator": "Starting separation process...",
|
555 |
+
"prepare_mix": "Preparing mixture...",
|
556 |
+
"demix": "Mixture prepared for demixing. Shape: {shape}",
|
557 |
+
"cancel_mix": "Loading model for demixing...",
|
558 |
+
"model_review": "Model loaded and set to evaluation mode.",
|
559 |
+
"del_gpu_cache_after_demix": "Cleared model and GPU cache after demixing.",
|
560 |
+
"process_output_file": "Processing output file...",
|
561 |
+
"source_length": "Processing source array, source length is {source_length}",
|
562 |
+
"process_ver": "Processing source version...",
|
563 |
+
"set_map": "Set source map to {part} parts...",
|
564 |
+
"process_all_part": "Processing for all root parts...",
|
565 |
+
"skip_part": "Skipping root part {stem_name} as out_single_stem is set to {output_single_stem}...",
|
566 |
+
"starting_demix_demucs": "Starting the demix process in demix_demucs...",
|
567 |
+
"model_infer": "Running model inference...",
|
568 |
+
"name_not_pretrained": "{name} is not a pre-trained model or a model bundle.",
|
569 |
+
"invalid_checksum": "Invalid checksum for file {path}, expected {checksum} but got {actual_checksum}",
|
570 |
+
"mdx_info": "MDX parameters: Batch size = {batch_size}, Segment size = {segment_size}",
|
571 |
+
"mdx_info_2": "MDX parameters: Overlap = {overlap}, Hop_length = {hop_length}, Denoising enabled = {enable_denoise}",
|
572 |
+
"mdx_info_3": "MDX parameters",
|
573 |
+
"load_model_onnx": "Loading ONNX model for inference...",
|
574 |
+
"load_model_onnx_success": "Successfully loaded model using ONNXruntime inference session.",
|
575 |
+
"onnx_to_pytorch": "Model converted from ONNX to PyTorch due to mismatched segment size with dim_t, processing may be slower.",
|
576 |
+
"stft": "Inverse STFT applied. Returning result with shape",
|
577 |
+
"no_denoise": "Model running on spectrum without denoising.",
|
578 |
+
"mix": "Preparing mix for input audio file {audio_file_path}...",
|
579 |
+
"normalization_demix": "Normalizing mix prior to demixing...",
|
580 |
+
"mix_success": "Mix preparation completed.",
|
581 |
+
"primary_source": "Normalizing primary source...",
|
582 |
+
"secondary_source": "Producing secondary source: Mixing in compatible mode",
|
583 |
+
"invert_using_spec": "Inverting secondary stem using spectrum when invert_USE_spec is set to True",
|
584 |
+
"invert_using_spec_2": "Inverting secondary stem by subtracting transformed stem from the initial transformed mix",
|
585 |
+
"enable_denoise": "Model running on both positive and negative spectrums for denoising.",
|
586 |
+
"is_match_mix": "is_match_mix: Predicted spectrum obtained directly from STFT output.",
|
587 |
+
"save_secondary_stem_output_path": "Saving secondary stem {stem_name} to {stem_output_path}...",
|
588 |
+
"starting_model": "Initializing model settings...",
|
589 |
+
"input_info": "Model input parameters",
|
590 |
+
"model_settings": "Model settings",
|
591 |
+
"initialize_mix": "Initializing mix with is_ckpt = {is_ckpt}. Initial mix shape: {shape}",
|
592 |
+
"!=2": "Expected 2-channel audio signal but got {shape} channels",
|
593 |
+
"process_check": "Processing in checkpoint mode...",
|
594 |
+
"stft_2": "STFT applied to mix. Spectrum shape: {shape}",
|
595 |
+
"cache": "Computed padding",
|
596 |
+
"shape": "Mix shape after padding: {shape}, Number of parts: {num_chunks}",
|
597 |
+
"process_no_check": "Processing in no-checkpoint mode...",
|
598 |
+
"n_sample_or_pad": "Number of samples: {n_sample}, Computed padding: {pad}",
|
599 |
+
"shape_2": "Mix shape after padding",
|
600 |
+
"process_part": "Processed part {mix_waves}: Start {i}, End {ii}",
|
601 |
+
"mix_waves_to_tensor": "Converted mix_waves to tensor. Tensor shape: {shape}",
|
602 |
+
"mix_match": "Mix mode Match; applying compensation factor.",
|
603 |
+
"tar_waves": "tar_waves. Shape",
|
604 |
+
"normalization_2": "Normalizing result by dividing it by divisor.",
|
605 |
+
"mix_wave": "Processing mix_wave batch",
|
606 |
+
"mix_or_batch": "Mix parts into batches. Number of batches",
|
607 |
+
"demix_is_match_mix": "Starting demix process with is_match_mix,",
|
608 |
+
"mix_shape": "Root mix parts stored. Shape",
|
609 |
+
"chunk_size_or_overlap": "Chunk size for compatible mixing: {chunk_size}, Overlap: {overlap}",
|
610 |
+
"chunk_size_or_overlap_standard": "Standard chunk size: {chunk_size}, Overlap: {overlap}",
|
611 |
+
"calc_size": "Generated size calculated",
|
612 |
+
"window": "Window applied to this segment.",
|
613 |
+
"process_part_2": "Processing segment {total}/{total_chunks}: Start {start}, End {end}",
|
614 |
+
"all_process_part": "Total segments to process",
|
615 |
+
"step_or_overlap": "Step size to process parts: {step} with overlap set to {overlap}.",
|
616 |
+
"mix_cache": "Mix prepared with padding. Mix shape",
|
617 |
+
"dims": "Cannot use sin/cos position encoding with odd dimensions (dim={dims})",
|
618 |
+
"activation": "activation must be relu/gelu, not {activation}",
|
619 |
+
"length_or_training_length": "Provided length {length} exceeds training duration {training_length}",
|
620 |
+
"type_not_valid": "Invalid type for",
|
621 |
+
"del_parameter": "Removing non-existent parameter ",
|
622 |
+
"info": "Common parameters: Model name = {model_name}, Model path = {model_path}",
|
623 |
+
"info_2": "Common parameters: Output path = {output_dir}, Output format = {output_format}",
|
624 |
+
"info_3": "Common parameters: Normalization threshold = {normalization_threshold}",
|
625 |
+
"info_4": "Common parameters: Denoising enabled = {enable_denoise}, Single stem output = {output_single_stem}",
|
626 |
+
"info_5": "Common parameters: Inversion using specs = {invert_using_spec}, Sample rate = {sample_rate}",
|
627 |
+
"info_6": "Common parameters: Primary root name = {primary_stem_name}, Secondary root name = {secondary_stem_name}",
|
628 |
+
"info_7": "Common parameters: Karaoke mode = {is_karaoke}, BV model = {is_bv_model}, BV model rebalancing = {bv_model_rebalance}",
|
629 |
+
"success_process": "Completed processing root {stem_name} and writing audio...",
|
630 |
+
"load_audio": "Loading audio from file",
|
631 |
+
"load_audio_success": "Audio loaded. Sample rate: {sr}, Audio shape: {shape}",
|
632 |
+
"convert_mix": "Converting provided mix array.",
|
633 |
+
"convert_shape": "Converted mix shape: {shape}",
|
634 |
+
"audio_not_valid": "Audio file {audio_path} is empty or invalid",
|
635 |
+
"audio_valid": "Audio file is valid and contains data.",
|
636 |
+
"mix_single": "Mix is mono. Converting to stereo.",
|
637 |
+
"convert_mix_audio": "Converted to stereo mix.",
|
638 |
+
"mix_success_2": "Mix preparation completed.",
|
639 |
+
"duration": "Audio duration is {duration_hours} hours ({duration_seconds} seconds).",
|
640 |
+
"write": "Using {name} to write.",
|
641 |
+
"write_audio": "Writing {name} with root path:",
|
642 |
+
"original_not_valid": "Warning: Original source array is nearly silent or empty.",
|
643 |
+
"shape_audio": "Audio data shape before processing",
|
644 |
+
"convert_data": "Data type before conversion",
|
645 |
+
"original_source_to_int16": "Converted original_source to int16.",
|
646 |
+
"shape_audio_2": "Interleaved audio data shape",
|
647 |
+
"create_audiosegment": "Successfully created AudioSegment.",
|
648 |
+
"create_audiosegment_error": "Specific error while creating AudioSegment",
|
649 |
+
"export_error": "Error exporting audio file",
|
650 |
+
"export_success": "Successfully exported audio file to",
|
651 |
+
"clean": "Running garbage collection...",
|
652 |
+
"clean_cache": "Clearing {name} cache...",
|
653 |
+
"del_path": "Deleting path, source, and root of input audio file...",
|
654 |
+
"not_success": "Process was not successful: ",
|
655 |
+
"resample_error": "Error during resampling",
|
656 |
+
"shapes": "Shapes",
|
657 |
+
"wav_resolution": "Resolution type",
|
658 |
+
"warnings": "Warning: Extremely aggressive values detected",
|
659 |
+
"warnings_2": "Warning: NaN or infinite values detected in wave input. Shape",
|
660 |
+
"process_file": "Processing file... \n",
|
661 |
+
"save_instruments": "Saving reverse track...",
|
662 |
+
"assert": "Audio files must have the same shape - Mix: {mixshape}, Inst: {instrumentalshape}",
|
663 |
+
"rubberband": "Rubberband CLI cannot be executed. Please ensure Rubberband-CLI is installed.",
|
664 |
+
"rate": "Rate must be strictly positive",
|
665 |
+
"gdown_error": "Could not retrieve the public link for the file. You may need to change its permissions to 'Anyone with the link' or there may already be excessive access permissions.",
|
666 |
+
"to": "To:",
|
667 |
+
"gdown_value_error": "A path or ID must be specified",
|
668 |
+
"missing_url": "URL is missing",
|
669 |
+
"mac_not_match": "MAC does not match",
|
670 |
+
"file_not_access": "File is not accessible",
|
671 |
+
"int_resp==-3": "Request failed, retrying",
|
672 |
+
"search_separate": "Search for separate files...",
|
673 |
+
"found_choice": "Found {choice}",
|
674 |
+
"separator==0": "No separate files found!",
|
675 |
+
"select_separate": "Select separate files",
|
676 |
+
"start_app": "Starting interface...",
|
677 |
+
"provide_audio": "Enter the path to the audio file",
|
678 |
+
"set_torch_mps": "Set Torch device to MPS",
|
679 |
+
"googletts": "Convert text using Google",
|
680 |
+
"pitch_info_2": "Pitch adjustment for text-to-speech converter",
|
681 |
+
"waveform": "Waveform must have the shape (# frames, # channels)",
|
682 |
+
"freq_mask_smooth_hz": "freq_mask_smooth_hz must be at least {hz}Hz",
|
683 |
+
"time_mask_smooth_ms": "time_mask_smooth_ms must be at least {ms}ms",
|
684 |
+
"x": "x must be greater",
|
685 |
+
"xn": "xn must be greater",
|
686 |
+
"not_found_pid": "No processes found!",
|
687 |
+
"end_pid": "Process terminated!",
|
688 |
+
"clean_audios": "Starting audio cleanup...",
|
689 |
+
"clean_audios_success": "Audio file cleanup complete!",
|
690 |
+
"clean_separate": "Starting cleanup of separation model...",
|
691 |
+
"clean_separate_success": "Separation model cleanup complete!",
|
692 |
+
"clean_model": "Starting model cleanup...",
|
693 |
+
"clean_model_success": "Model file cleanup complete!",
|
694 |
+
"clean_index": "Starting index cleanup...",
|
695 |
+
"clean_index_success": "Index file cleanup complete!",
|
696 |
+
"clean_pretrain": "Starting pretrained model cleanup...",
|
697 |
+
"clean_pretrain_success": "Pretrained model cleanup complete!",
|
698 |
+
"clean_all_audios": "Starting cleanup of all audio files...",
|
699 |
+
"clean_all_audios_success": "All audio file cleanup complete!",
|
700 |
+
"not_found_separate_model": "No separation model files found!",
|
701 |
+
"clean_all_separate_model": "Starting cleanup of all separation model files...",
|
702 |
+
"clean_all_separate_model_success": "All separation model files cleanup complete!",
|
703 |
+
"clean_all_models_success": "All model files cleanup complete",
|
704 |
+
"not_found_pretrained": "No pretrained model files found!",
|
705 |
+
"clean_all_pretrained": "Starting cleanup of all pretrained model files...",
|
706 |
+
"clean_all_pretrained_success": "All pretrained model cleanup complete!",
|
707 |
+
"not_found_log": "No log files found!",
|
708 |
+
"clean_all_log": "Starting cleanup of all log files...",
|
709 |
+
"clean_all_log_success": "Log file cleanup complete!",
|
710 |
+
"not_found_predictors": "No predictor model files found!",
|
711 |
+
"clean_all_predictors": "Starting cleanup of all predictor model files...",
|
712 |
+
"clean_all_predictors_success": "Predictor model cleanup complete!",
|
713 |
+
"not_found_embedders": "No embedder model files found!",
|
714 |
+
"clean_all_embedders": "Starting cleanup of all embedder model files...",
|
715 |
+
"clean_all_embedders_success": "Embedder model cleanup complete!",
|
716 |
+
"provide_folder": "Please provide a valid folder!",
|
717 |
+
"empty_folder": "The data folder is empty!",
|
718 |
+
"clean_dataset": "Starting dataset folder cleanup...",
|
719 |
+
"clean_dataset_success": "Dataset folder cleanup complete!",
|
720 |
+
"vocoder": "Vocoder",
|
721 |
+
"vocoder_info": "A vocoder analyzes and synthesizes human speech signals for voice transformation.",
|
722 |
+
"code_error": "Error: Received status code",
|
723 |
+
"json_error": "Error: Unable to parse response.",
|
724 |
+
"requests_error": "Request failed: {e}",
|
725 |
+
"memory_efficient_training": "Using memory-efficient training",
|
726 |
+
"not_use_pretrain_error_download": "Will not use pretrained models due to missing files",
|
727 |
+
"start_clean_model": "Starting cleanup of all models...",
|
728 |
+
"provide_file_settings": "Please provide a preset settings file!",
|
729 |
+
"load_presets": "Loaded preset file {presets}",
|
730 |
+
"provide_filename_settings": "Please provide a preset file name!",
|
731 |
+
"choose1": "Please select one to export!",
|
732 |
+
"export_settings": "Exported preset file {name}",
|
733 |
+
"use_presets": "Using preset file",
|
734 |
+
"file_preset": "Preset file",
|
735 |
+
"load_file": "Load file",
|
736 |
+
"export_file": "Export preset file",
|
737 |
+
"save_clean": "Save cleanup",
|
738 |
+
"save_autotune": "Save autotune",
|
739 |
+
"save_pitch": "Save pitch",
|
740 |
+
"save_index_2": "Save index impact",
|
741 |
+
"save_resample": "Save resampling",
|
742 |
+
"save_filter": "Save median filter",
|
743 |
+
"save_envelope": "Save sound envelope",
|
744 |
+
"save_protect": "Save sound protection",
|
745 |
+
"save_split": "Save sound split",
|
746 |
+
"filename_to_save": "File name to save",
|
747 |
+
"upload_presets": "Upload preset file",
|
748 |
+
"stop": "Stop process",
|
749 |
+
"stop_separate": "Stop Music Separation",
|
750 |
+
"stop_convert": "Stop Conversion",
|
751 |
+
"stop_create_dataset": "Stop Dataset Creation",
|
752 |
+
"stop_training": "Stop Training",
|
753 |
+
"stop_extract": "Stop Data Processing",
|
754 |
+
"stop_preprocess": "Stop Data Extraction",
|
755 |
+
"cleaner": "Cleaner",
|
756 |
+
"clean_audio": "Clean audio files",
|
757 |
+
"clean_all": "Clean all",
|
758 |
+
"clean_file": "Clean file",
|
759 |
+
"clean_models": "Clean model files",
|
760 |
+
"clean_pretrained": "Clean pretrained model files",
|
761 |
+
"clean_separated": "Clean separated model files",
|
762 |
+
"clean_presets": "Clean preset files",
|
763 |
+
"clean_datasets": "Clean training dataset folder",
|
764 |
+
"clean_dataset_folder": "Clean dataset folder",
|
765 |
+
"clean_log": "Clean log files",
|
766 |
+
"clean_predictors": "Clean predictor models",
|
767 |
+
"clean_embed": "Clean embedder models",
|
768 |
+
"clean_presets_2": "Starting cleanup of preset files...",
|
769 |
+
"clean_presets_success": "Preset file cleanup complete!",
|
770 |
+
"not_found_presets": "No preset files found in the folder!",
|
771 |
+
"clean_all_presets": "Starting cleanup of all preset files...",
|
772 |
+
"clean_all_presets_success": "All preset file cleanup complete!",
|
773 |
+
"port": "Port {port} is unavailable! Lowering port by one...",
|
774 |
+
"empty_json": "{file}: Corrupted or empty",
|
775 |
+
"thank": "Thank you for reporting the issue, and apologies for any inconvenience caused!",
|
776 |
+
"error_read_log": "An error occurred while reading log files!",
|
777 |
+
"error_send": "An error occurred while sending the report! Please contact me on Discord: pham_huynh_anh!",
|
778 |
+
"report_bugs": "Report Bugs",
|
779 |
+
"agree_log": "Agree to provide all log files",
|
780 |
+
"error_info": "Error description",
|
781 |
+
"error_info_2": "Provide more information about the error",
|
782 |
+
"report_bug_info": "Report bugs encountered during program usage",
|
783 |
+
"sr_info": "NOTE: SOME FORMATS DO NOT SUPPORT RATES ABOVE 48000",
|
784 |
+
"report_info": "If possible, agree to provide log files to help with debugging.\n\nIf log files are not provided, please describe the error in detail, including when and where it occurred.\n\nIf this reporting system also fails, you can reach out via [ISSUE]({github}) or Discord: `pham_huynh_anh`",
|
785 |
+
"default_setting": "An error occurred during separation, resetting all settings to default...",
|
786 |
+
"dataset_folder1": "Please enter the data folder name",
|
787 |
+
"checkpointing_err": "Pretrained model parameters such as sample rate or architecture do not match the selected model."
|
788 |
+
}
|
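Note: the values in these language files are Python `str.format`-style templates keyed by message ID, with placeholders such as `{audio_files}` or `{elapsed_time}` filled in at runtime. The sketch below only illustrates how such a file could be loaded and formatted; the function names are hypothetical, and the project's actual loader is not part of this diff.

```python
# Illustrative sketch only: loading a language file like the ones added in this
# commit and formatting one of its templates. Function names are hypothetical;
# the project's real loader is defined elsewhere in the repository.
import json

def load_language(path="assets/languages/en-US.json"):
    # Every key maps to a UI/log message template using {placeholder} fields.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

def translate(strings, key, **kwargs):
    # Fall back to the raw key if the message ID is missing from the file.
    return strings.get(key, key).format(**kwargs)

if __name__ == "__main__":
    strings = load_language()
    print(translate(strings, "found_audio", audio_files=12))
    # -> Found 12 audio files for conversion.
```

Switching the display language then only requires pointing the loader at assets/languages/vi-VN.json, which carries the same keys with Vietnamese values.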
assets/languages/vi-VN.json
ADDED
@@ -0,0 +1,788 @@
|
1 |
+
{
|
2 |
+
"set_lang": "Đã đặt ngôn ngữ hiển thị là {lang}",
|
3 |
+
"no_support_gpu": "Thật không may, không có GPU tương thích để hỗ trợ việc đào tạo của bạn.",
|
4 |
+
"text": "văn bản",
|
5 |
+
"upload_success": "Đã tải lên tệp {name} hoàn tất.",
|
6 |
+
"download_url": "Tải từ đường dẫn liên kết",
|
7 |
+
"download_from_csv": "Tải từ kho mô hình csv",
|
8 |
+
"search_models": "Tìm kiếm mô hình",
|
9 |
+
"upload": "Tải lên",
|
10 |
+
"option_not_valid": "Tùy chọn không hợp lệ!",
|
11 |
+
"list_model": "Danh sách mô hình",
|
12 |
+
"success": "Hoàn tất!",
|
13 |
+
"index": "chỉ mục",
|
14 |
+
"model": "mô hình",
|
15 |
+
"zip": "nén",
|
16 |
+
"search": "tìm kiếm",
|
17 |
+
"provide_file": "Vui lòng cung cấp tệp {filename} hợp lệ!",
|
18 |
+
"start": "Bắt đầu {start}...",
|
19 |
+
"not_found": "Không tìm thấy {name}",
|
20 |
+
"found": "Đã tìm thấy {results} kết quả!",
|
21 |
+
"download_music": "tải nhạc",
|
22 |
+
"download": "tải xuống",
|
23 |
+
"provide_url": "Vui lòng cung cấp đường dẫn liên kết.",
|
24 |
+
"provide_name_is_save": "Vui lòng cung cấp tên mô hình để lưu.",
|
25 |
+
"not_support_url": "Liên kết mô hình của bạn không được hỗ trợ.",
|
26 |
+
"error_occurred": "Đã xảy ra lỗi: {e}",
|
27 |
+
"not_model": "Tệp bạn vừa tải lên không phải là tệp mô hình!",
|
28 |
+
"unable_analyze_model": "Không phân tích được mô hình!",
|
29 |
+
"download_pretrain": "Tải xuống huấn luyện trước...",
|
30 |
+
"provide_pretrain": "Vui lòng cung cấp đường dẫn mô hình huấn luyện trước {dg}.",
|
31 |
+
"provide_hubert": "Vui lòng đưa đường dẫn liên kết đến mô hình nhúng",
|
32 |
+
"sr_not_same": "Tốc độ lấy mẫu của hai mô hình không giống nhau",
|
33 |
+
"architectures_not_same": "Không thể hợp nhất các mô hình. Các kiến trúc mô hình không giống nhau.",
|
34 |
+
"fushion_model": "dung hợp mô hình",
|
35 |
+
"model_fushion_info": "Mô hình được {name} được dung hợp từ {pth_1} và {pth_2} với ratio {ratio}.",
|
36 |
+
"not_found_create_time": "Không tìm thấy thời gian tạo.",
|
37 |
+
"format_not_valid": "Định dạng không hợp lệ.",
|
38 |
+
"read_info": "Các mô hình được huấn luyện trên các ứng dụng khác nhau có thể đem lại các thông tin khác nhau hoặc không thể đọc!",
|
39 |
+
"epoch": "kỷ nguyên.",
|
40 |
+
"step": "bước",
|
41 |
+
"sr": "Tốc độ lấy mẫu",
|
42 |
+
"f0": "huấn luyện cao độ",
|
43 |
+
"version": "phiên bản.",
|
44 |
+
"not_f0": "Không được huấn luyện cao độ",
|
45 |
+
"trained_f0": "Được huấn luyện cao độ",
|
46 |
+
"model_info": "Tên mô hình: {model_name}\n\n Người tạo mô hình: {model_author}\n\nKỷ nguyên: {epochs}\n\nSố bước: {steps}\n\nPhiên bản của mô hình: {version}\n\nTốc độ lấy mẫu: {sr}\n\nHuấn luyện cao độ: {pitch_guidance}\n\nHash (ID): {model_hash}\n\nThời gian tạo: {creation_date_str}\n",
|
47 |
+
"input_not_valid": "Vui lòng nhập đầu vào hợp lệ!",
|
48 |
+
"output_not_valid": "Vui lòng nhập đầu ra hợp lệ!",
|
49 |
+
"apply_effect": "áp dụng hiệu ứng",
|
50 |
+
"enter_the_text": "Vui lòng nhập văn bản để chuyển!",
|
51 |
+
"choose_voice": "Vui lòng chọn giọng!",
|
52 |
+
"convert": "Chuyển đổi {name}...",
|
53 |
+
"separator_music": "tách nhạc",
|
54 |
+
"notfound": "Không tìm thấy",
|
55 |
+
"turn_on_use_audio": "Vui lòng bật sử dụng âm thanh vừa tách để sử dụng",
|
56 |
+
"turn_off_convert_backup": "Tắt chuyển đổi giọng bè để có thể sử dụng giọng gốc",
|
57 |
+
"turn_off_merge_backup": "Tắt không kết hợp giọng bè để có thể sử dụng giọng gốc",
|
58 |
+
"not_found_original_vocal": "Không tìm thấy giọng gốc!",
|
59 |
+
"convert_vocal": "Đang chuyển đổi giọng nói...",
|
60 |
+
"convert_success": "Đã hoàn tất chuyển đổi giọng nói!",
|
61 |
+
"convert_backup": "Đang chuyển đổi giọng bè...",
|
62 |
+
"convert_backup_success": "Đã Hoàn tất chuyển đổi giọng bè!",
|
63 |
+
"merge_backup": "Kết hợp giọng với giọng bè...",
|
64 |
+
"merge_success": "Kết hợp Hoàn tất.",
|
65 |
+
"is_folder": "Đầu vào là một thư mục: Chuyển đổi tất cả tệp âm thanh trong thư mục...",
|
66 |
+
"not_found_in_folder": "Không tìm thấy tệp âm thanh trong thư mục!",
|
67 |
+
"batch_convert": "Đang chuyển đổi hàng loạt...",
|
68 |
+
"batch_convert_success": "Chuyển đổi hàng loạt hoàn tất!",
|
69 |
+
"create": "tạo",
|
70 |
+
"provide_name": "Vui lòng cung cấp tên mô hình.",
|
71 |
+
"not_found_data": "Không tìm thấy dữ liệu",
|
72 |
+
"not_found_data_preprocess": "Không tìm thấy dữ liệu được xử lý, vui lòng xử lý lại âm thanh",
|
73 |
+
"not_found_data_extract": "Không tìm thấy dữ liệu được trích xuất, vui lòng trích xuất lại âm thanh",
|
74 |
+
"provide_pretrained": "Vui lòng nhập huấn luyện {dg}",
|
75 |
+
"download_pretrained": "Tải xuống huấn luyện trước {dg}{rvc_version} gốc",
|
76 |
+
"not_found_pretrain": "Không tìm thấy huấn luyện trước {dg}",
|
77 |
+
"not_use_pretrain": "Sẽ không có huấn luyện trước được sử dụng",
|
78 |
+
"training": "huấn luyện",
|
79 |
+
"display_title": "<h1> 🎵 Giao diện chuyển đổi và huấn luyện mô hình giọng nói được tạo bởi Anh 🎵 <h1>",
|
80 |
+
"rick_roll": "Bấm vào đây nếu bạn muốn bị Rick Roll:) ---> [RickRoll]({rickroll})",
|
81 |
+
"terms_of_use": "**Vui lòng không sử dụng Dự án với bất kỳ mục đích nào vi phạm đạo đức, pháp luật, hoặc gây tổn hại đến cá nhân, tổ chức...**",
|
82 |
+
"exemption": "**Trong trường hợp người sử dụng không tuân thủ các điều khoản hoặc vi phạm, tôi sẽ không chịu trách nhiệm về bất kỳ khiếu nại, thiệt hại, hay trách nhiệm pháp lý nào, dù là trong hợp đồng, do sơ suất, hay các lý do khác, phát sinh từ, ngoài, hoặc liên quan đến phần mềm, việc sử dụng phần mềm hoặc các giao dịch khác liên quan đến phần mềm.**",
|
83 |
+
"separator_tab": "Tách Nhạc",
|
84 |
+
"4_part": "Một hệ thống tách nhạc đơn giản có thể tách được 4 phần: Nhạc, giọng, giọng chính, giọng bè",
|
85 |
+
"clear_audio": "Làm sạch âm thanh",
|
86 |
+
"separator_backing": "Tách giọng bè",
|
87 |
+
"denoise_mdx": "Khữ tách MDX",
|
88 |
+
"use_mdx": "Sử dụng MDX",
|
89 |
+
"dereveb_audio": "Tách vang",
|
90 |
+
"dereveb_backing": "Tách vang bè",
|
91 |
+
"separator_model": "Mô hình tách nhạc",
|
92 |
+
"separator_backing_model": "Mô hình tách bè",
|
93 |
+
"shift": "Số lượng dự đoán",
|
94 |
+
"shift_info": "Càng cao chất lượng càng tốt nhưng lâu nhưng tốn tài nguyên",
|
95 |
+
"segments_size": "Kích Thước Phân Đoạn",
|
96 |
+
"segments_size_info": "Càng cao chất lượng càng tốt nhưng tốn tài nguyên",
|
97 |
+
"batch_size": "Kích thước lô",
|
98 |
+
"batch_size_info": "Số lượng mẫu xử lý đồng thời trong một lần huấn luyện. Cao có thể gây tràn bộ nhớ",
|
99 |
+
"mdx_batch_size_info": "Số lượng mẫu được xử lý cùng một lúc. Việc chia thành các lô giúp tối ưu hóa quá trình tính toán. Lô quá lớn có thể làm tràn bộ nhớ, khi lô quá nhỏ sẽ làm giảm hiệu quả dùng tài nguyên",
|
100 |
+
"overlap": "Chồng chéo",
|
101 |
+
"overlap_info": "Số lượng chồng chéo giữa các cửa sổ dự đoán",
|
102 |
+
"export_format": "Định dạng âm thanh",
|
103 |
+
"export_info": "Định dạng âm thanh khi xuất tệp âm thanh ra",
|
104 |
+
"output_separator": "Âm thanh đã được tách",
|
105 |
+
"hop_length_info": "Khoảng thời gian chuyển cửa sổ phân tích khi thực hiện phép biến đổi. Giá trị nhỏ độ chi tiết cao nhưng cần tính toán nhiều hơn",
|
106 |
+
"drop_audio": "Thả âm thanh vào đây",
|
107 |
+
"drop_text": "Thả tệp văn bản vào đây",
|
108 |
+
"use_url": "Sử dụng đường dẫn youtube",
|
109 |
+
"url_audio": "Đường dẫn liên kết đến âm thanh",
|
110 |
+
"downloads": "Tải Xuống",
|
111 |
+
"clean_strength": "Mức độ làm sạch âm thanh",
|
112 |
+
"clean_strength_info": "Mức độ của bộ làm sạch âm thanh để lọc giọng hát khi xuất",
|
113 |
+
"input_output": "Đầu vào, đầu ra âm thanh",
|
114 |
+
"audio_path": "Đường dẫn đầu vào âm thanh",
|
115 |
+
"refesh": "Tải lại",
|
116 |
+
"output_folder": "Đường dẫn thư mục đầu ra âm thanh",
|
117 |
+
"output_folder_info": "Nhập đường dẫn thư mục âm thanh sẽ xuất ra ở đó",
|
118 |
+
"input_audio": "Đầu vào âm thanh",
|
119 |
+
"instruments": "Nhạc nền",
|
120 |
+
"original_vocal": "Giọng gốc",
|
121 |
+
"main_vocal": "Giọng chính",
|
122 |
+
"backing_vocal": "Giọng bè",
|
123 |
+
"convert_audio": "Chuyển Đổi Âm Thanh",
|
124 |
+
"convert_info": "Chuyển đổi âm thanh bằng mô hình giọng nói đã được huấn luyện",
|
125 |
+
"autotune": "Tự động điều chỉnh",
|
126 |
+
"use_audio": "Sử dụng âm thanh vừa tách",
|
127 |
+
"convert_original": "Chuyển đổi giọng gốc",
|
128 |
+
"convert_backing": "Chuyển đổi giọng bè",
|
129 |
+
"not_merge_backing": "Không kết hợp giọng bè",
|
130 |
+
"merge_instruments": "Kết hợp nhạc nền",
|
131 |
+
"pitch": "Cao độ",
|
132 |
+
"pitch_info": "Khuyến cáo: chỉnh lên 12 để chuyển giọng nam thành nữ và ngược lại",
|
133 |
+
"model_accordion": "Mô hình và chỉ mục",
|
134 |
+
"model_name": "Tệp mô hình",
|
135 |
+
"index_path": "Tệp chỉ mục",
|
136 |
+
"index_strength": "Ảnh hưởng của chỉ mục",
|
137 |
+
"index_strength_info": "Càng cao ảnh hưởng càng lớn. Tuy nhiên, việc chọn giá trị thấp hơn có thể giảm hiện tượng giả trong âm thanh",
|
138 |
+
"output_path": "Đường dẫn đầu ra âm thanh",
|
139 |
+
"output_path_info": "Nhập đường dẫn đ���u ra(cứ để định dạng .wav khi chuyển đổi nó tự sửa)",
|
140 |
+
"setting": "Cài đặt chung",
|
141 |
+
"f0_method": "Phương pháp trích xuất",
|
142 |
+
"f0_method_info": "Phương pháp để trích xuất dữ liệu",
|
143 |
+
"f0_method_hybrid": "Phương pháp trích xuất HYBRID",
|
144 |
+
"f0_method_hybrid_info": "Sự kết hợp của hai hoặc nhiều loại trích xuất khác nhau",
|
145 |
+
"hubert_model": "Mô hình nhúng",
|
146 |
+
"hubert_info": "Mô hình được huấn luyện trước để giúp nhúng",
|
147 |
+
"modelname": "Tên của mô hình",
|
148 |
+
"modelname_info": "Nếu bạn có mô hình riêng chỉ cần tải và nhập tên của mô hình vào đây",
|
149 |
+
"split_audio": "Cắt âm thanh",
|
150 |
+
"autotune_rate": "Mức độ điều chỉnh",
|
151 |
+
"autotune_rate_info": "Mức độ điều chỉnh tự động",
|
152 |
+
"resample": "Lấy mẫu lại",
|
153 |
+
"resample_info": "Lấy mẫu lại sau xử lý đến tốc độ lấy mẫu cuối cùng, 0 có nghĩa là không lấy mẫu lại, LƯU Ý: MỘT SỐ ĐỊNH DẠNG KHÔNG HỖ TRỢ TỐC ĐỘ TRÊN 48000",
|
154 |
+
"filter_radius": "Lọc trung vị",
|
155 |
+
"filter_radius_info": "Nếu giá trị lớn hơn ba sẽ áp dụng tính năng lọc trung vị. Giá trị đại diện cho bán kính bộ lọc và có thể làm giảm hơi thở hoặc tắt thở.",
|
156 |
+
"volume_envelope": "Đường bao âm thanh",
|
157 |
+
"volume_envelope_info": "Sử dụng đường bao âm lượng của đầu vào để thay thế hoặc trộn với đường bao âm lượng của đầu ra. Càng gần 1 thì đường bao đầu ra càng được sử dụng nhiều",
|
158 |
+
"protect": "Bảo vệ phụ âm",
|
159 |
+
"protect_info": "Bảo vệ các phụ âm riêng biệt và âm thanh thở ngăn chặn việc rách điện âm và các hiện tượng giả khác. Việc chỉnh tối đa sẽ bảo vệ toàn diện. Việc giảm giá trị này có thể giảm độ bảo vệ, đồng thời có khả năng giảm thiểu hiệu ứng lập chỉ mục",
|
160 |
+
"output_convert": "Âm thanh đã được chuyển đổi",
|
161 |
+
"main_convert": "Chuyển đổi giọng chính",
|
162 |
+
"main_or_backing": "Giọng chính + Giọng bè",
|
163 |
+
"voice_or_instruments": "Giọng + Nhạc nền",
|
164 |
+
"convert_text": "Chuyển Đổi Văn Bản",
|
165 |
+
"convert_text_markdown": "## Chuyển Đổi Văn Bản Thành Giọng Nói",
|
166 |
+
"convert_text_markdown_2": "Chuyển văn bản thành giọng nói và đọc lại bằng mô hình giọng nói được huấn luyện",
|
167 |
+
"input_txt": "Nhập dữ liệu từ tệp văn bản txt",
|
168 |
+
"text_to_speech": "Văn bản cần đọc",
|
169 |
+
"voice_speed": "Tốc độ đọc",
|
170 |
+
"voice_speed_info": "Tốc độ đọc của giọng nói",
|
171 |
+
"tts_1": "1. Chuyển Đổi Văn Bản",
|
172 |
+
"tts_2": "2. Chuyển Đổi Giọng Nói",
|
173 |
+
"voice": "Giọng nói của các nước",
|
174 |
+
"output_tts": "Đường dẫn đầu ra giọng nói",
|
175 |
+
"output_tts_convert": "Đường dẫn đầu ra giọng chuyển đổi",
|
176 |
+
"tts_output": "Nhập đường dẫn đầu ra",
|
177 |
+
"output_tts_markdown": "Âm thanh chưa được chuyển đổi và âm thanh đã được chuyển đổi",
|
178 |
+
"output_text_to_speech": "Giọng được tạo bởi chuyển đổi văn bản thành giọng nói",
|
179 |
+
"output_file_tts_convert": "Giọng được chuyển đổi bởi mô hình",
|
180 |
+
"output_audio": "Đầu ra âm thanh",
|
181 |
+
"provide_output": "Nhập đường dẫn đầu ra",
|
182 |
+
"audio_effects": "Hiệu Ứng Âm Thanh",
|
183 |
+
"apply_audio_effects": "## Áp Dụng Thêm Hiệu Ứng Cho Âm Thanh",
|
184 |
+
"audio_effects_edit": "Chỉnh sửa thêm hiệu ứng cho âm thanh",
|
185 |
+
"reverb": "Hiệu ứng vọng âm",
|
186 |
+
"chorus": "Hiệu ứng hòa âm",
|
187 |
+
"delay": "Hiệu ứng độ trễ",
|
188 |
+
"more_option": "Tùy chọn thêm",
|
189 |
+
"phaser": "Hiệu ứng xoay pha",
|
190 |
+
"compressor": "Hiệu ứng nén",
|
191 |
+
"apply": "Áp dụng",
|
192 |
+
"reverb_freeze": "Chế độ đóng băng",
|
193 |
+
"reverb_freeze_info": "Tạo hiệu ứng vang liên tục khi bật chế độ này",
|
194 |
+
"room_size": "Kích thước phòng",
|
195 |
+
"room_size_info": "Điều chỉnh không gian của phòng để tạo độ vang",
|
196 |
+
"damping": "Giảm âm",
|
197 |
+
"damping_info": "Điều chỉnh độ hút âm, kiểm soát mức độ vang",
|
198 |
+
"wet_level": "Mức độ tín hiệu vang",
|
199 |
+
"wet_level_info": "Điều chỉnh mức độ của tín hiệu có hiệu ứng vọng âm",
|
200 |
+
"dry_level": "Mức độ tín hiệu gốc",
|
201 |
+
"dry_level_info": "Điều chỉnh mức độ của tín hiệu không có hiệu ứng",
|
202 |
+
"width": "Chiều rộng âm thanh",
|
203 |
+
"width_info": "Điều chỉnh độ rộng của không gian âm thanh",
|
204 |
+
"chorus_depth": "Giảm âm",
|
205 |
+
"chorus_depth_info": "Điều chỉnh cường độ hòa âm, tạo ra cảm giác rộng cho âm thanh",
|
206 |
+
"chorus_rate_hz": "Tần số",
|
207 |
+
"chorus_rate_hz_info": "Điều chỉnh tốc độ dao động của hòa âm",
|
208 |
+
"chorus_mix": "Trộn tín hiệu",
|
209 |
+
"chorus_mix_info": "Điều chỉnh mức độ trộn giữa âm gốc và âm có hiệu ứng",
|
210 |
+
"chorus_centre_delay_ms": "Đỗ trễ trung tâm (mili giây)",
|
211 |
+
"chorus_centre_delay_ms_info": "Khoảng thời gian trễ giữa các kênh stereo để tạo hiệu ứng hòa âm",
|
212 |
+
"chorus_feedback": "Phản hồi",
|
213 |
+
"chorus_feedback_info": "Điều chỉnh lượng tín hiệu hiệu ứng được quay lại vào tín hiệu gốc",
|
214 |
+
"delay_seconds": "Thời gian trễ",
|
215 |
+
"delay_seconds_info": "Điều chỉnh khoảng thời gian trễ giữa âm gốc và âm có hiệu ứng",
|
216 |
+
"delay_feedback": "Phản hồi độ trễ",
|
217 |
+
"delay_feedback_info": "Điều chỉnh lượng tín hiệu được quay lại, tạo hiệu ứng lặp lại",
|
218 |
+
"delay_mix": "Trộn tín hiệu độ trễ",
|
219 |
+
"delay_mix_info": "Điều chỉnh mức độ trộn giữa âm gốc và âm trễ",
|
220 |
+
"fade": "Hiệu ứng mờ dần",
|
221 |
+
"bass_or_treble": "Âm trầm và âm cao",
|
222 |
+
"limiter": "Giới hạn ngưỡng",
|
223 |
+
"distortion": "Hiệu ứng nhiễu âm",
|
224 |
+
"gain": "Cường độ âm",
|
225 |
+
"bitcrush": "Hiệu ứng giảm bits",
|
226 |
+
"clipping": "Hiệu ứng méo âm",
|
227 |
+
"fade_in": "Hiệu ứng mờ dần vào (mili giây)",
|
228 |
+
"fade_in_info": "Thời gian mà âm thanh sẽ tăng dần từ mức 0 đến mức bình thường",
|
229 |
+
"fade_out": "Hiệu ứng mờ dần ra (mili giây)",
|
230 |
+
"fade_out_info": "thời gian mà âm thanh sẽ giảm dần từ bình thường xuống mức 0",
|
231 |
+
"bass_boost": "Độ khuếch đại âm trầm (db)",
|
232 |
+
"bass_boost_info": "mức độ tăng cường âm trầm trong đoạn âm thanh",
|
233 |
+
"bass_frequency": "Tần số cắt của bộ lọc thông thấp (Hz)",
|
234 |
+
"bass_frequency_info": "tần số bị giảm. Tần số thấp sẽ làm âm trầm rõ hơn",
|
235 |
+
"treble_boost": "Độ khuếch đại âm cao (db)",
|
236 |
+
"treble_boost_info": "mức độ tăng cường âm cao trong đoạn âm thanh",
|
237 |
+
"treble_frequency": "Tần số cắt của bộ lọc thông cao (Hz)",
|
238 |
+
"treble_frequency_info": "tần số sẽ lọc bỏ. Tần số càng cao thì giữ lại âm càng cao",
|
239 |
+
"limiter_threashold_db": "Ngưỡng giới hạn",
|
240 |
+
"limiter_threashold_db_info": "Giới hạn mức độ âm thanh tối đa, ngăn không cho vượt quá ngưỡng",
|
241 |
+
"limiter_release_ms": "Thời gian thả",
|
242 |
+
"limiter_release_ms_info": "Khoảng thời gian để âm thanh trở lại sau khi bị giới hạn (Mili Giây)",
|
243 |
+
"distortion_info": "Điều chỉnh mức độ nhiễu âm, tạo hiệu ứng méo tiếng",
|
244 |
+
"gain_info": "Tăng giảm âm lượng của tín hiệu",
|
245 |
+
"clipping_threashold_db": "Ngưỡng cắt",
|
246 |
+
"clipping_threashold_db_info": "Cắt bớt tín hiệu vượt quá ngưỡng, tạo âm thanh méo",
|
247 |
+
"bitcrush_bit_depth": "Độ sâu bit",
|
248 |
+
"bitcrush_bit_depth_info": "Giảm chất lượng âm thanh bằng cách giảm số bit, tạo hiệu ứng âm thanh bị méo",
|
249 |
+
"phaser_depth": "Độ sâu",
|
250 |
+
"phaser_depth_info": "Điều chỉnh độ sâu của hiệu ứng, ảnh hưởng đến cường độ của hiệu ứng xoay pha",
|
251 |
+
"phaser_rate_hz": "Tần số",
|
252 |
+
"phaser_rate_hz_info": "Điều chỉnh tốc độ của hiệu ứng hiệu ứng xoay pha",
|
253 |
+
"phaser_mix": "Trộn tín hiệu",
|
254 |
+
"phaser_mix_info": "Điều chỉnh mức độ trộn giữa tín hiệu gốc và tín hiệu đã qua xử lý",
|
255 |
+
"phaser_centre_frequency_hz": "Tần số trung tâm",
|
256 |
+
"phaser_centre_frequency_hz_info": "Tần số trung tâm của hiệu ứng xoay pha, ảnh hưởng đến tần số bị điều chỉnh",
|
257 |
+
"phaser_feedback": "Phản hồi",
|
258 |
+
"phaser_feedback_info": "Điều chỉnh lượng phản hồi tín hiệu, tạo cảm giác xoay pha mạnh hoặc nhẹ",
|
259 |
+
"compressor_threashold_db": "Ngưỡng nén",
|
260 |
+
"compressor_threashold_db_info": "Ngưỡng mức âm thanh sẽ bị nén khi vượt qua ngưỡng này",
|
261 |
+
"compressor_ratio": "Tỉ lệ nén",
|
262 |
+
"compressor_ratio_info": "Điều chỉnh mức độ nén âm thanh khi vượt qua ngưỡng",
|
263 |
+
"compressor_attack_ms": "Thời gian tấn công (mili giây)",
|
264 |
+
"compressor_attack_ms_info": "Khoảng thời gian nén bắt đầu tác dụng sau khi âm thanh vượt ngưỡng",
|
265 |
+
"compressor_release_ms": "Thời gian thả",
|
266 |
+
"compressor_release_ms_info": "Thời gian để âm thanh trở lại trạng thái bình thường sau khi bị nén",
|
267 |
+
"create_dataset_url": "Đường dẫn liên kết đến âm thanh(sử dụng dấu , để sử dụng nhiều liên kết)",
|
268 |
+
"createdataset": "Tạo dữ liệu",
|
269 |
+
"create_dataset_markdown": "## Tạo Dữ Liệu Huấn Luyện Từ Youtube",
|
270 |
+
"create_dataset_markdown_2": "Xử lý và tạo tập tin dữ liệu huấn luyện bằng đường dẫn youtube",
|
271 |
+
"denoise": "Khử tách mô hình",
|
272 |
+
"skip": "Bỏ qua giây",
|
273 |
+
"model_ver": "Phiên bản tách giọng",
|
274 |
+
"model_ver_info": "Phiên bản của mô hình tách nhạc để tách giọng",
|
275 |
+
"create_dataset_info": "Thông tin tạo dữ liệu",
|
276 |
+
"output_data": "Đầu ra dữ liệu",
|
277 |
+
"output_data_info": "Đầu ra dữ liệu sau khi tạo xong dữ liệu",
|
278 |
+
"skip_start": "Bỏ qua phần đầu",
|
279 |
+
"skip_start_info": "Bỏ qua số giây đầu của âm thanh, dùng dấu , để sử dụng cho nhiều âm thanh",
|
280 |
+
"skip_end": "Bỏ qua phần cuối",
|
281 |
+
"skip_end_info": "Bỏ qua số giây cuối của âm thanh, dùng dấu , để sử dụng cho nhiều âm thanh",
|
282 |
+
"training_model": "Huấn Luyện Mô Hình",
|
283 |
+
"training_markdown": "Huấn luyện và đào tạo mô hình giọng nói bằng một lượng dữ liệu giọng nói",
|
284 |
+
"training_model_name": "Tên của mô hình khi huấn luyện(không sử dụng ký tự đặc biệt hay dấu cách)",
|
285 |
+
"sample_rate": "Tỉ lệ lấy mẫu",
|
286 |
+
"sample_rate_info": "Tỉ lệ lấy mẫu của mô hình",
|
287 |
+
"training_version": "Phiên bản mô hình",
|
288 |
+
"training_version_info": "Phiên bản mô hình khi huấn luyện",
|
289 |
+
"training_pitch": "Huấn luyện cao độ",
|
290 |
+
"training_pitch_info": "Huấn luyện cao độ cho mô hình",
|
291 |
+
"upload_dataset": "Tải lên dữ liệu huấn luyện",
|
292 |
+
"preprocess_split": "Nên tắt nếu dữ liệu đã được xử lý",
|
293 |
+
"preprocess_effect": "Xử lý hậu kỳ",
|
294 |
+
"preprocess_effect_info": "Nên tắt nếu dữ liệu đã được xử lý",
|
295 |
+
"clear_dataset": "Làm sạch dữ liệu",
|
296 |
+
"preprocess_info": "Thông tin phần xử lý trước",
|
297 |
+
"preprocess_button": "1. Xử lý dữ liệu",
|
298 |
+
"extract_button": "2. Trích xuất dữ liệu",
|
299 |
+
"extract_info": "Thông tin phần trích xuất dữ liệu",
|
300 |
+
"total_epoch": "Tổng số kỷ nguyên",
|
301 |
+
"total_epoch_info": "Tổng số kỷ nguyên huấn luyện đào tạo",
|
302 |
+
"save_epoch": "Tần suất lưu",
|
303 |
+
"save_epoch_info": "Tần suất lưu mô hình khi huấn luyện, giúp việc huấn luyện lại mô hình",
|
304 |
+
"create_index": "Tạo chỉ mục",
|
305 |
+
"index_algorithm": "Thuật toán chỉ mục",
|
306 |
+
"index_algorithm_info": "Thuật toán tạo chỉ mục",
|
307 |
+
"custom_dataset": "Tùy chọn thư mục",
|
308 |
+
"custom_dataset_info": "Tùy chọn thư mục dữ liệu huấn luyện",
|
309 |
+
"overtraining_detector": "Kiểm tra quá sức",
|
310 |
+
"overtraining_detector_info": "Kiểm tra huấn luyện mô hình quá sức",
|
311 |
+
"cleanup_training": "Làm sạch huấn luyện",
|
312 |
+
"cleanup_training_info": "Bật khi cần huấn luyện lại từ đầu.",
|
313 |
+
"cache_in_gpu": "Lưu mô hình vào đệm",
|
314 |
+
"cache_in_gpu_info": "Lưu mô hình vào bộ nhớ đệm gpu",
|
315 |
+
"dataset_folder": "Thư mục chứa dữ liệu",
|
316 |
+
"threshold": "Ngưỡng huấn luyện quá sức",
|
317 |
+
"setting_cpu_gpu": "Tùy chọn CPU/GPU",
|
318 |
+
"gpu_number": "Số gpu được sử dụng",
|
319 |
+
"gpu_number_info": "Số của GPU được sử dụng trong huấn luyện",
|
320 |
+
"save_only_latest": "Chỉ lưu mới nhất",
|
321 |
+
"save_only_latest_info": "Chỉ lưu mô hình D và G mới nhất",
|
322 |
+
"save_every_weights": "Lưu mọi mô hình",
|
323 |
+
"save_every_weights_info": "Lưu mọi mô hình sau mỗi lượt kỷ nguyên",
|
324 |
+
"gpu_info": "Thông tin của GPU",
|
325 |
+
"gpu_info_2": "Thông tin của GPU được sử dụng trong huấn luyện",
|
326 |
+
"cpu_core": "Số lõi xử lý có thể sử dụng",
|
327 |
+
"cpu_core_info": "Số lõi được sử dụng trong việc huấn luyện",
|
328 |
+
"not_use_pretrain_2": "Không dùng huấn luyện",
|
329 |
+
"not_use_pretrain_info": "Không dùng huấn luyện trước",
|
330 |
+
"custom_pretrain": "Tùy chỉnh huấn luyện",
|
331 |
+
"custom_pretrain_info": "Tùy chỉnh huấn luyện trước",
|
332 |
+
"pretrain_file": "Tệp mô hình huấn luyện trước {dg}",
|
333 |
+
"train_info": "Thông tin phần huấn luyện",
|
334 |
+
"export_model": "5. Xuất Mô hình",
|
335 |
+
"zip_model": "2. Nén mô hình",
|
336 |
+
"output_zip": "Đầu ra tệp khi nén",
|
337 |
+
"model_path": "Đường dẫn mô hình",
|
338 |
+
"model_ratio": "Tỉ lệ mô hình",
|
339 |
+
"model_ratio_info": "Chỉnh hướng về bên nào sẽ làm cho mô hình giống với bên đó",
|
340 |
+
"output_model_path": "Đầu ra mô hình",
|
341 |
+
"fushion": "Dung Hợp Mô Hình",
|
342 |
+
"fushion_markdown": "## Dung Hợp Hai Mô Hình Với Nhau",
|
343 |
+
"fushion_markdown_2": "Dung hợp hai mô hình giọng nói lại với nhau để tạo thành một mô hình duy nhất",
|
344 |
+
"read_model": "Đọc Thông Tin",
|
345 |
+
"read_model_markdown": "## Đọc Thông Tin Của Mô Hình",
|
346 |
+
"read_model_markdown_2": "Đọc các thông tin được ghi trong mô hình",
|
347 |
+
"drop_model": "Thả mô hình vào đây",
|
348 |
+
"readmodel": "Đọc mô hình",
|
349 |
+
"model_path_info": "Nhập đường dẫn đến tệp mô hình",
|
350 |
+
"modelinfo": "Thông Tin Mô Hình",
|
351 |
+
"download_markdown": "## Tải Xuống Mô Hình",
|
352 |
+
"download_markdown_2": "Tải xuống mô hình giọng nói, mô hình huấn luyện trước, mô hình nhúng",
|
353 |
+
"model_download": "Tải xuống mô hình giọng nói",
|
354 |
+
"model_url": "Đường dẫn liên kết đến mô hình",
|
355 |
+
"15s": "Vui lòng đợi khoảng 15 giây. Hệ thống sẽ tự khởi động lại!",
|
356 |
+
"model_download_select": "Chọn cách tải mô hình",
|
357 |
+
"model_warehouse": "Kho mô hình",
|
358 |
+
"get_model": "Nhận mô hình",
|
359 |
+
"name_to_search": "Tên để tìm kiếm",
|
360 |
+
"search_2": "Tìm kiếm",
|
361 |
+
"select_download_model": "Chọn mô hình đã được tìm kiếm(Bấm vào để chọn)",
|
362 |
+
"download_pretrained_2": "Tải xuống mô hình huấn luyện trước",
|
363 |
+
"only_huggingface": "Chỉ hỗ trợ huggingface.co",
|
364 |
+
"pretrained_url": "Đường dẫn liên kết đến mô hình huấn luyện trước {dg}",
|
365 |
+
"select_pretrain": "Chọn mô hình huấn luyện trước",
|
366 |
+
"select_pretrain_info": "Chọn mô hình huấn luyện trước để cài đặt về",
|
367 |
+
"pretrain_sr": "Tốc độ lấy mẫu của mô hình",
|
368 |
+
"drop_pretrain": "Thả mô hình huấn luyện trước {dg} vào đây",
|
369 |
+
"hubert_download": "Tải xuống mô hình nhúng",
|
370 |
+
"hubert_url": "Đường dẫn liên kết tới mô hình nhúng",
|
371 |
+
"drop_hubert": "Thả mô hình nhúng vào đây",
|
372 |
+
"settings": "Tùy Chỉnh",
|
373 |
+
"settings_markdown": "## Tùy Chỉnh Thêm",
|
374 |
+
"settings_markdown_2": "Tùy chỉnh thêm một số tính năng của dự án",
|
375 |
+
"lang": "Ngôn ngữ",
|
376 |
+
"lang_restart": "Ngôn ngữ được hiển thị trong dự án(Khi đổi ngôn ngữ hệ thống sẽ tự khởi động lại sau 15 giây để cập nhật)",
|
377 |
+
"change_lang": "Đổi Ngôn Ngữ",
|
378 |
+
"theme": "Chủ đề",
|
379 |
+
"theme_restart": "Loại Chủ đề của giao diện được hiển thị(Khi đổi chủ đề hệ thống sẽ tự khởi động lại sau 15 giây để cập nhật)",
|
380 |
+
"theme_button": "Đổi Chủ Đề",
|
381 |
+
"change_light_dark": "Đổi Chế Độ Sáng/Tối",
|
382 |
+
"tensorboard_url": "Đường dẫn biểu đồ",
|
383 |
+
"errors_loading_audio": "Lỗi khi tải âm thanh: {e}",
|
384 |
+
"apply_error": "Đã xảy ra lỗi khi áp dụng hiệu ứng: {e}",
|
385 |
+
"indexpath": "Đường dẫn chỉ mục",
|
386 |
+
"skip_file": "Phần {i} được bỏ qua vì quá ngắn: {chunk}ms",
|
387 |
+
"split_total": "Tổng số phần đã cắt",
|
388 |
+
"process_audio_error": "Đã xảy ra lỗi khi xử lý âm thanh",
|
389 |
+
"merge_error": "Đã xảy ra lỗi khi ghép âm thanh",
|
390 |
+
"not_found_convert_file": "Không tìm thấy tệp đã xử lý",
|
391 |
+
"convert_batch": "Chuyển đổi hàng loạt...",
|
392 |
+
"found_audio": "Tìm thấy {audio_files} tệp âm thanh cho việc chuyển đổi.",
|
393 |
+
"not_found_audio": "Không tìm thấy tệp âm thanh!",
|
394 |
+
"error_convert": "Đã xảy ra lỗi khi chuyển đổi âm thanh: {e}",
|
395 |
+
"error_convert_batch": "Đã xảy ra lỗi khi chuyển đổi các đoạn âm thanh cắt: {e}",
|
396 |
+
"error_convert_batch_2": "Đã xảy ra lỗi khi chuyển đổi âm thanh hàng loạt: {e}",
|
397 |
+
"convert_batch_success": "Đã chuyển đổi hàng loạt hoàn tất sau {elapsed_time} giây. {output_path}",
|
398 |
+
"convert_audio_success": "Tệp {input_path} được chuyển đổi hoàn tất sau {elapsed_time} giây. {output_path}",
|
399 |
+
"hybrid_methods": "Tính toán ước lượng cao độ f0 cho các phương pháp {methods}",
|
400 |
+
"method_not_valid": "Phương pháp không hợp lệ",
|
401 |
+
"read_faiss_index_error": "Đã xảy ra lỗi khi đọc chỉ mục FAISS: {e}",
|
402 |
+
"read_model_error": "Thất bại khi tải mô hình: {e}",
|
403 |
+
"starting_download": "Bắt đầu tải xuống",
|
404 |
+
"version_not_valid": "Phiên bản tách giọng không hợp lệ",
|
405 |
+
"skip<audio": "Không thể bỏ qua vì số lượng thời gian bỏ qua thấp hơn số lượng tệp âm thanh",
|
406 |
+
"skip>audio": "Không thể bỏ qua vì số lượng thời gian bỏ qua cao hơn số lượng tệp âm thanh",
|
407 |
+
"=<0": "Thời gian bỏ qua bé hơn hoặc bằng 0 nên bỏ qua",
|
408 |
+
"skip_warning": "Thời lượng bỏ qua ({seconds} giây) vượt quá thời lượng âm thanh ({total_duration} giây). Bỏ qua.",
|
409 |
+
"download_success": "Đã tải xuống hoàn tất",
|
410 |
+
"create_dataset_error": "Đã xảy ra lỗi khi tạo dữ liệu huấn luyện",
|
411 |
+
"create_dataset_success": "Quá trình tạo dữ liệu huấn huyện đã hoàn tất sau: {elapsed_time} giây",
|
412 |
+
"skip_start_audio": "Bỏ qua âm thanh đầu hoàn tất: {input_file}",
|
413 |
+
"skip_end_audio": "Bỏ qua âm thanh cuối hoàn tất: {input_file}",
|
414 |
+
"merge_audio": "Đã ghép các phần chứa âm thanh lại",
|
415 |
+
"separator_process": "Đang tách giọng: {input}...",
|
416 |
+
"not_found_main_vocal": "Không tìm thấy giọng chính!",
|
417 |
+
"not_found_backing_vocal": "Không tìm thấy giọng bè!",
|
418 |
+
"not_found_instruments": "Không tìm thấy nhạc nền",
|
419 |
+
"merge_instruments_process": "Kết hợp giọng với nhạc nền...",
|
420 |
+
"dereverb": "Đang tách âm vang",
|
421 |
+
"dereverb_success": "Đã tách âm vang hoàn tất",
|
422 |
+
"save_index": "Đã lưu tệp chỉ mục",
|
423 |
+
"create_index_error": "Đã xảy ra lỗi khi tạo chỉ mục",
|
424 |
+
"sr_not_16000": "Tỉ lệ mẫu phải là 16000",
|
425 |
+
"gpu_not_valid": "Chỉ số GPU không hợp lệ. Chuyển sang CPU.",
|
426 |
+
"extract_file_error": "Đã xảy ra lỗi khi giải nén tập tin",
|
427 |
+
"extract_f0_method": "Bắt đầu trích xuất cao độ với {num_processes} lõi với phương pháp trích xuất {f0_method}...",
|
428 |
+
"extract_f0": "Trích Xuất Cao Độ",
|
429 |
+
"extract_f0_success": "Quá trình trích xuất cao độ đã hoàn tất vào {elapsed_time} giây.",
|
430 |
+
"NaN": "chứa giá trị NaN và sẽ bị bỏ qua.",
|
431 |
+
"start_extract_hubert": "Đang bắt đầu nhúng trích xuất...",
|
432 |
+
"not_found_audio_file": "Không tìm thấy tập tin âm thanh. Hãy chắc chắn rằng bạn đã cung cấp âm thanh chính xác.",
|
433 |
+
"extract_hubert": "Trích xuất nhúng",
|
434 |
+
"process_error": "Đã xảy ra lỗi khi xử lý",
|
435 |
+
"extract_hubert_success": "Quá trình trích xuất nhúng đã hoàn tất trong {elapsed_time} giây.",
|
436 |
+
"export_process": "Đường dẫn của mô hình",
|
437 |
+
"extract_error": "Đã xảy ra lỗi khi trích xuất dữ liệu",
|
438 |
+
"extract_success": "Đã trích xuất hoàn tất mô hình",
|
439 |
+
"min_length>=min_interval>=hop_size": "min_length lớn hơn hoặc bằng min_interval lớn hơn hoặc bằng hop_size là bắt buộc",
|
440 |
+
"max_sil_kept>=hop_size": "max_sil_kept lớn hơn hoặc bằng hop_size là bắt buộc",
|
441 |
+
"start_preprocess": "Đang bắt đầu xử lý dữ liệu với {num_processes} lõi xử lý...",
|
442 |
+
"not_integer": "Thư mục ID giọng nói phải là số nguyên, thay vào đó có",
|
443 |
+
"preprocess": "Xử Lý Dữ Liệu",
|
444 |
+
"preprocess_success": "Quá trình xử lý hoàn tất sau {elapsed_time} giây.",
|
445 |
+
"preprocess_model_success": "Đã hoàn tất xử lý trước dữ liệu cho mô hình",
|
446 |
+
"turn_on_dereverb": "Điều kiện cần để sử dụng tách vang giọng bè là phải bật tách vang",
|
447 |
+
"turn_on_separator_backing": "Điều kiện cần để sử dụng tách vang giọng bè là phải bật tách bè",
|
448 |
+
"backing_model_ver": "Phiên bản mô hình của tách bè",
|
449 |
+
"clean_audio_success": "Đã làm sạch âm hoàn tất!",
|
450 |
+
"separator_error": "Đã xảy ra lỗi khi tách nhạc",
|
451 |
+
"separator_success": "Quá trình tách nhạc đã hoàn tất sau: {elapsed_time} giây",
|
452 |
+
"separator_process_2": "Đang xử lý tách nhạc",
|
453 |
+
"separator_success_2": "Đã tách nhạc hoàn tất!",
|
454 |
+
"separator_process_backing": "Đang xử lý tách giọng bè",
|
455 |
+
"separator_process_backing_success": "Đã tách giọng bè hoàn tất!",
|
456 |
+
"process_original": "Đang xử lý tách âm vang giọng gốc...",
|
457 |
+
"process_original_success": "Đã tách âm vang giọng gốc hoàn tất!",
|
458 |
+
"process_main": "Đang xử lý tách âm vang giọng chính...",
|
459 |
+
"process_main_success": "Đã tách âm vang giọng chính hoàn tất!",
|
460 |
+
"process_backing": "Đang xử lý tách âm vang giọng bè...",
|
461 |
+
"process_backing_success": "Đã tách âm vang giọng bè hoàn tất!",
|
462 |
+
"save_every_epoch": "Lưu mô hình sau: ",
|
463 |
+
"total_e": "Tổng số kỷ nguyên huấn luyện: ",
|
464 |
+
"dorg": "Huấn luyện trước G: {pretrainG} | Huấn luyện trước D: {pretrainD}",
|
465 |
+
"training_f0": "Huấn luyện cao độ",
|
466 |
+
"not_gpu": "Không phát hiện thấy GPU, hoàn nguyên về CPU (không khuyến nghị)",
|
467 |
+
"not_found_checkpoint": "Không tìm thấy tệp điểm đã lưu: {checkpoint_path}",
|
468 |
+
"save_checkpoint": "Đã tải lại điểm đã lưu '{checkpoint_path}' (kỷ nguyên {checkpoint_dict})",
|
469 |
+
"save_model": "Đã lưu mô hình '{checkpoint_path}' (kỷ nguyên {iteration})",
|
470 |
+
"sr_does_not_match": "{sample_rate} Tỉ lệ mẫu không khớp với mục tiêu {sample_rate2} Tỉ lệ mẫu",
|
471 |
+
"spec_error": "Đã xảy ra lỗi khi nhận thông số kỹ thuật từ {spec_filename}: {e}",
|
472 |
+
"time_or_speed_training": "thời gian={current_time} | tốc độ huấn luyện={elapsed_time_str}",
|
473 |
+
"savemodel": "Đã lưu mô hình '{model_dir}' (kỷ nguyên {epoch} và bước {step})",
|
474 |
+
"model_author": "Ghi công mô hình cho {model_author}",
|
475 |
+
"unregistered": "Mô hình không được ghi chép",
|
476 |
+
"not_author": "Mô hình không được ghi chép",
|
477 |
+
"training_author": "Tên chủ mô hình",
|
478 |
+
"training_author_info": "Nếu bạn muốn ghi công mô hình hãy nhập tên của bạn vào đây",
|
479 |
+
"extract_model_error": "Đã xảy ra lỗi khi trích xuất mô hình",
|
480 |
+
"start_training": "Bắt đầu huấn luyện",
|
481 |
+
"import_pretrain": "Đã nạp huấn luyện trước ({dg}) '{pretrain}'",
|
482 |
+
"not_using_pretrain": "Sẽ không có huấn luyện trước ({dg}) được sử dụng",
|
483 |
+
"training_warning": "CẢNH BÁO: Tổn thất tạo ra thấp hơn đã bị vượt quá tổn thất thấp hơn trong kỷ nguyên tiếp theo.",
|
484 |
+
"overtraining_find": "Tập luyện quá sức được phát hiện ở kỷ nguyên {epoch} với mất mát g được làm mịn {smoothed_value_gen} và mất mát d được làm mịn {smoothed_value_disc}",
|
485 |
+
"best_epoch": "Kỷ nguyên mới tốt nhất {epoch} với mất mát g được làm mịn {smoothed_value_gen} và mất mát d được làm mịn {smoothed_value_disc}",
|
486 |
+
"success_training": "Đã đào tạo hoàn tất với {epoch} kỷ nguyên, {global_step} các bước và {loss_gen_all} mất mát gen.",
|
487 |
+
"training_info": "Tổn thất gen thấp nhất: {lowest_value_rounded} ở ký nguyên {lowest_value_epoch}, bước {lowest_value_step}",
|
488 |
+
"model_training_info": "{model_name} | kỷ nguyên={epoch} | bước={global_step} | {epoch_recorder} | giá trị thấp nhất={lowest_value_rounded} (kỷ nguyên {lowest_value_epoch} và bước {lowest_value_step}) | Số kỷ nguyên còn lại để tập luyện quá sức: g/total: {remaining_epochs_gen} d/total: {remaining_epochs_disc} | làm mịn mất mát gen={smoothed_value_gen} | làm mịn mất mát disc={smoothed_value_disc}",
|
489 |
+
"model_training_info_2": "{model_name} | kỷ nguyên={epoch} | bước={global_step} | {epoch_recorder} | giá trị thấp nhất={lowest_value_rounded} (kỷ nguyên {lowest_value_epoch} và bước {lowest_value_step})",
|
490 |
+
"model_training_info_3": "{model_name} | kỷ nguyên={epoch} | bước={global_step} | {epoch_recorder}",
|
491 |
+
"training_error": "Đã xảy ra lỗi khi huấn luyện mô hình:",
|
492 |
+
"separator_info": "Đang khởi tạo với đường dẫn đầu ra: {output_dir}, định dạng đầu ra: {output_format}",
|
493 |
+
"output_dir_is_none": "Thư mục đầu ra không được chỉ định. Sử dụng thư mục làm việc hiện tại.",
|
494 |
+
">0or=1": "Ngưỡng chuẩn hóa phải lớn hơn 0 và nhỏ hơn hoặc bằng 1.",
|
495 |
+
"output_single": "Đã yêu cầu đầu ra một gốc nên chỉ có một tệp đầu ra ({output_single_stem}) sẽ được ghi",
|
496 |
+
"step2": "Bước thứ hai sẽ được đảo ngược bằng cách sử dụng quang phổ thay vì dạng sóng. Điều này có thể cải thiện chất lượng nhưng chậm hơn một chút.",
|
497 |
+
"name_ver": "Phiên bản {name}",
|
498 |
+
"os": "Hệ điều hành",
|
499 |
+
"platform_info": "Hệ thống: {system_info} Tên: {node} Phát hành: {release} Máy: {machine} Vi xử lý: {processor}",
|
500 |
+
"none_ffmpeg": "FFmpeg chưa được cài đặt. Vui lòng cài đặt FFmpeg để sử dụng gói này.",
|
501 |
+
"install_onnx": "Gói {pu} ONNX Runtime được cài đặt cùng với phiên bản",
|
502 |
+
"running_in_cpu": "Không thể cấu hình khả năng tăng tốc phần cứng, chạy ở chế độ CPU",
|
503 |
+
"running_in_cuda": "CUDA có sẵn trong Torch, cài đặt thiết bị Torch thành CUDA",
|
504 |
+
"onnx_have": "ONNXruntime có sẵn {have}, cho phép tăng tốc",
|
505 |
+
"onnx_not_have": "{have} không có sẵn trong ONNXruntime, do đó khả năng tăng tốc sẽ KHÔNG được bật",
|
506 |
+
"python_not_install": "Gói Python: {package_name} chưa được cài đặt",
|
507 |
+
"hash": "Tính hash của tệp mô hình {model_path}",
|
508 |
+
"ioerror": "IOError đang tìm kiếm -10 MB hoặc đọc tệp mô hình để tính toán hàm băm: {e}",
|
509 |
+
"cancel_download": "Tệp đã tồn tại tại {output_path}, bỏ qua quá trình tải xuống",
|
510 |
+
"download_model": "Đang tải tệp từ {url} xuống {output_path} với thời gian chờ 300 giây",
|
511 |
+
"download_error": "Không tải được tệp xuống từ {url}, mã phản hồi: {status_code}",
|
512 |
+
"vip_model": "Mô hình: '{model_friendly_name}' là mô hình cao cấp, được Anjok07 dự định chỉ dành cho những người đăng ký trả phí truy cập.",
|
513 |
+
"vip_print": "Này bạn, nếu bạn chưa đăng ký, vui lòng cân nhắc việc hỗ trợ cho nhà phát triển của UVR, Anjok07 bằng cách đăng ký tại đây: https://patreon.com/uvr",
|
514 |
+
"search_model": "Đang tìm kiếm mô hình {model_filename} trong tập tin các mô hình được hỗ trợ trong nhóm",
|
515 |
+
"load_download_json": "Đã tải danh sách tải xuống mô hình",
|
516 |
+
"single_model": "Đã xác định được tệp mô hình đơn: {model_friendly_name}",
|
517 |
+
"not_found_model": "Không tìm thấy mô hình trong kho lưu trữ UVR, đang cố tải xuống từ kho lưu trữ mô hình phân tách âm thanh...",
|
518 |
+
"single_model_path": "Đường dẫn trả về cho tệp mô hình đơn: {model_path}",
|
519 |
+
"find_model": "Đã tìm thấy tên tệp đầu vào {model_filename} trong mô hình nhiều tệp: {model_friendly_name}",
|
520 |
+
"find_models": "Đã xác định mô hình nhiều tệp: {model_friendly_name}, lặp qua các tệp để tải xuống",
|
521 |
+
"find_path": "Đang cố gắng xác định ĐƯỜNG DẪN tải xuống cho cặp cấu hình",
|
522 |
+
"not_found_model_warehouse": "Không tìm thấy mô hình trong kho lưu trữ UVR, đang cố tải xuống từ kho lưu trữ mô hình phân tách âm thanh...",
|
523 |
+
"yaml_warning": "Tên mô hình bạn đã chỉ định, {model_filename} thực sự là tệp cấu hình mô hình chứ không phải tệp mô hình.",
|
524 |
+
"yaml_warning_2": "Chúng tôi đã tìm thấy một mô hình khớp với tệp cấu hình này: {config_key} nên chúng tôi sẽ sử dụng tệp mô hình đó cho lần chạy này.",
|
525 |
+
"yaml_warning_3": "Để tránh hành vi gây nhầm lẫn/không nhất quán trong tương lai, thay vào đó hãy chỉ định tên tệp mô hình thực tế.",
|
526 |
+
"yaml_debug": "Không tìm thấy tệp cấu hình mô hình YAML trong kho lưu trữ UVR, đang cố tải xuống từ kho lưu trữ mô hình phân tách âm thanh...",
|
527 |
+
"download_model_friendly": "Tất cả các tệp đã tải xuống cho mô hình {model_friendly_name}, trả về đường dẫn ban đầu {model_path}",
|
528 |
+
"not_found_model_2": "Không tìm thấy tệp mô hình {model_filename} trong các tệp mô hình được hỗ trợ",
|
529 |
+
"load_yaml": "Đang tải dữ liệu mô hình từ YAML tại đường dẫn {model_data_yaml_filepath}",
|
530 |
+
"load_yaml_2": "Dữ liệu mô hình được tải từ tệp YAML: {model_data}",
|
531 |
+
"hash_md5": "Tính hash MD5 cho tệp mô hình để xác định các tham số mô hình từ dữ liệu UVR...",
|
532 |
+
"model_hash": "Mô hình {model_path} có hash {model_hash}",
|
533 |
+
"mdx_data": "Đường dẫn dữ liệu mô hình MDX được đặt thành {mdx_model_data_path}",
|
534 |
+
"load_mdx": "Đang tải các tham số mô hình MDX từ tệp dữ liệu mô hình UVR...",
|
535 |
+
"model_not_support": "Tệp mô hình không được hỗ trợ: không thể tìm thấy tham số cho hash MD5 {model_hash} trong tệp dữ liệu mô hình UVR cho vòm MDX.",
|
536 |
+
"uvr_json": "Dữ liệu mô hình được tải từ UVR JSON bằng hàm băm {model_hash}: {model_data}",
|
537 |
+
"loading_model": "Đang tải mô hình {model_filename}...",
|
538 |
+
"download_model_friendly_2": "Đã tải xuống mô hình, tên thân thiện: {model_friendly_name}, Đường dẫn mô hình: {model_path}",
|
539 |
+
"model_type_not_support": "Loại mô hình không được hỗ trợ: {model_type}",
|
540 |
+
"demucs_not_support_python<3.10": "Các mô hình Demucs yêu cầu phiên bản Python 3.10 trở lên.",
|
541 |
+
"import_module": "Nhập mô-đun cho loại mô hình",
|
542 |
+
"initialization": "Khởi tạo lớp phân cách cho loại mô hình",
|
543 |
+
"loading_model_success": "Đang tải mô hình hoàn tất.",
|
544 |
+
"loading_model_duration": "Tải thời lượng mô hình",
|
545 |
+
"starting_separator": "Bắt đầu quá trình tách cho đường dẫn tập tin âm thanh",
|
546 |
+
"normalization": "Ngưỡng chuẩn hóa được đặt thành {normalization_threshold}, dạng sóng sẽ hạ xuống biên độ tối đa này để tránh bị cắt.",
|
547 |
+
"loading_separator_model": "Đang tải xuống mô hình {model_filename}...",
|
548 |
+
"separator_success_3": "Quá trình tách hoàn tất.",
|
549 |
+
"separator_duration": "Thời gian tách",
|
550 |
+
"downloading_model": "Đã tải xuống mô hình, loại: {model_type}, tên thân thiện: {model_friendly_name}, đường dẫn mô hình: {model_path}, dữ liệu mô hình: {model_data_dict_size} mục",
|
551 |
+
"demucs_info": "Thông số Demucs: Kích thước phân đoạn = {segment_size}, Kích hoạt kích thước phân đoạn = {segments_enabled}",
|
552 |
+
"demucs_info_2": "Thông số Demucs: Số lượng dự đoán = {shifts}, Chồng chéo = {overlap}",
|
553 |
+
"start_demucs": "Khởi tạo hoàn tất Demucs Separator",
|
554 |
+
"start_separator": "Bắt đầu quá trình tách...",
|
555 |
+
"prepare_mix": "Chuẩn bị hỗn hợp...",
|
556 |
+
"demix": "Hỗn hợp đã chuẩn bị để khử trộn. Hình dạng: {shape}",
|
557 |
+
"cancel_mix": "Đang tải mô hình để hủy trộn...",
|
558 |
+
"model_review": "Mô hình được tải và đặt ở chế độ đánh giá.",
|
559 |
+
"del_gpu_cache_after_demix": "Đã xóa bộ nhớ đệm mô hình và GPU sau khi hủy trộn.",
|
560 |
+
"process_output_file": "Đang xử lý tập tin đầu ra...",
|
561 |
+
"source_length": "Đang xử lý mảng nguồn, độ dài nguồn là {source_length}",
|
562 |
+
"process_ver": "Đang xử lý nguồn phiên bản...",
|
563 |
+
"set_map": "Đặt bản đồ nguồn thành {part} phần gốc...",
|
564 |
+
"process_all_part": "Xử lý cho tất cả các phần gốc...",
|
565 |
+
"skip_part": "Bỏ qua phần viết gốc {stem_name} vì out_single_stem được đặt thành {output_single_stem}...",
|
566 |
+
"starting_demix_demucs": "Đang bắt đầu quá trình trộn trong demix_demucs...",
|
567 |
+
"model_infer": "Chạy mô hình suy luận...",
|
568 |
+
"name_not_pretrained": "{name} không phải là một mô hình được đào tạo trước hay một túi mô hình.",
|
569 |
+
"invalid_checksum": "Tổng kiểm tra không hợp lệ cho tệp {path}, dự kiến {checksum} nhưng lại nhận được {actual_checksum}",
|
570 |
+
"mdx_info": "Thông số MDX: Kích thước lô = {batch_size}, Kích thước phân đoạn = {segment_size}",
|
571 |
+
"mdx_info_2": "Thông số MDX: Chồng chéo = {overlap}, Hop_length = {hop_length}, Kích hoạt khữ nhiễu = {enable_denoise}",
|
572 |
+
"mdx_info_3": "Thông số MDX",
|
573 |
+
"load_model_onnx": "Đang tải mô hình ONNX để suy luận...",
|
574 |
+
"load_model_onnx_success": "Đã tải mô hình hoàn tất bằng phiên suy luận ONNXruntime.",
|
575 |
+
"onnx_to_pytorch": "Mô hình được chuyển đổi từ onnx sang pytorch do kích thước phân đoạn không khớp với dim_t, quá trình xử lý có thể chậm hơn.",
|
576 |
+
"stft": "STFT nghịch đảo được áp dụng. Trả về kết quả có hình dạng",
|
577 |
+
"no_denoise": "Mô hình chạy trên quang phổ mà không khử nhiễu.",
|
578 |
+
"mix": "Đang chuẩn bị trộn cho tệp âm thanh đầu vào {audio_file_path}...",
|
579 |
+
"normalization_demix": "Chuẩn hóa hỗn hợp trước khi khử trộn...",
|
580 |
+
"mix_success": "Quá trình trộn hoàn tất.",
|
581 |
+
"primary_source": "Bình thường hóa nguồn chính...",
|
582 |
+
"secondary_source": "Sản xuất nguồn thứ cấp: Trộn ở chế độ trộn phù hợp",
|
583 |
+
"invert_using_spec": "Đảo ngược thân thứ cấp bằng cách sử dụng quang phổ khi invert_USE_spec được đặt thành True",
|
584 |
+
"invert_using_spec_2": "Đảo ngược thân thứ cấp bằng cách trừ đi thân cây được chuyển đổi từ hỗn hợp ban đầu được chuyển đổi",
|
585 |
+
"enable_denoise": "Mô hình chạy trên cả phổ âm và dương để khử nhiễu.",
|
586 |
+
"is_match_mix": "is_match_mix: dự đoán phổ thu được trực tiếp từ đầu ra STFT.",
|
587 |
+
"save_secondary_stem_output_path": "Đang lưu phần gốc {stem_name} vào {stem_output_path}...",
|
588 |
+
"starting_model": "Đang khởi tạo cài đặt mô hình...",
|
589 |
+
"input_info": "Thông số đầu vào của mô hình",
|
590 |
+
"model_settings": "Cài đặt mô hình",
|
591 |
+
"initialize_mix": "Đang khởi tạo kết hợp với is_ckpt = {is_ckpt}. Hình dạng trộn ban đầu: {shape}",
|
592 |
+
"!=2": "Dự kiến có tín hiệu âm thanh 2 kênh nhưng lại có {shape} kênh",
|
593 |
+
"process_check": "Xử lý ở chế độ điểm kiểm tra...",
|
594 |
+
"stft_2": "STFT được áp dụng trên hỗn hợp. Hình dạng quang phổ: {shape}",
|
595 |
+
"cache": "Khoảng đệm được tính toán",
|
596 |
+
"shape": "Hình dạng hỗn hợp sau khi đệm: {shape}, Số phần: {num_chunks}",
|
597 |
+
"process_no_check": "Xử lý ở chế độ không có điểm kiểm tra...",
|
598 |
+
"n_sample_or_pad": "Số lượng mẫu: {n_sample}, Đã tính đệm: {pad}",
|
599 |
+
"shape_2": "Hình dạng hỗn hợp sau khi đệm",
|
600 |
+
"process_part": "Đoạn đã xử lý {mix_waves}: Bắt đầu {i}, Kết thúc {ii}",
|
601 |
+
"mix_waves_to_tensor": "Đã chuyển đổi mix_waves thành tensor. Hình dạng tensor: {shape}",
|
602 |
+
"mix_match": "Chế độ trộn Match; áp dụng hệ số bù.",
|
603 |
+
"tar_waves": "Sóng tar_waves. Hình dạng",
|
604 |
+
"normalization_2": "Chuẩn hóa kết quả bằng cách chia kết quả cho số chia.",
|
605 |
+
"mix_wave": "Đang xử lý lô mix_wave",
|
606 |
+
"mix_or_batch": "Trộn phần chia thành từng đợt. Số lượng lô",
|
607 |
+
"demix_is_match_mix": "Bắt đầu quá trình hủy trộn với is_match_mix,",
|
608 |
+
"mix_shape": "Hỗn hợp phần gốc được lưu trữ. Hình dạng",
|
609 |
+
"chunk_size_or_overlap": "Kích thước đoạn để trộn phù hợp: {chunk_size}, Chồng chéo: {overlap}",
|
610 |
+
"chunk_size_or_overlap_standard": "Kích thước phần tiêu chuẩn: {chunk_size}, Chồng chéo: {overlap}",
|
611 |
+
"calc_size": "Kích thước được tạo được tính toán",
|
612 |
+
"window": "Cửa sổ được áp dụng cho đoạn này.",
|
613 |
+
"process_part_2": "Đang xử lý đoạn {total}/{total_chunks}: Bắt đầu {start}, Kết thúc {end}",
|
614 |
+
"all_process_part": "Tổng số phần cần xử lý",
|
615 |
+
"step_or_overlap": "Kích thước bước để xử lý các phần: {step} khi chồng chéo được đặt thành {overlap}.",
|
616 |
+
"mix_cache": "Hỗn hợp được chuẩn bị với lớp đệm. Hình dạng hỗn hợp",
|
617 |
+
"dims": "Không thể sử dụng mã hóa vị trí sin/cos với thứ nguyên lẻ (có dim={dims})",
|
618 |
+
"activation": "kích hoạt phải là relu/gelu, không phải {activation}",
|
619 |
+
"length_or_training_length": "Độ dài cho trước {length} dài hơn thời lượng huấn luyện {training_length}",
|
620 |
+
"type_not_valid": "Loại không hợp lệ cho",
|
621 |
+
"del_parameter": "Bỏ tham số không tồn tại ",
|
622 |
+
"info": "Các thông số phổ biến: Tên mô hình = {model_name}, Đường dẫn mô hình = {model_path}",
|
623 |
+
"info_2": "Các thông số phổ biến: Đường dẫn đầu ra = {output_dir}, Định dạng đầu ra = {output_format}",
|
624 |
+
"info_3": "Các thông số phổ biến: ngưỡng chuẩn hóa = {normalization_threshold}",
|
625 |
+
"info_4": "Các thông số phổ biến: Kích hoạt khữ nhiễu = {enable_denoise}, Đầu ra một phần = {output_single_stem}",
|
626 |
+
"info_5": "Các thông số phổ biến: Đảo ngược bằng cách sử dụng thông số kỹ thuật = {invert_using_spec}, tỷ lệ mẫu = {sample_rate}",
|
627 |
+
"info_6": "Các thông số phổ biến: Tên phần gốc chính = {primary_stem_name}, Tên phần gốc phụ = {secondary_stem_name}",
|
628 |
+
"info_7": "Các thông số phổ biến: Là Karaoke = {is_karaoke}, là mô hình bv = {is_bv_model}, tái cân bằng mô hình bv = {bv_model_rebalance}",
|
629 |
+
"success_process": "Đang hoàn tất quá trình xử lý phần gốc {stem_name} và ghi âm thanh...",
|
630 |
+
"load_audio": "Đang tải âm thanh từ tập tin",
|
631 |
+
"load_audio_success": "Đã tải âm thanh. Tốc độ mẫu: {sr}, Hình dạng âm thanh: {shape}",
|
632 |
+
"convert_mix": "Chuyển đổi mảng hỗn hợp được cung cấp.",
|
633 |
+
"convert_shape": "Hình dạng hỗn hợp chuyển đổi: {shape}",
|
634 |
+
"audio_not_valid": "Tệp âm thanh {audio_path} trống hoặc không hợp lệ",
|
635 |
+
"audio_valid": "Tệp âm thanh hợp lệ và chứa dữ liệu.",
|
636 |
+
"mix_single": "Hỗn hợp là đơn sắc. Chuyển đổi sang âm thanh nổi.",
|
637 |
+
"convert_mix_audio": "Đã chuyển đổi thành bản trộn âm thanh nổi.",
|
638 |
+
"mix_success_2": "Công tác chuẩn bị hỗn hợp đã hoàn tất.",
|
639 |
+
"duration": "Thời lượng âm thanh là {duration_hours} giờ ({duration_seconds} giây).",
|
640 |
+
"write": "Sử dụng {name} để viết.",
|
641 |
+
"write_audio": "Đang nhập {name} bằng đường dẫn gốc:",
|
642 |
+
"original_not_valid": "Cảnh báo: mảng nguồn gốc gần như im lặng hoặc trống.",
|
643 |
+
"shape_audio": "Hình dạng dữ liệu âm thanh trước khi xử lý",
|
644 |
+
"convert_data": "Kiểu dữ liệu trước khi chuyển đổi",
|
645 |
+
"original_source_to_int16": "Đã chuyển đổi original_source thành int16.",
|
646 |
+
"shape_audio_2": "Hình dạng dữ liệu âm thanh xen kẽ",
|
647 |
+
"create_audiosegment": "Đã tạo AudioSegment hoàn tất.",
|
648 |
+
"create_audiosegment_error": "Lỗi cụ thể khi tạo AudioSegment",
|
649 |
+
"export_error": "Lỗi xuất file âm thanh",
|
650 |
+
"export_success": "Đã xuất hoàn tất tệp âm thanh sang",
|
651 |
+
"clean": "Chạy thu gom rác...",
|
652 |
+
"clean_cache": "Xóa bộ nhớ đệm {name}...",
|
653 |
+
"del_path": "Xóa đường dẫn, nguồn và gốc của tệp âm thanh đầu vào...",
|
654 |
+
"not_success": "Quá trình đăng không hoàn tất: ",
|
655 |
+
"resample_error": "Lỗi trong quá trình lấy mẫu lại",
|
656 |
+
"shapes": "Hình dạng",
|
657 |
+
"wav_resolution": "Loại độ phân giải",
|
658 |
+
"warnings": "Cảnh báo: Đã phát hiện các giá trị cực kỳ hung hãn",
|
659 |
+
"warnings_2": "Cảnh báo: Đã phát hiện NaN hoặc giá trị vô hạn trong đầu vào sóng. Hình dạng",
|
660 |
+
"process_file": "Đang xử lý tập tin... \n",
|
661 |
+
"save_instruments": "Lưu bản nhạc ngược...",
|
662 |
+
"assert": "Các tệp âm thanh phải có hình dạng giống nhau - Mix: {mixshape}, Inst: {instrumentalshape}",
|
663 |
+
"rubberband": "Không thể thực Rubberband. Vui lòng xác minh rằng Rubberband-cli đã được cài đặt.",
|
664 |
+
"rate": "Tỉ lệ phải hoàn toàn tích cực",
|
665 |
+
"gdown_error": "Không thể truy xuất liên kết công khai của tệp. Bạn có thể cần phải thay đổi quyền thành bất kỳ ai có liên kết hoặc đã có nhiều quyền truy cập.",
|
666 |
+
"to": "Đến:",
|
667 |
+
"gdown_value_error": "Phải chỉ định đường dẫn hoặc id",
|
668 |
+
"missing_url": "Thiếu đường dẫn",
|
669 |
+
"mac_not_match": "MAC không khớp",
|
670 |
+
"file_not_access": "Tệp tin không thể truy cập",
|
671 |
+
"int_resp==-3": "Yêu cầu không hoàn tất, đang thử lại",
|
672 |
+
"search_separate": "Tìm bản tách...",
|
673 |
+
"found_choice": "Tìm thấy {choice}",
|
674 |
+
"separator==0": "Không tìm thấy bản tách nào!",
|
675 |
+
"select_separate": "Chọn bản tách",
|
676 |
+
"start_app": "Khởi động giao diện...",
|
677 |
+
"provide_audio": "Nhập đường dẫn đến tệp âm thanh",
|
678 |
+
"set_torch_mps": "Cài đặt thiết bị Torch thành MPS",
|
679 |
+
"googletts": "Chuyển ��ổi văn bản bằng google",
|
680 |
+
"pitch_info_2": "Cao độ giọng nói của bộ chuyển đổi văn bản",
|
681 |
+
"waveform": "Dạng sóng phải có hình dạng (# khung, # kênh)",
|
682 |
+
"freq_mask_smooth_hz": "freq_mask_smooth_hz cần ít nhất là {hz}Hz",
|
683 |
+
"time_mask_smooth_ms": "time_mask_smooth_ms cần ít nhất là {ms}ms",
|
684 |
+
"x": "x phải lớn hơn",
|
685 |
+
"xn": "xn phải lớn hơn",
|
686 |
+
"not_found_pid": "Không thấy tiến trình nào!",
|
687 |
+
"end_pid": "Đã kết thúc tiến trình!",
|
688 |
+
"clean_audios": "Bắt đầu dọn dẹp âm thanh...",
|
689 |
+
"clean_audios_success": "Hoàn tất dọn dẹp tệp âm thanh!",
|
690 |
+
"clean_separate": "Bắt đầu dọn dẹp mô hình tách nhạc...",
|
691 |
+
"clean_separate_success": "Hoàn tất dọn dẹp mô hình tách nhạc!",
|
692 |
+
"clean_model": "Bắt đầu dọn dẹp mô hình...",
|
693 |
+
"clean_model_success": "Hoàn tất dọn dẹp tệp mô hình!",
|
694 |
+
"clean_index": "Bắt đầu dọn dẹp chỉ mục...",
|
695 |
+
"clean_index_success": "Hoàn tất dọn dẹp tệp chỉ mục!",
|
696 |
+
"clean_pretrain": "Bắt đầu dọn mô hình huấn luyện...",
|
697 |
+
"clean_pretrain_success": "Dọn tệp hoàn tất!",
|
698 |
+
"clean_all_audios": "Bắt đầu dọn tất cả tệp âm thanh...",
|
699 |
+
"clean_all_audios_success": "Hoàn tất dọn tất cả tệp âm thanh!",
|
700 |
+
"not_found_separate_model": "Không tìm thấy tệp mô hình tách nhạc nào!",
|
701 |
+
"clean_all_separate_model": "Bắt đầu dọn tất cả tệp mô hình tách nhạc...",
|
702 |
+
"clean_all_separate_model_success": "Hoàn tất dọn tất cả tệp mô hình tách nhạc!",
|
703 |
+
"clean_all_models_success": "Hoàn tất dọn dẹp tất cả các tệp mô hình",
|
704 |
+
"not_found_pretrained": "Không tìm thấy tệp mô hình huấn luyện trước nào!",
|
705 |
+
"clean_all_pretrained": "Bắt đầu dọn tất cả tệp mô hình huấn luyện trước...",
|
706 |
+
"clean_all_pretrained_success": "Hoàn tất dọn tất cả tệp mô hình huấn luyện trước!",
|
707 |
+
"not_found_log": "Không tìm thấy tệp nhật ký nào!",
|
708 |
+
"clean_all_log": "Bắt đầu dọn dẹp tất cả tệp nhật ký...",
|
709 |
+
"clean_all_log_success": "Hoàn tất dọn dẹp nhật ký!",
|
710 |
+
"not_found_predictors": "Không tìm thấy tệp mô hình dự đoán nào!",
|
711 |
+
"clean_all_predictors": "Bắt đầu dọn dẹp tất cả tệp mô hình dự đoán...",
|
712 |
+
"clean_all_predictors_success": "Hoàn tất dọn dẹp tất cả tệp mô hình dự đoán!",
|
713 |
+
"not_found_embedders": "Không tìm thấy tệp mô hình nhúng nào!",
|
714 |
+
"clean_all_embedders": "Bắt đầu dọn tất cả tệp mô hình nhúng...",
|
715 |
+
"clean_all_embedders_success": "Hoàn tất dọn dẹp tất cả tệp mô hình nhúng!",
|
716 |
+
"provide_folder": "Vui lòng cung cấp thư mục hợp lệ!",
|
717 |
+
"empty_folder": "Thư mục dữ liệu trống!",
|
718 |
+
"clean_dataset": "Bắt đầu dọn dẹp thư mục dữ liệu...",
|
719 |
+
"clean_dataset_success": "Hoàn tất dọn dẹp thư mục dữ liệu!",
|
720 |
+
"vocoder": "Bộ mã hóa",
|
721 |
+
"vocoder_info": "Bộ mã hóa giọng nói dùng để phân tích và tổng hợp tín hiệu giọng nói của con người để chuyển đổi giọng nói.",
|
722 |
+
"code_error": "Lỗi: Nhận mã trạng thái",
|
723 |
+
"json_error": "Lỗi: Không thể phân tích từ phản hồi.",
|
724 |
+
"requests_error": "Yêu cầu thất bại: {e}",
|
725 |
+
"memory_efficient_training": "Sử dụng hiệu quả bộ nhớ",
|
726 |
+
"not_use_pretrain_error_download": "Sẽ không dùng huấn luyện trước vì không có mô hình",
|
727 |
+
"start_clean_model": "Bắt đầu dọn tất cả mô hình...",
|
728 |
+
"provide_file_settings": "Vui lòng cung cấp tệp cài đặt trước!",
|
729 |
+
"load_presets": "Đã tải tệp cài đặt trước {presets}",
|
730 |
+
"provide_filename_settings": "Vui lòng cung cấp tên tệp cài đặt trước!",
|
731 |
+
"choose1": "Vui lòng chọn 1 để xuất!",
|
732 |
+
"export_settings": "Đã xuất tệp cài đặt trước {name}",
|
733 |
+
"use_presets": "Sử dụng tệp cài đặt trước",
|
734 |
+
"file_preset": "Tệp cài đặt trước",
|
735 |
+
"load_file": "Tải tệp",
|
736 |
+
"export_file": "Xuất tệp cài đặt trước",
|
737 |
+
"save_clean": "Lưu làm sạch",
|
738 |
+
"save_autotune": "Lưu tự điều chỉnh",
|
739 |
+
"save_pitch": "Lưu cao độ",
|
740 |
+
"save_index_2": "Lưu ảnh hưởng chỉ mục",
|
741 |
+
"save_resample": "Lưu lấy mẫu lại",
|
742 |
+
"save_filter": "Lưu trung vị",
|
743 |
+
"save_envelope": "Lưu đường bao âm",
|
744 |
+
"save_protect": "Lưu bảo vệ âm",
|
745 |
+
"save_split": "Lưu cắt âm",
|
746 |
+
"filename_to_save": "Tên khi lưu tệp",
|
747 |
+
"upload_presets": "Tải lên tệp cài đặt",
|
748 |
+
"stop": "Dừng tiến trình",
|
749 |
+
"stop_separate": "Dừng Tách Nhạc",
|
750 |
+
"stop_convert": "Dừng Chuyển Đổi",
|
751 |
+
"stop_create_dataset": "Dừng Tạo Dữ Liệu",
|
752 |
+
"stop_training": "Dừng Huấn Luyện",
|
753 |
+
"stop_extract": "Dừng Xử Lí Dữ Liệu",
|
754 |
+
"stop_preprocess": "Dừng Trích Xuất Dữ Liệu",
|
755 |
+
"cleaner": "Dọn Dẹp",
|
756 |
+
"clean_audio": "Dọn dẹp âm thanh",
|
757 |
+
"clean_all": "Dọn tất cả",
|
758 |
+
"clean_file": "Dọn tệp",
|
759 |
+
"clean_models": "Dọn tệp mô hình",
|
760 |
+
"clean_pretrained": "Dọn tệp mô hình huấn luyện trước",
|
761 |
+
"clean_separated": "Dọn tệp mô hình tách nhạc",
|
762 |
+
"clean_presets": "Dọn dẹp tệp cài đặt trước",
|
763 |
+
"clean_datasets": "Dọn dẹp thư mục dữ liệu huấn luyện",
|
764 |
+
"clean_dataset_folder": "Dọn thư mục dữ liệu",
|
765 |
+
"clean_log": "Dọn dẹp tệp nhật ký",
|
766 |
+
"clean_predictors": "Dọn mô hình dự đoán",
|
767 |
+
"clean_embed": "Dọn mô hình nhúng",
|
768 |
+
"clean_presets_2": "Bắt đầu dọn dẹp tệp cài đặt trước...",
|
769 |
+
"clean_presets_success": "Hoàn tất dọn dẹp tệp!",
|
770 |
+
"not_found_presets": "Không tìm thấy tệp cài đặt sẳn nào trong thư mục!",
|
771 |
+
"clean_all_presets": "Bắt đầu dọn dẹp tất cả tệp cài đặt sẳn...",
|
772 |
+
"clean_all_presets_success": "Hoàn tất dọn dẹp tất cả tệp cài đặt sẳn!",
|
773 |
+
"port": "Cổng {port} không thể dùng! Giảm cổng xuống một...",
|
774 |
+
"empty_json": "{file}: Bị lỗi hoặc trống",
|
775 |
+
"thank": "Cảm ơn bạn đã báo cáo lỗi và cũng xin lỗi bạn vì sự bất tiện do lỗi gây ra này!",
|
776 |
+
"error_read_log": "Đã xảy ra lỗi khi đọc các tệp nhật ký!",
|
777 |
+
"error_send": "Đã xảy ra lỗi khi gửi báo cáo! Hãy liên hệ tôi qua Discord: pham_huynh_anh!",
|
778 |
+
"report_bugs": "Báo Cáo Lỗi",
|
779 |
+
"agree_log": "Đồng ý cung cấp tất cả tệp nhật ký",
|
780 |
+
"error_info": "Mô tả lỗi",
|
781 |
+
"error_info_2": "Cung cấp thêm thông tin về lỗi",
|
782 |
+
"report_bug_info": "Báo cáo các lỗi xảy ra khi sử dụng chương trình",
|
783 |
+
"sr_info": "LƯU Ý: MỘT SỐ ĐỊNH DẠNG KHÔNG HỖ TRỢ TRÊN 48000",
|
784 |
+
"report_info": "Nếu được bạn hãy đồng ý cung cấp các tệp nhật ký để hỗ trợ quá trình sửa lỗi\n\nNếu không cung cấp các tệp nhật ký bạn hãy mô tả chi tiết lỗi, lỗi xảy ra khi nào ở đâu\n\nNếu hệ thống báo cáo này bị lỗi nốt thì bạn có thể liên hệ qua [ISSUE]({github}) hoặc discord: `pham_huynh_anh`",
|
785 |
+
"default_setting": "Đã xảy ra lỗi khi sử dụng tách, đặt tất cả cài đặt về mặc định...",
|
786 |
+
"dataset_folder1": "Vui lòng nhập tên thư mục dữ liệu",
|
787 |
+
"checkpointing_err": "Các tham số của mô hình đào tạo trước như tốc độ mẫu hoặc kiến trúc không khớp với mô hình đã chọn."
|
788 |
+
}
|
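Note: the translation entries above are plain str.format() templates; placeholders such as {elapsed_time} or {output_path} are filled in by the Python code at runtime. A minimal sketch with illustrative values (not the project's actual loader):

import json

# load the language pack shown above
with open("assets/languages/vi-VN.json", encoding="utf-8") as f:
    translations = json.load(f)

# "convert_batch_success" is defined above with {elapsed_time} and {output_path} placeholders
print(translations["convert_batch_success"].format(elapsed_time=12.3, output_path="audios/output.wav"))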
assets/logs/mute/f0/mute.wav.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9b9acf9ab7facdb032e1d687fe35182670b0b94566c4b209ae48c239d19956a6
|
3 |
+
size 1332
|
assets/logs/mute/f0_voiced/mute.wav.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:30792849c8e72d67e6691754077f2888b101cb741e9c7f193c91dd9692870c87
|
3 |
+
size 2536
|
assets/logs/mute/sliced_audios/mute32000.wav
ADDED
Binary file (192 kB). View file
|
|
assets/logs/mute/sliced_audios/mute40000.wav
ADDED
Binary file (240 kB). View file
|
|
assets/logs/mute/sliced_audios/mute441000.wav
ADDED
Binary file (529 kB). View file
|
|
assets/logs/mute/sliced_audios/mute48000.wav
ADDED
Binary file (288 kB). View file
|
|
assets/logs/mute/sliced_audios_16k/mute.wav
ADDED
Binary file (96.1 kB). View file
|
|
assets/logs/mute/v1_extracted/mute.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:64d5abbac078e19a3f649c0d78a02cb33a71407ded3ddf2db78e6b803d0c0126
|
3 |
+
size 152704
|
assets/logs/mute/v2_extracted/mute.npy
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:16ef62b957887ac9f0913aa5158f18983afff1ef5a3e4c5fd067ac20fc380d54
|
3 |
+
size 457856
|
assets/models/embedders/.gitattributes
ADDED
File without changes
|
assets/models/predictors/.gitattributes
ADDED
File without changes
|
assets/models/pretrained_custom/.gitattributes
ADDED
File without changes
|
assets/models/pretrained_v1/.gitattributes
ADDED
File without changes
|
assets/models/pretrained_v2/.gitattributes
ADDED
File without changes
|
assets/models/uvr5/.gitattributes
ADDED
File without changes
|
assets/presets/.gitattributes
ADDED
File without changes
|
assets/weights/.gitattributes
ADDED
File without changes
|
audios/.gitattributes
ADDED
File without changes
|
dataset/.gitattributes
ADDED
File without changes
|
main/app/app.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
main/app/tensorboard.py
ADDED
@@ -0,0 +1,30 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import json
|
4 |
+
import logging
|
5 |
+
import webbrowser
|
6 |
+
|
7 |
+
from tensorboard import program
|
8 |
+
|
9 |
+
sys.path.append(os.getcwd())
|
10 |
+
|
11 |
+
from main.configs.config import Config
|
12 |
+
translations = Config().translations
|
13 |
+
|
14 |
+
with open(os.path.join("main", "configs", "config.json"), "r") as f:
|
15 |
+
configs = json.load(f)
|
16 |
+
|
17 |
+
def launch_tensorboard():
|
18 |
+
for l in ["root", "tensorboard"]:
|
19 |
+
logging.getLogger(l).setLevel(logging.ERROR)
|
20 |
+
|
21 |
+
tb = program.TensorBoard()
|
22 |
+
tb.configure(argv=[None, "--logdir", "assets/logs", f"--port={configs['tensorboard_port']}"])  # single quotes inside the f-string avoid a quoting error on Python < 3.12
|
23 |
+
url = tb.launch()
|
24 |
+
|
25 |
+
print(f"{translations['tensorboard_url']}: {url}")
|
26 |
+
if "--open" in sys.argv: webbrowser.open(url)
|
27 |
+
|
28 |
+
return f"{translations['tensorboard_url']}: {url}"
|
29 |
+
|
30 |
+
if __name__ == "__main__": launch_tensorboard()
|
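Usage sketch for the script above (assumed invocation, grounded in the sys.argv check in the code): running it directly launches TensorBoard on the configured tensorboard_port, and passing --open also opens the returned URL in a browser.

# python main/app/tensorboard.py --open
from main.app.tensorboard import launch_tensorboard

message = launch_tensorboard()  # returns "<tensorboard_url label>: http://..."
print(message)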
main/configs/config.json
ADDED
@@ -0,0 +1,26 @@
1 |
+
{
|
2 |
+
"language": "vi-VN",
|
3 |
+
"support_language": ["en-US", "vi-VN"],
|
4 |
+
"theme": "NoCrypt/miku",
|
5 |
+
"themes": ["NoCrypt/miku", "gstaff/xkcd", "JohnSmith9982/small_and_pretty", "ParityError/Interstellar", "earneleh/paris", "shivi/calm_seafoam", "Hev832/Applio", "YTheme/Minecraft", "gstaff/sketch", "SebastianBravo/simci_css", "allenai/gradio-theme", "Nymbo/Nymbo_Theme_5", "lone17/kotaemon", "Zarkel/IBM_Carbon_Theme", "SherlockRamos/Feliz", "freddyaboulton/dracula_revamped", "freddyaboulton/bad-theme-space", "gradio/dracula_revamped", "abidlabs/dracula_revamped", "gradio/dracula_test", "gradio/seafoam", "gradio/glass", "gradio/monochrome", "gradio/soft", "gradio/default", "gradio/base", "abidlabs/pakistan", "dawood/microsoft_windows", "ysharma/steampunk", "ysharma/huggingface", "abidlabs/Lime", "freddyaboulton/this-theme-does-not-exist-2", "aliabid94/new-theme", "aliabid94/test2", "aliabid94/test3", "aliabid94/test4", "abidlabs/banana", "freddyaboulton/test-blue", "gstaff/whiteboard", "ysharma/llamas", "abidlabs/font-test", "YenLai/Superhuman", "bethecloud/storj_theme", "sudeepshouche/minimalist", "knotdgaf/gradiotest", "ParityError/Anime", "Ajaxon6255/Emerald_Isle", "ParityError/LimeFace", "finlaymacklon/smooth_slate", "finlaymacklon/boxy_violet", "derekzen/stardust", "EveryPizza/Cartoony-Gradio-Theme", "Ifeanyi/Cyanister", "Tshackelton/IBMPlex-DenseReadable", "snehilsanyal/scikit-learn", "Himhimhim/xkcd", "nota-ai/theme", "rawrsor1/Everforest", "rottenlittlecreature/Moon_Goblin", "abidlabs/test-yellow", "abidlabs/test-yellow3", "idspicQstitho/dracula_revamped", "kfahn/AnimalPose", "HaleyCH/HaleyCH_Theme", "simulKitke/dracula_test", "braintacles/CrimsonNight", "wentaohe/whiteboardv2", "reilnuud/polite", "remilia/Ghostly", "Franklisi/darkmode", "coding-alt/soft", "xiaobaiyuan/theme_land", "step-3-profit/Midnight-Deep", "xiaobaiyuan/theme_demo", "Taithrah/Minimal", "Insuz/SimpleIndigo", "zkunn/Alipay_Gradio_theme", "Insuz/Mocha", "xiaobaiyuan/theme_brief", "Ama434/434-base-Barlow", "Ama434/def_barlow", "Ama434/neutral-barlow", "dawood/dracula_test", "nuttea/Softblue", "BlueDancer/Alien_Diffusion", "naughtondale/monochrome", "Dagfinn1962/standard", "default"],
|
6 |
+
"mdx_model": ["Main_340", "Main_390", "Main_406", "Main_427", "Main_438", "Inst_full_292", "Inst_HQ_1", "Inst_HQ_2", "Inst_HQ_3", "Inst_HQ_4", "Inst_HQ_5", "Kim_Vocal_1", "Kim_Vocal_2", "Kim_Inst", "Inst_187_beta", "Inst_82_beta", "Inst_90_beta", "Voc_FT", "Crowd_HQ", "Inst_1", "Inst_2", "Inst_3", "MDXNET_1_9703", "MDXNET_2_9682", "MDXNET_3_9662", "Inst_Main", "MDXNET_Main", "MDXNET_9482"],
|
7 |
+
"demucs_model": ["HT-Normal", "HT-Tuned", "HD_MMI", "HT_6S"],
|
8 |
+
"edge_tts": ["af-ZA-AdriNeural", "af-ZA-WillemNeural", "sq-AL-AnilaNeural", "sq-AL-IlirNeural", "am-ET-AmehaNeural", "am-ET-MekdesNeural", "ar-DZ-AminaNeural", "ar-DZ-IsmaelNeural", "ar-BH-AliNeural", "ar-BH-LailaNeural", "ar-EG-SalmaNeural", "ar-EG-ShakirNeural", "ar-IQ-BasselNeural", "ar-IQ-RanaNeural", "ar-JO-SanaNeural", "ar-JO-TaimNeural", "ar-KW-FahedNeural", "ar-KW-NouraNeural", "ar-LB-LaylaNeural", "ar-LB-RamiNeural", "ar-LY-ImanNeural", "ar-LY-OmarNeural", "ar-MA-JamalNeural", "ar-MA-MounaNeural", "ar-OM-AbdullahNeural", "ar-OM-AyshaNeural", "ar-QA-AmalNeural", "ar-QA-MoazNeural", "ar-SA-HamedNeural", "ar-SA-ZariyahNeural", "ar-SY-AmanyNeural", "ar-SY-LaithNeural", "ar-TN-HediNeural", "ar-TN-ReemNeural", "ar-AE-FatimaNeural", "ar-AE-HamdanNeural", "ar-YE-MaryamNeural", "ar-YE-SalehNeural", "az-AZ-BabekNeural", "az-AZ-BanuNeural", "bn-BD-NabanitaNeural", "bn-BD-PradeepNeural", "bn-IN-BashkarNeural", "bn-IN-TanishaaNeural", "bs-BA-GoranNeural", "bs-BA-VesnaNeural", "bg-BG-BorislavNeural", "bg-BG-KalinaNeural", "my-MM-NilarNeural", "my-MM-ThihaNeural", "ca-ES-EnricNeural", "ca-ES-JoanaNeural", "zh-HK-HiuGaaiNeural", "zh-HK-HiuMaanNeural", "zh-HK-WanLungNeural", "zh-CN-XiaoxiaoNeural", "zh-CN-XiaoyiNeural", "zh-CN-YunjianNeural", "zh-CN-YunxiNeural", "zh-CN-YunxiaNeural", "zh-CN-YunyangNeural", "zh-CN-liaoning-XiaobeiNeural", "zh-TW-HsiaoChenNeural", "zh-TW-YunJheNeural", "zh-TW-HsiaoYuNeural", "zh-CN-shaanxi-XiaoniNeural", "hr-HR-GabrijelaNeural", "hr-HR-SreckoNeural", "cs-CZ-AntoninNeural", "cs-CZ-VlastaNeural", "da-DK-ChristelNeural", "da-DK-JeppeNeural", "nl-BE-ArnaudNeural", "nl-BE-DenaNeural", "nl-NL-ColetteNeural", "nl-NL-FennaNeural", "nl-NL-MaartenNeural", "en-AU-NatashaNeural", "en-AU-WilliamNeural", "en-CA-ClaraNeural", "en-CA-LiamNeural", "en-HK-SamNeural", "en-HK-YanNeural", "en-IN-NeerjaExpressiveNeural", "en-IN-NeerjaNeural", "en-IN-PrabhatNeural", "en-IE-ConnorNeural", "en-IE-EmilyNeural", "en-KE-AsiliaNeural", "en-KE-ChilembaNeural", "en-NZ-MitchellNeural", "en-NZ-MollyNeural", "en-NG-AbeoNeural", "en-NG-EzinneNeural", "en-PH-JamesNeural", "en-PH-RosaNeural", "en-SG-LunaNeural", "en-SG-WayneNeural", "en-ZA-LeahNeural", "en-ZA-LukeNeural", "en-TZ-ElimuNeural", "en-TZ-ImaniNeural", "en-GB-LibbyNeural", "en-GB-MaisieNeural", "en-GB-RyanNeural", "en-GB-SoniaNeural", "en-GB-ThomasNeural", "en-US-AvaMultilingualNeural", "en-US-AndrewMultilingualNeural", "en-US-EmmaMultilingualNeural", "en-US-BrianMultilingualNeural", "en-US-AvaNeural", "en-US-AndrewNeural", "en-US-EmmaNeural", "en-US-BrianNeural", "en-US-AnaNeural", "en-US-AriaNeural", "en-US-ChristopherNeural", "en-US-EricNeural", "en-US-GuyNeural", "en-US-JennyNeural", "en-US-MichelleNeural", "en-US-RogerNeural", "en-US-SteffanNeural", "et-EE-AnuNeural", "et-EE-KertNeural", "fil-PH-AngeloNeural", "fil-PH-BlessicaNeural", "fi-FI-HarriNeural", "fi-FI-NooraNeural", "fr-BE-CharlineNeural", "fr-BE-GerardNeural", "fr-CA-ThierryNeural", "fr-CA-AntoineNeural", "fr-CA-JeanNeural", "fr-CA-SylvieNeural", "fr-FR-VivienneMultilingualNeural", "fr-FR-RemyMultilingualNeural", "fr-FR-DeniseNeural", "fr-FR-EloiseNeural", "fr-FR-HenriNeural", "fr-CH-ArianeNeural", "fr-CH-FabriceNeural", "gl-ES-RoiNeural", "gl-ES-SabelaNeural", "ka-GE-EkaNeural", "ka-GE-GiorgiNeural", "de-AT-IngridNeural", "de-AT-JonasNeural", "de-DE-SeraphinaMultilingualNeural", "de-DE-FlorianMultilingualNeural", "de-DE-AmalaNeural", "de-DE-ConradNeural", "de-DE-KatjaNeural", "de-DE-KillianNeural", "de-CH-JanNeural", "de-CH-LeniNeural", "el-GR-AthinaNeural", 
"el-GR-NestorasNeural", "gu-IN-DhwaniNeural", "gu-IN-NiranjanNeural", "he-IL-AvriNeural", "he-IL-HilaNeural", "hi-IN-MadhurNeural", "hi-IN-SwaraNeural", "hu-HU-NoemiNeural", "hu-HU-TamasNeural", "is-IS-GudrunNeural", "is-IS-GunnarNeural", "id-ID-ArdiNeural", "id-ID-GadisNeural", "ga-IE-ColmNeural", "ga-IE-OrlaNeural", "it-IT-GiuseppeNeural", "it-IT-DiegoNeural", "it-IT-ElsaNeural", "it-IT-IsabellaNeural", "ja-JP-KeitaNeural", "ja-JP-NanamiNeural", "jv-ID-DimasNeural", "jv-ID-SitiNeural", "kn-IN-GaganNeural", "kn-IN-SapnaNeural", "kk-KZ-AigulNeural", "kk-KZ-DauletNeural", "km-KH-PisethNeural", "km-KH-SreymomNeural", "ko-KR-HyunsuNeural", "ko-KR-InJoonNeural", "ko-KR-SunHiNeural", "lo-LA-ChanthavongNeural", "lo-LA-KeomanyNeural", "lv-LV-EveritaNeural", "lv-LV-NilsNeural", "lt-LT-LeonasNeural", "lt-LT-OnaNeural", "mk-MK-AleksandarNeural", "mk-MK-MarijaNeural", "ms-MY-OsmanNeural", "ms-MY-YasminNeural", "ml-IN-MidhunNeural", "ml-IN-SobhanaNeural", "mt-MT-GraceNeural", "mt-MT-JosephNeural", "mr-IN-AarohiNeural", "mr-IN-ManoharNeural", "mn-MN-BataaNeural", "mn-MN-YesuiNeural", "ne-NP-HemkalaNeural", "ne-NP-SagarNeural", "nb-NO-FinnNeural", "nb-NO-PernilleNeural", "ps-AF-GulNawazNeural", "ps-AF-LatifaNeural", "fa-IR-DilaraNeural", "fa-IR-FaridNeural", "pl-PL-MarekNeural", "pl-PL-ZofiaNeural", "pt-BR-ThalitaNeural", "pt-BR-AntonioNeural", "pt-BR-FranciscaNeural", "pt-PT-DuarteNeural", "pt-PT-RaquelNeural", "ro-RO-AlinaNeural", "ro-RO-EmilNeural", "ru-RU-DmitryNeural", "ru-RU-SvetlanaNeural", "sr-RS-NicholasNeural", "sr-RS-SophieNeural", "si-LK-SameeraNeural", "si-LK-ThiliniNeural", "sk-SK-LukasNeural", "sk-SK-ViktoriaNeural", "sl-SI-PetraNeural", "sl-SI-RokNeural", "so-SO-MuuseNeural", "so-SO-UbaxNeural", "es-AR-ElenaNeural", "es-AR-TomasNeural", "es-BO-MarceloNeural", "es-BO-SofiaNeural", "es-CL-CatalinaNeural", "es-CL-LorenzoNeural", "es-ES-XimenaNeural", "es-CO-GonzaloNeural", "es-CO-SalomeNeural", "es-CR-JuanNeural", "es-CR-MariaNeural", "es-CU-BelkysNeural", "es-CU-ManuelNeural", "es-DO-EmilioNeural", "es-DO-RamonaNeural", "es-EC-AndreaNeural", "es-EC-LuisNeural", "es-SV-LorenaNeural", "es-SV-RodrigoNeural", "es-GQ-JavierNeural", "es-GQ-TeresaNeural", "es-GT-AndresNeural", "es-GT-MartaNeural", "es-HN-CarlosNeural", "es-HN-KarlaNeural", "es-MX-DaliaNeural", "es-MX-JorgeNeural", "es-NI-FedericoNeural", "es-NI-YolandaNeural", "es-PA-MargaritaNeural", "es-PA-RobertoNeural", "es-PY-MarioNeural", "es-PY-TaniaNeural", "es-PE-AlexNeural", "es-PE-CamilaNeural", "es-PR-KarinaNeural", "es-PR-VictorNeural", "es-ES-AlvaroNeural", "es-ES-ElviraNeural", "es-US-AlonsoNeural", "es-US-PalomaNeural", "es-UY-MateoNeural", "es-UY-ValentinaNeural", "es-VE-PaolaNeural", "es-VE-SebastianNeural", "su-ID-JajangNeural", "su-ID-TutiNeural", "sw-KE-RafikiNeural", "sw-KE-ZuriNeural", "sw-TZ-DaudiNeural", "sw-TZ-RehemaNeural", "sv-SE-MattiasNeural", "sv-SE-SofieNeural", "ta-IN-PallaviNeural", "ta-IN-ValluvarNeural", "ta-MY-KaniNeural", "ta-MY-SuryaNeural", "ta-SG-AnbuNeural", "ta-SG-VenbaNeural", "ta-LK-KumarNeural", "ta-LK-SaranyaNeural", "te-IN-MohanNeural", "te-IN-ShrutiNeural", "th-TH-NiwatNeural", "th-TH-PremwadeeNeural", "tr-TR-AhmetNeural", "tr-TR-EmelNeural", "uk-UA-OstapNeural", "uk-UA-PolinaNeural", "ur-IN-GulNeural", "ur-IN-SalmanNeural", "ur-PK-AsadNeural", "ur-PK-UzmaNeural", "uz-UZ-MadinaNeural", "uz-UZ-SardorNeural", "vi-VN-HoaiMyNeural", "vi-VN-NamMinhNeural", "cy-GB-AledNeural", "cy-GB-NiaNeural", "zu-ZA-ThandoNeural", "zu-ZA-ThembaNeural"],
|
9 |
+
"google_tts_voice": ["af", "am", "ar", "bg", "bn", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "es", "et", "eu", "fi", "fr", "fr-CA", "gl", "gu", "ha", "hi", "hr", "hu", "id", "is", "it", "iw", "ja", "jw", "km", "kn", "ko", "la", "lt", "lv", "ml", "mr", "ms", "my", "ne", "nl", "no", "pa", "pl", "pt", "pt-PT", "ro", "ru", "si", "sk", "sq", "sr", "su", "sv", "sw", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi", "yue", "zh-CN", "zh-TW", "zh"],
|
10 |
+
"separator_tab": true,
|
11 |
+
"convert_tab": true,
|
12 |
+
"tts_tab": true,
|
13 |
+
"effects_tab": true,
|
14 |
+
"create_dataset_tab": true,
|
15 |
+
"training_tab": true,
|
16 |
+
"fushion_tab": true,
|
17 |
+
"read_tab": true,
|
18 |
+
"downloads_tab": true,
|
19 |
+
"settings_tab": true,
|
20 |
+
"report_bug_tab": true,
|
21 |
+
"app_port": 7860,
|
22 |
+
"tensorboard_port": 6870,
|
23 |
+
"num_of_restart": 5,
|
24 |
+
"server_name": "0.0.0.0",
|
25 |
+
"app_show_error": true
|
26 |
+
}
|
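The "language" value above must be one of "support_language" and must map to a JSON file in assets/languages (see Config.multi_language below). A hedged sketch of switching the UI to English by editing this file (assumed workflow, not an official CLI):

import json
import os

config_path = os.path.join("main", "configs", "config.json")
with open(config_path, "r", encoding="utf-8") as f:
    configs = json.load(f)

assert "en-US" in configs["support_language"]  # only listed languages are accepted
configs["language"] = "en-US"

with open(config_path, "w", encoding="utf-8") as f:
    json.dump(configs, f, indent=4, ensure_ascii=False)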
main/configs/config.py
ADDED
@@ -0,0 +1,70 @@
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import torch
|
4 |
+
|
5 |
+
version_config_paths = [os.path.join(version, size) for version in ["v1", "v2"] for size in ["32000.json", "40000.json", "44100.json", "48000.json"]]
|
6 |
+
|
7 |
+
def singleton(cls):
|
8 |
+
instances = {}
|
9 |
+
def get_instance(*args, **kwargs):
|
10 |
+
if cls not in instances: instances[cls] = cls(*args, **kwargs)
|
11 |
+
return instances[cls]
|
12 |
+
return get_instance
|
13 |
+
|
14 |
+
@singleton
|
15 |
+
class Config:
|
16 |
+
def __init__(self):
|
17 |
+
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
18 |
+
self.gpu_name = (torch.cuda.get_device_name(int(self.device.split(":")[-1])) if self.device.startswith("cuda") else None)
|
19 |
+
self.translations = self.multi_language()
|
20 |
+
self.json_config = self.load_config_json()
|
21 |
+
self.gpu_mem = None
|
22 |
+
self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
|
23 |
+
|
24 |
+
def multi_language(self):
|
25 |
+
try:
|
26 |
+
with open(os.path.join("main", "configs", "config.json"), "r") as f:
|
27 |
+
configs = json.load(f)
|
28 |
+
|
29 |
+
lang = configs.get("language", "vi-VN")
|
30 |
+
if len([l for l in os.listdir(os.path.join("assets", "languages")) if l.endswith(".json")]) < 1: raise FileNotFoundError("Không tìm thấy bất cứ gói ngôn ngữ nào(No package languages found)")
|
31 |
+
|
32 |
+
if not lang: lang = "vi-VN"
|
33 |
+
if lang not in configs["support_language"]: raise ValueError("Ngôn ngữ không được hỗ trợ(Language not supported)")
|
34 |
+
|
35 |
+
lang_path = os.path.join("assets", "languages", f"{lang}.json")
|
36 |
+
if not os.path.exists(lang_path): lang_path = os.path.join("assets", "languages", "vi-VN.json")
|
37 |
+
|
38 |
+
with open(lang_path, encoding="utf-8") as f:
|
39 |
+
translations = json.load(f)
|
40 |
+
except json.JSONDecodeError:
|
41 |
+
print("main/configs/config.json hoặc gói ngôn ngữ bị lỗi hoặc trống (broken or empty JSON)")  # self.translations is not set yet here, so print a plain message
|
42 |
+
translations = {}  # fall back to an empty mapping so the return below does not raise UnboundLocalError
|
43 |
+
return translations
|
44 |
+
|
45 |
+
def load_config_json(self):
|
46 |
+
configs = {}
|
47 |
+
for config_file in version_config_paths:
|
48 |
+
try:
|
49 |
+
with open(os.path.join("main", "configs", config_file), "r") as f:
|
50 |
+
configs[config_file] = json.load(f)
|
51 |
+
except json.JSONDecodeError:
|
52 |
+
print(self.translations["empty_json"].format(file=config_file))
|
53 |
+
pass
|
54 |
+
return configs
|
55 |
+
|
56 |
+
def device_config(self):
|
57 |
+
if self.device.startswith("cuda"): self.set_cuda_config()
|
58 |
+
elif self.has_mps(): self.device = "mps"
|
59 |
+
else: self.device = "cpu"
|
60 |
+
|
61 |
+
if self.gpu_mem is not None and self.gpu_mem <= 4: return 1, 5, 30, 32
|
62 |
+
return 1, 6, 38, 41
|
63 |
+
|
64 |
+
def set_cuda_config(self):
|
65 |
+
i_device = int(self.device.split(":")[-1])
|
66 |
+
self.gpu_name = torch.cuda.get_device_name(i_device)
|
67 |
+
self.gpu_mem = torch.cuda.get_device_properties(i_device).total_memory // (1024**3)
|
68 |
+
|
69 |
+
def has_mps(self):
|
70 |
+
return torch.backends.mps.is_available()
|
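Because of the @singleton decorator above, every Config() call returns the same cached instance, so the translations and device settings are only loaded once. A minimal usage sketch:

from main.configs.config import Config

cfg_a = Config()
cfg_b = Config()
assert cfg_a is cfg_b  # same object: singleton() caches the first instance

print(cfg_a.device)  # "cuda:0", "mps" or "cpu" depending on the machine
print(cfg_a.x_pad, cfg_a.x_query, cfg_a.x_center, cfg_a.x_max)  # reduced when GPU memory <= 4 GB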
main/configs/v1/32000.json
ADDED
@@ -0,0 +1,46 @@
1 |
+
{
|
2 |
+
"train": {
|
3 |
+
"log_interval": 200,
|
4 |
+
"seed": 1234,
|
5 |
+
"epochs": 20000,
|
6 |
+
"learning_rate": 0.0001,
|
7 |
+
"betas": [0.8, 0.99],
|
8 |
+
"eps": 1e-09,
|
9 |
+
"batch_size": 4,
|
10 |
+
"lr_decay": 0.999875,
|
11 |
+
"segment_size": 12800,
|
12 |
+
"init_lr_ratio": 1,
|
13 |
+
"warmup_epochs": 0,
|
14 |
+
"c_mel": 45,
|
15 |
+
"c_kl": 1.0
|
16 |
+
},
|
17 |
+
"data": {
|
18 |
+
"max_wav_value": 32768.0,
|
19 |
+
"sample_rate": 32000,
|
20 |
+
"filter_length": 1024,
|
21 |
+
"hop_length": 320,
|
22 |
+
"win_length": 1024,
|
23 |
+
"n_mel_channels": 80,
|
24 |
+
"mel_fmin": 0.0,
|
25 |
+
"mel_fmax": null
|
26 |
+
},
|
27 |
+
"model": {
|
28 |
+
"inter_channels": 192,
|
29 |
+
"hidden_channels": 192,
|
30 |
+
"filter_channels": 768,
|
31 |
+
"text_enc_hidden_dim": 256,
|
32 |
+
"n_heads": 2,
|
33 |
+
"n_layers": 6,
|
34 |
+
"kernel_size": 3,
|
35 |
+
"p_dropout": 0,
|
36 |
+
"resblock": "1",
|
37 |
+
"resblock_kernel_sizes": [3, 7, 11],
|
38 |
+
"resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
39 |
+
"upsample_rates": [10, 4, 2, 2, 2],
|
40 |
+
"upsample_initial_channel": 512,
|
41 |
+
"upsample_kernel_sizes": [16, 16, 4, 4, 4],
|
42 |
+
"use_spectral_norm": false,
|
43 |
+
"gin_channels": 256,
|
44 |
+
"spk_embed_dim": 109
|
45 |
+
}
|
46 |
+
}
|
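In these sample-rate configs the product of model.upsample_rates equals data.hop_length (10*4*2*2*2 = 320 here, 400 for 40 kHz, 441 for 44.1 kHz, 480 for 48 kHz), so the generator expands one feature frame back into exactly one hop of audio samples. A quick illustrative consistency check (not part of the project):

import json
import math

with open("main/configs/v1/32000.json") as f:
    cfg = json.load(f)

# one decoder frame must cover exactly one STFT hop
assert math.prod(cfg["model"]["upsample_rates"]) == cfg["data"]["hop_length"]

frames_per_second = cfg["data"]["sample_rate"] / cfg["data"]["hop_length"]
print(frames_per_second)  # 100.0 feature frames per second of audio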
main/configs/v1/40000.json
ADDED
@@ -0,0 +1,46 @@
1 |
+
{
|
2 |
+
"train": {
|
3 |
+
"log_interval": 200,
|
4 |
+
"seed": 1234,
|
5 |
+
"epochs": 20000,
|
6 |
+
"learning_rate": 0.0001,
|
7 |
+
"betas": [0.8, 0.99],
|
8 |
+
"eps": 1e-09,
|
9 |
+
"batch_size": 4,
|
10 |
+
"lr_decay": 0.999875,
|
11 |
+
"segment_size": 12800,
|
12 |
+
"init_lr_ratio": 1,
|
13 |
+
"warmup_epochs": 0,
|
14 |
+
"c_mel": 45,
|
15 |
+
"c_kl": 1.0
|
16 |
+
},
|
17 |
+
"data": {
|
18 |
+
"max_wav_value": 32768.0,
|
19 |
+
"sample_rate": 40000,
|
20 |
+
"filter_length": 2048,
|
21 |
+
"hop_length": 400,
|
22 |
+
"win_length": 2048,
|
23 |
+
"n_mel_channels": 125,
|
24 |
+
"mel_fmin": 0.0,
|
25 |
+
"mel_fmax": null
|
26 |
+
},
|
27 |
+
"model": {
|
28 |
+
"inter_channels": 192,
|
29 |
+
"hidden_channels": 192,
|
30 |
+
"filter_channels": 768,
|
31 |
+
"text_enc_hidden_dim": 256,
|
32 |
+
"n_heads": 2,
|
33 |
+
"n_layers": 6,
|
34 |
+
"kernel_size": 3,
|
35 |
+
"p_dropout": 0,
|
36 |
+
"resblock": "1",
|
37 |
+
"resblock_kernel_sizes": [3, 7, 11],
|
38 |
+
"resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
39 |
+
"upsample_rates": [10, 10, 2, 2],
|
40 |
+
"upsample_initial_channel": 512,
|
41 |
+
"upsample_kernel_sizes": [16, 16, 4, 4],
|
42 |
+
"use_spectral_norm": false,
|
43 |
+
"gin_channels": 256,
|
44 |
+
"spk_embed_dim": 109
|
45 |
+
}
|
46 |
+
}
|
main/configs/v1/44100.json
ADDED
@@ -0,0 +1,46 @@
1 |
+
{
|
2 |
+
"train": {
|
3 |
+
"log_interval": 200,
|
4 |
+
"seed": 1234,
|
5 |
+
"epochs": 20000,
|
6 |
+
"learning_rate": 0.0001,
|
7 |
+
"betas": [0.8, 0.99],
|
8 |
+
"eps": 1e-09,
|
9 |
+
"batch_size": 4,
|
10 |
+
"lr_decay": 0.999875,
|
11 |
+
"segment_size": 15876,
|
12 |
+
"init_lr_ratio": 1,
|
13 |
+
"warmup_epochs": 0,
|
14 |
+
"c_mel": 45,
|
15 |
+
"c_kl": 1.0
|
16 |
+
},
|
17 |
+
"data": {
|
18 |
+
"max_wav_value": 32768.0,
|
19 |
+
"sample_rate": 44100,
|
20 |
+
"filter_length": 2048,
|
21 |
+
"hop_length": 441,
|
22 |
+
"win_length": 2048,
|
23 |
+
"n_mel_channels": 160,
|
24 |
+
"mel_fmin": 0.0,
|
25 |
+
"mel_fmax": null
|
26 |
+
},
|
27 |
+
"model": {
|
28 |
+
"inter_channels": 192,
|
29 |
+
"hidden_channels": 192,
|
30 |
+
"filter_channels": 768,
|
31 |
+
"text_enc_hidden_dim": 256,
|
32 |
+
"n_heads": 2,
|
33 |
+
"n_layers": 6,
|
34 |
+
"kernel_size": 3,
|
35 |
+
"p_dropout": 0,
|
36 |
+
"resblock": "1",
|
37 |
+
"resblock_kernel_sizes": [3, 7, 11],
|
38 |
+
"resblock_dilation_sizes": [[1, 3, 5], [ 1, 3, 5], [1, 3, 5]],
|
39 |
+
"upsample_rates": [7, 7, 3, 3],
|
40 |
+
"upsample_initial_channel": 512,
|
41 |
+
"upsample_kernel_sizes": [14, 14, 6, 6],
|
42 |
+
"use_spectral_norm": false,
|
43 |
+
"gin_channels": 256,
|
44 |
+
"spk_embed_dim": 109
|
45 |
+
}
|
46 |
+
}
|
main/configs/v1/48000.json
ADDED
@@ -0,0 +1,46 @@
1 |
+
{
|
2 |
+
"train": {
|
3 |
+
"log_interval": 200,
|
4 |
+
"seed": 1234,
|
5 |
+
"epochs": 20000,
|
6 |
+
"learning_rate": 0.0001,
|
7 |
+
"betas": [0.8, 0.99],
|
8 |
+
"eps": 1e-09,
|
9 |
+
"batch_size": 4,
|
10 |
+
"lr_decay": 0.999875,
|
11 |
+
"segment_size": 11520,
|
12 |
+
"init_lr_ratio": 1,
|
13 |
+
"warmup_epochs": 0,
|
14 |
+
"c_mel": 45,
|
15 |
+
"c_kl": 1.0
|
16 |
+
},
|
17 |
+
"data": {
|
18 |
+
"max_wav_value": 32768.0,
|
19 |
+
"sample_rate": 48000,
|
20 |
+
"filter_length": 2048,
|
21 |
+
"hop_length": 480,
|
22 |
+
"win_length": 2048,
|
23 |
+
"n_mel_channels": 128,
|
24 |
+
"mel_fmin": 0.0,
|
25 |
+
"mel_fmax": null
|
26 |
+
},
|
27 |
+
"model": {
|
28 |
+
"inter_channels": 192,
|
29 |
+
"hidden_channels": 192,
|
30 |
+
"filter_channels": 768,
|
31 |
+
"text_enc_hidden_dim": 256,
|
32 |
+
"n_heads": 2,
|
33 |
+
"n_layers": 6,
|
34 |
+
"kernel_size": 3,
|
35 |
+
"p_dropout": 0,
|
36 |
+
"resblock": "1",
|
37 |
+
"resblock_kernel_sizes": [3, 7, 11],
|
38 |
+
"resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
39 |
+
"upsample_rates": [10, 6, 2, 2, 2],
|
40 |
+
"upsample_initial_channel": 512,
|
41 |
+
"upsample_kernel_sizes": [16, 16, 4, 4, 4],
|
42 |
+
"use_spectral_norm": false,
|
43 |
+
"gin_channels": 256,
|
44 |
+
"spk_embed_dim": 109
|
45 |
+
}
|
46 |
+
}
|
main/configs/v2/32000.json
ADDED
@@ -0,0 +1,42 @@
1 |
+
{
|
2 |
+
"train": {
|
3 |
+
"log_interval": 200,
|
4 |
+
"seed": 1234,
|
5 |
+
"learning_rate": 0.0001,
|
6 |
+
"betas": [0.8, 0.99],
|
7 |
+
"eps": 1e-09,
|
8 |
+
"lr_decay": 0.999875,
|
9 |
+
"segment_size": 12800,
|
10 |
+
"c_mel": 45,
|
11 |
+
"c_kl": 1.0
|
12 |
+
},
|
13 |
+
"data": {
|
14 |
+
"max_wav_value": 32768.0,
|
15 |
+
"sample_rate": 32000,
|
16 |
+
"filter_length": 1024,
|
17 |
+
"hop_length": 320,
|
18 |
+
"win_length": 1024,
|
19 |
+
"n_mel_channels": 80,
|
20 |
+
"mel_fmin": 0.0,
|
21 |
+
"mel_fmax": null
|
22 |
+
},
|
23 |
+
"model": {
|
24 |
+
"inter_channels": 192,
|
25 |
+
"hidden_channels": 192,
|
26 |
+
"filter_channels": 768,
|
27 |
+
"text_enc_hidden_dim": 768,
|
28 |
+
"n_heads": 2,
|
29 |
+
"n_layers": 6,
|
30 |
+
"kernel_size": 3,
|
31 |
+
"p_dropout": 0,
|
32 |
+
"resblock": "1",
|
33 |
+
"resblock_kernel_sizes": [3, 7, 11],
|
34 |
+
"resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
35 |
+
"upsample_rates": [10, 8, 2, 2],
|
36 |
+
"upsample_initial_channel": 512,
|
37 |
+
"upsample_kernel_sizes": [20, 16, 4, 4],
|
38 |
+
"use_spectral_norm": false,
|
39 |
+
"gin_channels": 256,
|
40 |
+
"spk_embed_dim": 109
|
41 |
+
}
|
42 |
+
}
|
main/configs/v2/40000.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 0.0001,
    "betas": [0.8, 0.99],
    "eps": 1e-09,
    "lr_decay": 0.999875,
    "segment_size": 12800,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 40000,
    "filter_length": 2048,
    "hop_length": 400,
    "win_length": 2048,
    "n_mel_channels": 125,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    "upsample_rates": [10, 10, 2, 2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [16, 16, 4, 4],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
main/configs/v2/44100.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 0.0001,
    "betas": [0.8, 0.99],
    "eps": 1e-09,
    "lr_decay": 0.999875,
    "segment_size": 15876,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 44100,
    "filter_length": 2048,
    "hop_length": 441,
    "win_length": 2048,
    "n_mel_channels": 160,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    "upsample_rates": [7, 7, 3, 3],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [14, 14, 6, 6],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
main/configs/v2/48000.json
ADDED
@@ -0,0 +1,42 @@
{
  "train": {
    "log_interval": 200,
    "seed": 1234,
    "learning_rate": 0.0001,
    "betas": [0.8, 0.99],
    "eps": 1e-09,
    "lr_decay": 0.999875,
    "segment_size": 17280,
    "c_mel": 45,
    "c_kl": 1.0
  },
  "data": {
    "max_wav_value": 32768.0,
    "sample_rate": 48000,
    "filter_length": 2048,
    "hop_length": 480,
    "win_length": 2048,
    "n_mel_channels": 128,
    "mel_fmin": 0.0,
    "mel_fmax": null
  },
  "model": {
    "inter_channels": 192,
    "hidden_channels": 192,
    "filter_channels": 768,
    "text_enc_hidden_dim": 768,
    "n_heads": 2,
    "n_layers": 6,
    "kernel_size": 3,
    "p_dropout": 0,
    "resblock": "1",
    "resblock_kernel_sizes": [3, 7, 11],
    "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    "upsample_rates": [12, 10, 2, 2],
    "upsample_initial_channel": 512,
    "upsample_kernel_sizes": [24, 20, 4, 4],
    "use_spectral_norm": false,
    "gin_channels": 256,
    "spk_embed_dim": 109
  }
}
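Compared with the v1 configs above, the v2 configs drop the epochs, batch_size, init_lr_ratio and warmup_epochs training fields and raise "text_enc_hidden_dim" from 256 to 768, which lines up with convert.py below taking the projected (final_proj) embedder output for v1 models and the raw 768-dimensional layer-12 features for v2. A small comparison sketch, assuming the configs are read from the repository root:

import json

v1 = json.load(open("main/configs/v1/48000.json"))
v2 = json.load(open("main/configs/v2/48000.json"))
print(v1["model"]["text_enc_hidden_dim"], v2["model"]["text_enc_hidden_dim"])  # 256 768
print(sorted(set(v1["train"]) - set(v2["train"])))  # ['batch_size', 'epochs', 'init_lr_ratio', 'warmup_epochs']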
main/inference/audio_effects.py
ADDED
@@ -0,0 +1,170 @@
import os
import sys
import librosa
import argparse

import numpy as np
import soundfile as sf

from distutils.util import strtobool
from scipy.signal import butter, filtfilt
from pedalboard import Pedalboard, Chorus, Distortion, Reverb, PitchShift, Delay, Limiter, Gain, Bitcrush, Clipping, Compressor, Phaser, HighpassFilter

sys.path.append(os.getcwd())

from main.configs.config import Config
from main.library.utils import pydub_convert

translations = Config().translations

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, default="./audios/apply_effects.wav")
    parser.add_argument("--export_format", type=str, default="wav")
    parser.add_argument("--resample", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--resample_sr", type=int, default=0)
    parser.add_argument("--chorus", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--chorus_depth", type=float, default=0.5)
    parser.add_argument("--chorus_rate", type=float, default=1.5)
    parser.add_argument("--chorus_mix", type=float, default=0.5)
    parser.add_argument("--chorus_delay", type=int, default=10)
    parser.add_argument("--chorus_feedback", type=float, default=0)
    parser.add_argument("--distortion", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--drive_db", type=int, default=20)
    parser.add_argument("--reverb", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--reverb_room_size", type=float, default=0.5)
    parser.add_argument("--reverb_damping", type=float, default=0.5)
    parser.add_argument("--reverb_wet_level", type=float, default=0.33)
    parser.add_argument("--reverb_dry_level", type=float, default=0.67)
    parser.add_argument("--reverb_width", type=float, default=1)
    parser.add_argument("--reverb_freeze_mode", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--pitchshift", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--pitch_shift", type=int, default=0)
    parser.add_argument("--delay", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--delay_seconds", type=float, default=0.5)
    parser.add_argument("--delay_feedback", type=float, default=0.5)
    parser.add_argument("--delay_mix", type=float, default=0.5)
    parser.add_argument("--compressor", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--compressor_threshold", type=int, default=-20)
    parser.add_argument("--compressor_ratio", type=float, default=4)
    parser.add_argument("--compressor_attack_ms", type=float, default=10)
    parser.add_argument("--compressor_release_ms", type=int, default=200)
    parser.add_argument("--limiter", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--limiter_threshold", type=int, default=0)
    parser.add_argument("--limiter_release", type=int, default=100)
    parser.add_argument("--gain", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--gain_db", type=int, default=0)
    parser.add_argument("--bitcrush", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--bitcrush_bit_depth", type=int, default=16)
    parser.add_argument("--clipping", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--clipping_threshold", type=int, default=-10)
    parser.add_argument("--phaser", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--phaser_rate_hz", type=float, default=0.5)
    parser.add_argument("--phaser_depth", type=float, default=0.5)
    parser.add_argument("--phaser_centre_frequency_hz", type=int, default=1000)
    parser.add_argument("--phaser_feedback", type=float, default=0)
    parser.add_argument("--phaser_mix", type=float, default=0.5)
    parser.add_argument("--treble_bass_boost", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--bass_boost_db", type=int, default=0)
    parser.add_argument("--bass_boost_frequency", type=int, default=100)
    parser.add_argument("--treble_boost_db", type=int, default=0)
    parser.add_argument("--treble_boost_frequency", type=int, default=3000)
    parser.add_argument("--fade_in_out", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--fade_in_duration", type=float, default=2000)
    parser.add_argument("--fade_out_duration", type=float, default=2000)
    parser.add_argument("--audio_combination", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--audio_combination_input", type=str)

    return parser.parse_args()

def process_audio(input_path, output_path, resample, resample_sr, chorus_depth, chorus_rate, chorus_mix, chorus_delay, chorus_feedback, distortion_drive, reverb_room_size, reverb_damping, reverb_wet_level, reverb_dry_level, reverb_width, reverb_freeze_mode, pitch_shift, delay_seconds, delay_feedback, delay_mix, compressor_threshold, compressor_ratio, compressor_attack_ms, compressor_release_ms, limiter_threshold, limiter_release, gain_db, bitcrush_bit_depth, clipping_threshold, phaser_rate_hz, phaser_depth, phaser_centre_frequency_hz, phaser_feedback, phaser_mix, bass_boost_db, bass_boost_frequency, treble_boost_db, treble_boost_frequency, fade_in_duration, fade_out_duration, export_format, chorus, distortion, reverb, pitchshift, delay, compressor, limiter, gain, bitcrush, clipping, phaser, treble_bass_boost, fade_in_out, audio_combination, audio_combination_input):
    def bass_boost(audio, gain_db, frequency, sample_rate):
        if gain_db >= 1:
            b, a = butter(4, frequency / (0.5 * sample_rate), btype='low')
            return filtfilt(b, a, audio) * 10 ** (gain_db / 20)
        else: return audio

    def treble_boost(audio, gain_db, frequency, sample_rate):
        if gain_db >= 1:
            b, a = butter(4, frequency / (0.5 * sample_rate), btype='high')
            return filtfilt(b, a, audio) * 10 ** (gain_db / 20)
        else: return audio

    def fade_out_effect(audio, sr, duration=3.0):
        length = int(duration * sr)
        end = audio.shape[0]

        if length > end: length = end
        start = end - length

        audio[start:end] = audio[start:end] * np.linspace(1.0, 0.0, length)
        return audio

    def fade_in_effect(audio, sr, duration=3.0):
        length = int(duration * sr)
        start = 0

        if length > audio.shape[0]: length = audio.shape[0]
        end = length

        audio[start:end] = audio[start:end] * np.linspace(0.0, 1.0, length)
        return audio

    if not input_path or not os.path.exists(input_path):
        print(translations["input_not_valid"])
        sys.exit(1)

    if not output_path:
        print(translations["output_not_valid"])
        sys.exit(1)

    if os.path.exists(output_path): os.remove(output_path)

    try:
        audio, sample_rate = sf.read(input_path)
    except Exception as e:
        raise RuntimeError(translations["errors_loading_audio"].format(e=e))

    try:
        board = Pedalboard([HighpassFilter()])

        if chorus: board.append(Chorus(depth=chorus_depth, rate_hz=chorus_rate, mix=chorus_mix, centre_delay_ms=chorus_delay, feedback=chorus_feedback))
        if distortion: board.append(Distortion(drive_db=distortion_drive))
        if reverb: board.append(Reverb(room_size=reverb_room_size, damping=reverb_damping, wet_level=reverb_wet_level, dry_level=reverb_dry_level, width=reverb_width, freeze_mode=1 if reverb_freeze_mode else 0))
        if pitchshift: board.append(PitchShift(semitones=pitch_shift))
        if delay: board.append(Delay(delay_seconds=delay_seconds, feedback=delay_feedback, mix=delay_mix))
        if compressor: board.append(Compressor(threshold_db=compressor_threshold, ratio=compressor_ratio, attack_ms=compressor_attack_ms, release_ms=compressor_release_ms))
        if limiter: board.append(Limiter(threshold_db=limiter_threshold, release_ms=limiter_release))
        if gain: board.append(Gain(gain_db=gain_db))
        if bitcrush: board.append(Bitcrush(bit_depth=bitcrush_bit_depth))
        if clipping: board.append(Clipping(threshold_db=clipping_threshold))
        if phaser: board.append(Phaser(rate_hz=phaser_rate_hz, depth=phaser_depth, centre_frequency_hz=phaser_centre_frequency_hz, feedback=phaser_feedback, mix=phaser_mix))

        processed_audio = board(audio, sample_rate)

        if treble_bass_boost:
            processed_audio = bass_boost(processed_audio, bass_boost_db, bass_boost_frequency, sample_rate)
            processed_audio = treble_boost(processed_audio, treble_boost_db, treble_boost_frequency, sample_rate)

        if fade_in_out:
            processed_audio = fade_in_effect(processed_audio, sample_rate, fade_in_duration)
            processed_audio = fade_out_effect(processed_audio, sample_rate, fade_out_duration)

        if resample_sr != sample_rate and resample_sr > 0 and resample:
            target_sr = min([8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000], key=lambda x: abs(x - resample_sr))
            processed_audio = librosa.resample(processed_audio, orig_sr=sample_rate, target_sr=target_sr, res_type="soxr_vhq")
            sample_rate = target_sr

        sf.write(output_path.replace("wav", export_format), processed_audio, sample_rate, format=export_format)

        if audio_combination:
            from pydub import AudioSegment
            pydub_convert(AudioSegment.from_file(audio_combination_input)).overlay(pydub_convert(AudioSegment.from_file(output_path.replace("wav", export_format)))).export(output_path.replace("wav", export_format), format=export_format)
    except Exception as e:
        raise RuntimeError(translations["apply_error"].format(e=e))
    return output_path

if __name__ == "__main__":
    args = parse_arguments()
    process_audio(input_path=args.input_path, output_path=args.output_path, resample=args.resample, resample_sr=args.resample_sr, chorus_depth=args.chorus_depth, chorus_rate=args.chorus_rate, chorus_mix=args.chorus_mix, chorus_delay=args.chorus_delay, chorus_feedback=args.chorus_feedback, distortion_drive=args.drive_db, reverb_room_size=args.reverb_room_size, reverb_damping=args.reverb_damping, reverb_wet_level=args.reverb_wet_level, reverb_dry_level=args.reverb_dry_level, reverb_width=args.reverb_width, reverb_freeze_mode=args.reverb_freeze_mode, pitch_shift=args.pitch_shift, delay_seconds=args.delay_seconds, delay_feedback=args.delay_feedback, delay_mix=args.delay_mix, compressor_threshold=args.compressor_threshold, compressor_ratio=args.compressor_ratio, compressor_attack_ms=args.compressor_attack_ms, compressor_release_ms=args.compressor_release_ms, limiter_threshold=args.limiter_threshold, limiter_release=args.limiter_release, gain_db=args.gain_db, bitcrush_bit_depth=args.bitcrush_bit_depth, clipping_threshold=args.clipping_threshold, phaser_rate_hz=args.phaser_rate_hz, phaser_depth=args.phaser_depth, phaser_centre_frequency_hz=args.phaser_centre_frequency_hz, phaser_feedback=args.phaser_feedback, phaser_mix=args.phaser_mix, bass_boost_db=args.bass_boost_db, bass_boost_frequency=args.bass_boost_frequency, treble_boost_db=args.treble_boost_db, treble_boost_frequency=args.treble_boost_frequency, fade_in_duration=args.fade_in_duration, fade_out_duration=args.fade_out_duration, export_format=args.export_format, chorus=args.chorus, distortion=args.distortion, reverb=args.reverb, pitchshift=args.pitchshift, delay=args.delay, compressor=args.compressor, limiter=args.limiter, gain=args.gain, bitcrush=args.bitcrush, clipping=args.clipping, phaser=args.phaser, treble_bass_boost=args.treble_bass_boost, fade_in_out=args.fade_in_out, audio_combination=args.audio_combination, audio_combination_input=args.audio_combination_input)
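A minimal invocation sketch for this script, assuming it is run from the repository root (it does sys.path.append(os.getcwd())); the flag names come from parse_arguments above, the values are only illustrative:

python main/inference/audio_effects.py --input_path ./audios/input.wav --output_path ./audios/apply_effects.wav --export_format wav --reverb true --reverb_room_size 0.6 --gain true --gain_db 3

Boolean switches are parsed with strtobool, so "true"/"false" (or 1/0) are accepted.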
main/inference/convert.py
ADDED
@@ -0,0 +1,650 @@
import re
import os
import sys
import time
import faiss
import torch
import shutil
import librosa
import logging
import argparse
import warnings
import parselmouth
import onnxruntime
import logging.handlers

import numpy as np
import soundfile as sf
import torch.nn.functional as F

from tqdm import tqdm
from scipy import signal
from distutils.util import strtobool
from fairseq import checkpoint_utils

warnings.filterwarnings("ignore")
sys.path.append(os.getcwd())

from main.configs.config import Config
from main.library.predictors.FCPE import FCPE
from main.library.predictors.RMVPE import RMVPE
from main.library.predictors.WORLD import PYWORLD
from main.library.algorithm.synthesizers import Synthesizer
from main.library.predictors.CREPE import predict, mean, median
from main.library.utils import check_predictors, check_embedders, load_audio, process_audio, merge_audio

bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
config = Config()
translations = config.translations
logger = logging.getLogger(__name__)
logger.propagate = False

for l in ["torch", "faiss", "httpx", "fairseq", "httpcore", "faiss.loader", "numba.core", "urllib3"]:
    logging.getLogger(l).setLevel(logging.ERROR)

if logger.hasHandlers(): logger.handlers.clear()
else:
    console_handler = logging.StreamHandler()
    console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    console_handler.setFormatter(console_formatter)
    console_handler.setLevel(logging.INFO)
    file_handler = logging.handlers.RotatingFileHandler(os.path.join("assets", "logs", "convert.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
    file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--pitch", type=int, default=0)
    parser.add_argument("--filter_radius", type=int, default=3)
    parser.add_argument("--index_rate", type=float, default=0.5)
    parser.add_argument("--volume_envelope", type=float, default=1)
    parser.add_argument("--protect", type=float, default=0.33)
    parser.add_argument("--hop_length", type=int, default=64)
    parser.add_argument("--f0_method", type=str, default="rmvpe")
    parser.add_argument("--embedder_model", type=str, default="contentvec_base")
    parser.add_argument("--input_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, default="./audios/output.wav")
    parser.add_argument("--export_format", type=str, default="wav")
    parser.add_argument("--pth_path", type=str, required=True)
    parser.add_argument("--index_path", type=str)
    parser.add_argument("--f0_autotune", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--f0_autotune_strength", type=float, default=1)
    parser.add_argument("--clean_audio", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--clean_strength", type=float, default=0.7)
    parser.add_argument("--resample_sr", type=int, default=0)
    parser.add_argument("--split_audio", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--checkpointing", type=lambda x: bool(strtobool(x)), default=False)

    return parser.parse_args()

def main():
    args = parse_arguments()
    pitch, filter_radius, index_rate, volume_envelope, protect, hop_length, f0_method, input_path, output_path, pth_path, index_path, f0_autotune, f0_autotune_strength, clean_audio, clean_strength, export_format, embedder_model, resample_sr, split_audio, checkpointing = args.pitch, args.filter_radius, args.index_rate, args.volume_envelope, args.protect, args.hop_length, args.f0_method, args.input_path, args.output_path, args.pth_path, args.index_path, args.f0_autotune, args.f0_autotune_strength, args.clean_audio, args.clean_strength, args.export_format, args.embedder_model, args.resample_sr, args.split_audio, args.checkpointing

    log_data = {translations['pitch']: pitch, translations['filter_radius']: filter_radius, translations['index_strength']: index_rate, translations['volume_envelope']: volume_envelope, translations['protect']: protect, "Hop length": hop_length, translations['f0_method']: f0_method, translations['audio_path']: input_path, translations['output_path']: output_path.replace('wav', export_format), translations['model_path']: pth_path, translations['indexpath']: index_path, translations['autotune']: f0_autotune, translations['clear_audio']: clean_audio, translations['export_format']: export_format, translations['hubert_model']: embedder_model, translations['split_audio']: split_audio, translations['memory_efficient_training']: checkpointing}

    if clean_audio: log_data[translations['clean_strength']] = clean_strength
    if resample_sr != 0: log_data[translations['sample_rate']] = resample_sr
    if f0_autotune: log_data[translations['autotune_rate_info']] = f0_autotune_strength

    for key, value in log_data.items():
        logger.debug(f"{key}: {value}")

    check_predictors(f0_method)
    check_embedders(embedder_model)

    run_convert_script(pitch=pitch, filter_radius=filter_radius, index_rate=index_rate, volume_envelope=volume_envelope, protect=protect, hop_length=hop_length, f0_method=f0_method, input_path=input_path, output_path=output_path, pth_path=pth_path, index_path=index_path, f0_autotune=f0_autotune, f0_autotune_strength=f0_autotune_strength, clean_audio=clean_audio, clean_strength=clean_strength, export_format=export_format, embedder_model=embedder_model, resample_sr=resample_sr, split_audio=split_audio, checkpointing=checkpointing)

def run_batch_convert(params):
    path, audio_temp, export_format, cut_files, pitch, filter_radius, index_rate, volume_envelope, protect, hop_length, f0_method, pth_path, index_path, f0_autotune, f0_autotune_strength, clean_audio, clean_strength, embedder_model, resample_sr, checkpointing = params["path"], params["audio_temp"], params["export_format"], params["cut_files"], params["pitch"], params["filter_radius"], params["index_rate"], params["volume_envelope"], params["protect"], params["hop_length"], params["f0_method"], params["pth_path"], params["index_path"], params["f0_autotune"], params["f0_autotune_strength"], params["clean_audio"], params["clean_strength"], params["embedder_model"], params["resample_sr"], params["checkpointing"]

    segment_output_path = os.path.join(audio_temp, f"output_{cut_files.index(path)}.{export_format}")
    if os.path.exists(segment_output_path): os.remove(segment_output_path)

    VoiceConverter().convert_audio(pitch=pitch, filter_radius=filter_radius, index_rate=index_rate, volume_envelope=volume_envelope, protect=protect, hop_length=hop_length, f0_method=f0_method, audio_input_path=path, audio_output_path=segment_output_path, model_path=pth_path, index_path=index_path, f0_autotune=f0_autotune, f0_autotune_strength=f0_autotune_strength, clean_audio=clean_audio, clean_strength=clean_strength, export_format=export_format, embedder_model=embedder_model, resample_sr=resample_sr, checkpointing=checkpointing)
    os.remove(path)

    if os.path.exists(segment_output_path): return segment_output_path
    else:
        logger.warning(f"{translations['not_found_convert_file']}: {segment_output_path}")
        sys.exit(1)

def run_convert_script(pitch, filter_radius, index_rate, volume_envelope, protect, hop_length, f0_method, input_path, output_path, pth_path, index_path, f0_autotune, f0_autotune_strength, clean_audio, clean_strength, export_format, embedder_model, resample_sr, split_audio, checkpointing):
    cvt = VoiceConverter()
    start_time = time.time()

    pid_path = os.path.join("assets", "convert_pid.txt")
    with open(pid_path, "w") as pid_file:
        pid_file.write(str(os.getpid()))

    if not pth_path or not os.path.exists(pth_path) or os.path.isdir(pth_path) or not pth_path.endswith(".pth"):
        logger.warning(translations["provide_file"].format(filename=translations["model"]))
        sys.exit(1)

    output_dir = os.path.dirname(output_path) or output_path
    if not os.path.exists(output_dir): os.makedirs(output_dir, exist_ok=True)

    processed_segments = []
    audio_temp = os.path.join("audios_temp")
    if not os.path.exists(audio_temp) and split_audio: os.makedirs(audio_temp, exist_ok=True)

    if os.path.isdir(input_path):
        try:
            logger.info(translations["convert_batch"])
            audio_files = [f for f in os.listdir(input_path) if f.lower().endswith(("wav", "mp3", "flac", "ogg", "opus", "m4a", "mp4", "aac", "alac", "wma", "aiff", "webm", "ac3"))]

            if not audio_files:
                logger.warning(translations["not_found_audio"])
                sys.exit(1)

            logger.info(translations["found_audio"].format(audio_files=len(audio_files)))

            for audio in audio_files:
                audio_path = os.path.join(input_path, audio)
                output_audio = os.path.join(input_path, os.path.splitext(audio)[0] + f"_output.{export_format}")

                if split_audio:
                    try:
                        cut_files, time_stamps = process_audio(logger, audio_path, audio_temp)
                        params_list = [{"path": path, "audio_temp": audio_temp, "export_format": export_format, "cut_files": cut_files, "pitch": pitch, "filter_radius": filter_radius, "index_rate": index_rate, "volume_envelope": volume_envelope, "protect": protect, "hop_length": hop_length, "f0_method": f0_method, "pth_path": pth_path, "index_path": index_path, "f0_autotune": f0_autotune, "f0_autotune_strength": f0_autotune_strength, "clean_audio": clean_audio, "clean_strength": clean_strength, "embedder_model": embedder_model, "resample_sr": resample_sr, "checkpointing": checkpointing} for path in cut_files]

                        with tqdm(total=len(params_list), desc=translations["convert_audio"], ncols=100, unit="a") as pbar:
                            for params in params_list:
                                results = run_batch_convert(params)
                                processed_segments.append(results)
                                pbar.update(1)
                                logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))

                        merge_audio(processed_segments, time_stamps, audio_path, output_audio, export_format)
                    except Exception as e:
                        logger.error(translations["error_convert_batch"].format(e=e))
                    finally:
                        if os.path.exists(audio_temp): shutil.rmtree(audio_temp, ignore_errors=True)
                else:
                    try:
                        logger.info(f"{translations['convert_audio']} '{audio_path}'...")
                        if os.path.exists(output_audio): os.remove(output_audio)

                        with tqdm(total=1, desc=translations["convert_audio"], ncols=100, unit="a") as pbar:
                            cvt.convert_audio(pitch=pitch, filter_radius=filter_radius, index_rate=index_rate, volume_envelope=volume_envelope, protect=protect, hop_length=hop_length, f0_method=f0_method, audio_input_path=audio_path, audio_output_path=output_audio, model_path=pth_path, index_path=index_path, f0_autotune=f0_autotune, f0_autotune_strength=f0_autotune_strength, clean_audio=clean_audio, clean_strength=clean_strength, export_format=export_format, embedder_model=embedder_model, resample_sr=resample_sr, checkpointing=checkpointing)
                            pbar.update(1)
                            logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))
                    except Exception as e:
                        logger.error(translations["error_convert"].format(e=e))

            elapsed_time = time.time() - start_time
            logger.info(translations["convert_batch_success"].format(elapsed_time=f"{elapsed_time:.2f}", output_path=output_path.replace('wav', export_format)))
        except Exception as e:
            logger.error(translations["error_convert_batch_2"].format(e=e))
    else:
        logger.info(f"{translations['convert_audio']} '{input_path}'...")

        if not os.path.exists(input_path):
            logger.warning(translations["not_found_audio"])
            sys.exit(1)

        if os.path.isdir(output_path): output_path = os.path.join(output_path, f"output.{export_format}")
        if os.path.exists(output_path): os.remove(output_path)

        if split_audio:
            try:
                cut_files, time_stamps = process_audio(logger, input_path, audio_temp)
                params_list = [{"path": path, "audio_temp": audio_temp, "export_format": export_format, "cut_files": cut_files, "pitch": pitch, "filter_radius": filter_radius, "index_rate": index_rate, "volume_envelope": volume_envelope, "protect": protect, "hop_length": hop_length, "f0_method": f0_method, "pth_path": pth_path, "index_path": index_path, "f0_autotune": f0_autotune, "f0_autotune_strength": f0_autotune_strength, "clean_audio": clean_audio, "clean_strength": clean_strength, "embedder_model": embedder_model, "resample_sr": resample_sr, "checkpointing": checkpointing} for path in cut_files]

                with tqdm(total=len(params_list), desc=translations["convert_audio"], ncols=100, unit="a") as pbar:
                    for params in params_list:
                        results = run_batch_convert(params)
                        processed_segments.append(results)
                        pbar.update(1)
                        logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))

                merge_audio(processed_segments, time_stamps, input_path, output_path.replace("wav", export_format), export_format)
            except Exception as e:
                logger.error(translations["error_convert_batch"].format(e=e))
            finally:
                if os.path.exists(audio_temp): shutil.rmtree(audio_temp, ignore_errors=True)
        else:
            try:
                with tqdm(total=1, desc=translations["convert_audio"], ncols=100, unit="a") as pbar:
                    cvt.convert_audio(pitch=pitch, filter_radius=filter_radius, index_rate=index_rate, volume_envelope=volume_envelope, protect=protect, hop_length=hop_length, f0_method=f0_method, audio_input_path=input_path, audio_output_path=output_path, model_path=pth_path, index_path=index_path, f0_autotune=f0_autotune, f0_autotune_strength=f0_autotune_strength, clean_audio=clean_audio, clean_strength=clean_strength, export_format=export_format, embedder_model=embedder_model, resample_sr=resample_sr, checkpointing=checkpointing)
                    pbar.update(1)

                    logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))
            except Exception as e:
                logger.error(translations["error_convert"].format(e=e))

        if os.path.exists(pid_path): os.remove(pid_path)
        elapsed_time = time.time() - start_time
        logger.info(translations["convert_audio_success"].format(input_path=input_path, elapsed_time=f"{elapsed_time:.2f}", output_path=output_path.replace('wav', export_format)))

def change_rms(source_audio, source_rate, target_audio, target_rate, rate):
    rms2 = F.interpolate(torch.from_numpy(librosa.feature.rms(y=target_audio, frame_length=target_rate // 2 * 2, hop_length=target_rate // 2)).float().unsqueeze(0), size=target_audio.shape[0], mode="linear").squeeze()
    return (target_audio * (torch.pow(F.interpolate(torch.from_numpy(librosa.feature.rms(y=source_audio, frame_length=source_rate // 2 * 2, hop_length=source_rate // 2)).float().unsqueeze(0), size=target_audio.shape[0], mode="linear").squeeze(), 1 - rate) * torch.pow(torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6), rate - 1)).numpy())

class Autotune:
    def __init__(self, ref_freqs):
        self.ref_freqs = ref_freqs
        self.note_dict = self.ref_freqs

    def autotune_f0(self, f0, f0_autotune_strength):
        autotuned_f0 = np.zeros_like(f0)

        for i, freq in enumerate(f0):
            autotuned_f0[i] = freq + (min(self.note_dict, key=lambda x: abs(x - freq)) - freq) * f0_autotune_strength

        return autotuned_f0

class VC:
    def __init__(self, tgt_sr, config):
        self.x_pad = config.x_pad
        self.x_query = config.x_query
        self.x_center = config.x_center
        self.x_max = config.x_max
        self.sample_rate = 16000
        self.window = 160
        self.t_pad = self.sample_rate * self.x_pad
        self.t_pad_tgt = tgt_sr * self.x_pad
        self.t_pad2 = self.t_pad * 2
        self.t_query = self.sample_rate * self.x_query
        self.t_center = self.sample_rate * self.x_center
        self.t_max = self.sample_rate * self.x_max
        self.time_step = self.window / self.sample_rate * 1000
        self.f0_min = 50
        self.f0_max = 1100
        self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
        self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
        self.device = config.device
        self.ref_freqs = [49.00, 51.91, 55.00, 58.27, 61.74, 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, 1046.50]
        self.autotune = Autotune(self.ref_freqs)
        self.note_dict = self.autotune.note_dict

    def get_providers(self):
        ort_providers = onnxruntime.get_available_providers()

        if "CUDAExecutionProvider" in ort_providers: providers = ["CUDAExecutionProvider"]
        elif "CoreMLExecutionProvider" in ort_providers: providers = ["CoreMLExecutionProvider"]
        else: providers = ["CPUExecutionProvider"]

        return providers

    def get_f0_pm(self, x, p_len):
        f0 = (parselmouth.Sound(x, self.sample_rate).to_pitch_ac(time_step=self.window / self.sample_rate * 1000 / 1000, voicing_threshold=0.6, pitch_floor=self.f0_min, pitch_ceiling=self.f0_max).selected_array["frequency"])
        pad_size = (p_len - len(f0) + 1) // 2

        if pad_size > 0 or p_len - len(f0) - pad_size > 0: f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        return f0

    def get_f0_mangio_crepe(self, x, p_len, hop_length, model="full", onnx=False):
        providers = self.get_providers() if onnx else None

        x = x.astype(np.float32)
        x /= np.quantile(np.abs(x), 0.999)

        audio = torch.unsqueeze(torch.from_numpy(x).to(self.device, copy=True), dim=0)
        if audio.ndim == 2 and audio.shape[0] > 1: audio = torch.mean(audio, dim=0, keepdim=True).detach()

        p_len = p_len or x.shape[0] // hop_length
        source = np.array(predict(audio.detach(), self.sample_rate, hop_length, self.f0_min, self.f0_max, model, batch_size=hop_length * 2, device=self.device, pad=True, providers=providers, onnx=onnx).squeeze(0).cpu().float().numpy())
        source[source < 0.001] = np.nan
        return np.nan_to_num(np.interp(np.arange(0, len(source) * p_len, len(source)) / p_len, np.arange(0, len(source)), source))

    def get_f0_crepe(self, x, model="full", onnx=False):
        providers = self.get_providers() if onnx else None

        f0, pd = predict(torch.tensor(np.copy(x))[None].float(), self.sample_rate, self.window, self.f0_min, self.f0_max, model, batch_size=512, device=self.device, return_periodicity=True, providers=providers, onnx=onnx)
        f0, pd = mean(f0, 3), median(pd, 3)
        f0[pd < 0.1] = 0

        return f0[0].cpu().numpy()

    def get_f0_fcpe(self, x, p_len, hop_length, onnx=False, legacy=False):
        providers = self.get_providers() if onnx else None

        model_fcpe = FCPE(os.path.join("assets", "models", "predictors", "fcpe" + (".onnx" if onnx else ".pt")), hop_length=int(hop_length), f0_min=int(self.f0_min), f0_max=int(self.f0_max), dtype=torch.float32, device=self.device, sample_rate=self.sample_rate, threshold=0.03, providers=providers, onnx=onnx) if legacy else FCPE(os.path.join("assets", "models", "predictors", "fcpe" + (".onnx" if onnx else ".pt")), hop_length=self.window, f0_min=0, f0_max=8000, dtype=torch.float32, device=self.device, sample_rate=self.sample_rate, threshold=0.006, providers=providers, onnx=onnx)
        f0 = model_fcpe.compute_f0(x, p_len=p_len)

        del model_fcpe
        return f0

    def get_f0_rmvpe(self, x, legacy=False, onnx=False):
        providers = self.get_providers() if onnx else None

        rmvpe_model = RMVPE(os.path.join("assets", "models", "predictors", "rmvpe" + (".onnx" if onnx else ".pt")), device=self.device, onnx=onnx, providers=providers)
        f0 = rmvpe_model.infer_from_audio_with_pitch(x, thred=0.03, f0_min=self.f0_min, f0_max=self.f0_max) if legacy else rmvpe_model.infer_from_audio(x, thred=0.03)

        del rmvpe_model
        return f0

    def get_f0_pyworld(self, x, filter_radius, model="harvest"):
        pw = PYWORLD()

        if model == "harvest": f0, t = pw.harvest(x.astype(np.double), fs=self.sample_rate, f0_ceil=self.f0_max, f0_floor=self.f0_min, frame_period=10)
        elif model == "dio": f0, t = pw.dio(x.astype(np.double), fs=self.sample_rate, f0_ceil=self.f0_max, f0_floor=self.f0_min, frame_period=10)
        else: raise ValueError(translations["method_not_valid"])

        f0 = pw.stonemask(x.astype(np.double), self.sample_rate, t, f0)

        if filter_radius > 2 or model == "dio": f0 = signal.medfilt(f0, 3)
        return f0

    def get_f0_yin(self, x, hop_length, p_len):
        source = np.array(librosa.yin(x.astype(np.double), sr=self.sample_rate, fmin=self.f0_min, fmax=self.f0_max, hop_length=hop_length))
        source[source < 0.001] = np.nan

        return np.nan_to_num(np.interp(np.arange(0, len(source) * p_len, len(source)) / p_len, np.arange(0, len(source)), source))

    def get_f0_pyin(self, x, hop_length, p_len):
        f0, _, _ = librosa.pyin(x.astype(np.double), fmin=self.f0_min, fmax=self.f0_max, sr=self.sample_rate, hop_length=hop_length)
        source = np.array(f0)
        source[source < 0.001] = np.nan

        return np.nan_to_num(np.interp(np.arange(0, len(source) * p_len, len(source)) / p_len, np.arange(0, len(source)), source))

    def get_f0_hybrid(self, methods_str, x, p_len, hop_length, filter_radius):
        methods_str = re.search("hybrid\[(.+)\]", methods_str)
        if methods_str: methods = [method.strip() for method in methods_str.group(1).split("+")]

        f0_computation_stack, resampled_stack = [], []
        logger.debug(translations["hybrid_methods"].format(methods=methods))

        x = x.astype(np.float32)
        x /= np.quantile(np.abs(x), 0.999)

        for method in methods:
            f0 = None

            if method == "pm": f0 = self.get_f0_pm(x, p_len)
            elif method == "dio": f0 = self.get_f0_pyworld(x, filter_radius, "dio")
            elif method == "mangio-crepe-tiny": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "tiny")
            elif method == "mangio-crepe-tiny-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "tiny", onnx=True)
            elif method == "mangio-crepe-small": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "small")
            elif method == "mangio-crepe-small-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "small", onnx=True)
            elif method == "mangio-crepe-medium": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "medium")
            elif method == "mangio-crepe-medium-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "medium", onnx=True)
            elif method == "mangio-crepe-large": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "large")
            elif method == "mangio-crepe-large-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "large", onnx=True)
            elif method == "mangio-crepe-full": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "full")
            elif method == "mangio-crepe-full-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "full", onnx=True)
            elif method == "crepe-tiny": f0 = self.get_f0_crepe(x, "tiny")
            elif method == "crepe-tiny-onnx": f0 = self.get_f0_crepe(x, "tiny", onnx=True)
            elif method == "crepe-small": f0 = self.get_f0_crepe(x, "small")
            elif method == "crepe-small-onnx": f0 = self.get_f0_crepe(x, "small", onnx=True)
            elif method == "crepe-medium": f0 = self.get_f0_crepe(x, "medium")
            elif method == "crepe-medium-onnx": f0 = self.get_f0_crepe(x, "medium", onnx=True)
            elif method == "crepe-large": f0 = self.get_f0_crepe(x, "large")
            elif method == "crepe-large-onnx": f0 = self.get_f0_crepe(x, "large", onnx=True)
            elif method == "crepe-full": f0 = self.get_f0_crepe(x, "full")
            elif method == "crepe-full-onnx": f0 = self.get_f0_crepe(x, "full", onnx=True)
            elif method == "fcpe": f0 = self.get_f0_fcpe(x, p_len, int(hop_length))
            elif method == "fcpe-onnx": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), onnx=True)
            elif method == "fcpe-legacy": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), legacy=True)
            elif method == "fcpe-legacy-onnx": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), onnx=True, legacy=True)
            elif method == "rmvpe": f0 = self.get_f0_rmvpe(x)
            elif method == "rmvpe-onnx": f0 = self.get_f0_rmvpe(x, onnx=True)
            elif method == "rmvpe-legacy": f0 = self.get_f0_rmvpe(x, legacy=True)
            elif method == "rmvpe-legacy-onnx": f0 = self.get_f0_rmvpe(x, legacy=True, onnx=True)
            elif method == "harvest": f0 = self.get_f0_pyworld(x, filter_radius, "harvest")
            elif method == "yin": f0 = self.get_f0_yin(x, int(hop_length), p_len)
            elif method == "pyin": f0 = self.get_f0_pyin(x, int(hop_length), p_len)
            else: raise ValueError(translations["method_not_valid"])

            f0_computation_stack.append(f0)

        for f0 in f0_computation_stack:
            resampled_stack.append(np.interp(np.linspace(0, len(f0), p_len), np.arange(len(f0)), f0))

        return resampled_stack[0] if len(resampled_stack) == 1 else np.nanmedian(np.vstack(resampled_stack), axis=0)

    def get_f0(self, x, p_len, pitch, f0_method, filter_radius, hop_length, f0_autotune, f0_autotune_strength):
        if f0_method == "pm": f0 = self.get_f0_pm(x, p_len)
        elif f0_method == "dio": f0 = self.get_f0_pyworld(x, filter_radius, "dio")
        elif f0_method == "mangio-crepe-tiny": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "tiny")
        elif f0_method == "mangio-crepe-tiny-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "tiny", onnx=True)
        elif f0_method == "mangio-crepe-small": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "small")
        elif f0_method == "mangio-crepe-small-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "small", onnx=True)
        elif f0_method == "mangio-crepe-medium": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "medium")
        elif f0_method == "mangio-crepe-medium-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "medium", onnx=True)
        elif f0_method == "mangio-crepe-large": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "large")
        elif f0_method == "mangio-crepe-large-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "large", onnx=True)
        elif f0_method == "mangio-crepe-full": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "full")
        elif f0_method == "mangio-crepe-full-onnx": f0 = self.get_f0_mangio_crepe(x, p_len, int(hop_length), "full", onnx=True)
        elif f0_method == "crepe-tiny": f0 = self.get_f0_crepe(x, "tiny")
        elif f0_method == "crepe-tiny-onnx": f0 = self.get_f0_crepe(x, "tiny", onnx=True)
        elif f0_method == "crepe-small": f0 = self.get_f0_crepe(x, "small")
        elif f0_method == "crepe-small-onnx": f0 = self.get_f0_crepe(x, "small", onnx=True)
        elif f0_method == "crepe-medium": f0 = self.get_f0_crepe(x, "medium")
        elif f0_method == "crepe-medium-onnx": f0 = self.get_f0_crepe(x, "medium", onnx=True)
        elif f0_method == "crepe-large": f0 = self.get_f0_crepe(x, "large")
        elif f0_method == "crepe-large-onnx": f0 = self.get_f0_crepe(x, "large", onnx=True)
        elif f0_method == "crepe-full": f0 = self.get_f0_crepe(x, "full")
        elif f0_method == "crepe-full-onnx": f0 = self.get_f0_crepe(x, "full", onnx=True)
        elif f0_method == "fcpe": f0 = self.get_f0_fcpe(x, p_len, int(hop_length))
        elif f0_method == "fcpe-onnx": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), onnx=True)
        elif f0_method == "fcpe-legacy": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), legacy=True)
        elif f0_method == "fcpe-legacy-onnx": f0 = self.get_f0_fcpe(x, p_len, int(hop_length), onnx=True, legacy=True)
        elif f0_method == "rmvpe": f0 = self.get_f0_rmvpe(x)
        elif f0_method == "rmvpe-onnx": f0 = self.get_f0_rmvpe(x, onnx=True)
        elif f0_method == "rmvpe-legacy": f0 = self.get_f0_rmvpe(x, legacy=True)
        elif f0_method == "rmvpe-legacy-onnx": f0 = self.get_f0_rmvpe(x, legacy=True, onnx=True)
        elif f0_method == "harvest": f0 = self.get_f0_pyworld(x, filter_radius, "harvest")
        elif f0_method == "yin": f0 = self.get_f0_yin(x, int(hop_length), p_len)
        elif f0_method == "pyin": f0 = self.get_f0_pyin(x, int(hop_length), p_len)
        elif "hybrid" in f0_method: f0 = self.get_f0_hybrid(f0_method, x, p_len, hop_length, filter_radius)
        else: raise ValueError(translations["method_not_valid"])

        if f0_autotune: f0 = Autotune.autotune_f0(self, f0, f0_autotune_strength)

        f0 *= pow(2, pitch / 12)
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / (self.f0_mel_max - self.f0_mel_min) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255

        return np.rint(f0_mel).astype(np.int32), f0.copy()

    def voice_conversion(self, model, net_g, sid, audio0, pitch, pitchf, index, big_npy, index_rate, version, protect):
        pitch_guidance = pitch != None and pitchf != None
        feats = torch.from_numpy(audio0).float()

        if feats.dim() == 2: feats = feats.mean(-1)
        assert feats.dim() == 1, feats.dim()

        feats = feats.view(1, -1)
        padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
        inputs = {"source": feats.to(self.device), "padding_mask": padding_mask, "output_layer": 9 if version == "v1" else 12}

        with torch.no_grad():
            logits = model.extract_features(**inputs)
            feats = model.final_proj(logits[0]) if version == "v1" else logits[0]

        if protect < 0.5 and pitch_guidance: feats0 = feats.clone()

        if (not isinstance(index, type(None)) and not isinstance(big_npy, type(None)) and index_rate != 0):
            npy = feats[0].cpu().numpy()
            score, ix = index.search(npy, k=8)
            weight = np.square(1 / score)
            weight /= weight.sum(axis=1, keepdims=True)
            npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
            feats = (torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + (1 - index_rate) * feats)

        feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
        if protect < 0.5 and pitch_guidance: feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)

        p_len = audio0.shape[0] // self.window

        if feats.shape[1] < p_len:
            p_len = feats.shape[1]
            if pitch_guidance:
                pitch = pitch[:, :p_len]
                pitchf = pitchf[:, :p_len]

        if protect < 0.5 and pitch_guidance:
            pitchff = pitchf.clone()
            pitchff[pitchf > 0] = 1
            pitchff[pitchf < 1] = protect
            pitchff = pitchff.unsqueeze(-1)
            feats = feats * pitchff + feats0 * (1 - pitchff)
            feats = feats.to(feats0.dtype)

        p_len = torch.tensor([p_len], device=self.device).long()
        audio1 = ((net_g.infer(feats, p_len, pitch if pitch_guidance else None, pitchf if pitch_guidance else None, sid)[0][0, 0]).data.cpu().float().numpy())

        del feats, p_len, padding_mask
        if torch.cuda.is_available(): torch.cuda.empty_cache()
        return audio1

    def pipeline(self, model, net_g, sid, audio, pitch, f0_method, file_index, index_rate, pitch_guidance, filter_radius, tgt_sr, resample_sr, volume_envelope, version, protect, hop_length, f0_autotune, f0_autotune_strength):
        if file_index != "" and os.path.exists(file_index) and index_rate != 0:
            try:
                index = faiss.read_index(file_index)
                big_npy = index.reconstruct_n(0, index.ntotal)
            except Exception as e:
                logger.error(translations["read_faiss_index_error"].format(e=e))
                index = big_npy = None
        else: index = big_npy = None

        audio = signal.filtfilt(bh, ah, audio)
        audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
        opt_ts, audio_opt = [], []

        if audio_pad.shape[0] > self.t_max:
            audio_sum = np.zeros_like(audio)

            for i in range(self.window):
                audio_sum += audio_pad[i : i - self.window]

            for t in range(self.t_center, audio.shape[0], self.t_center):
                opt_ts.append(t - self.t_query + np.where(np.abs(audio_sum[t - self.t_query : t + self.t_query]) == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min())[0][0])

        s = 0
        t = None

        audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
        sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
        p_len = audio_pad.shape[0] // self.window

        if pitch_guidance:
            pitch, pitchf = self.get_f0(audio_pad, p_len, pitch, f0_method, filter_radius, hop_length, f0_autotune, f0_autotune_strength)
            pitch, pitchf = pitch[:p_len], pitchf[:p_len]

            if self.device == "mps": pitchf = pitchf.astype(np.float32)
            pitch, pitchf = torch.tensor(pitch, device=self.device).unsqueeze(0).long(), torch.tensor(pitchf, device=self.device).unsqueeze(0).float()

        for t in opt_ts:
            t = t // self.window * self.window
            audio_opt.append(self.voice_conversion(model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window], pitch[:, s // self.window : (t + self.t_pad2) // self.window] if pitch_guidance else None, pitchf[:, s // self.window : (t + self.t_pad2) // self.window] if pitch_guidance else None, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
            s = t

        audio_opt.append(self.voice_conversion(model, net_g, sid, audio_pad[t:], (pitch[:, t // self.window :] if t is not None else pitch) if pitch_guidance else None, (pitchf[:, t // self.window :] if t is not None else pitchf) if pitch_guidance else None, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
        audio_opt = np.concatenate(audio_opt)

        if volume_envelope != 1: audio_opt = change_rms(audio, self.sample_rate, audio_opt, tgt_sr, volume_envelope)
        if resample_sr >= self.sample_rate and tgt_sr != resample_sr: audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr, res_type="soxr_vhq")

        audio_max = np.abs(audio_opt).max() / 0.99
        if audio_max > 1: audio_opt /= audio_max

        if pitch_guidance: del pitch, pitchf
        del sid

        if torch.cuda.is_available(): torch.cuda.empty_cache()
        return audio_opt

class VoiceConverter:
    def __init__(self):
        self.config = config
        self.hubert_model = None
        self.tgt_sr = None
        self.net_g = None
        self.vc = None
        self.cpt = None
        self.version = None
        self.n_spk = None
        self.use_f0 = None
        self.loaded_model = None
        self.vocoder = "Default"
        self.checkpointing = False

    def load_embedders(self, embedder_model):
        try:
            models, _, _ = checkpoint_utils.load_model_ensemble_and_task([os.path.join("assets", "models", "embedders", embedder_model + '.pt')], suffix="")
        except Exception as e:
            logger.error(translations["read_model_error"].format(e=e))
        self.hubert_model = models[0].to(self.config.device).float().eval()

    def convert_audio(self, audio_input_path, audio_output_path, model_path, index_path, embedder_model, pitch, f0_method, index_rate, volume_envelope, protect, hop_length, f0_autotune, f0_autotune_strength, filter_radius, clean_audio, clean_strength, export_format, resample_sr = 0, sid = 0, checkpointing = False):
|
501 |
+
if file_index != "" and os.path.exists(file_index) and index_rate != 0:
|
502 |
+
try:
|
503 |
+
index = faiss.read_index(file_index)
|
504 |
+
big_npy = index.reconstruct_n(0, index.ntotal)
|
505 |
+
except Exception as e:
|
506 |
+
logger.error(translations["read_faiss_index_error"].format(e=e))
|
507 |
+
index = big_npy = None
|
508 |
+
else: index = big_npy = None
|
509 |
+
|
510 |
+
audio = signal.filtfilt(bh, ah, audio)
|
511 |
+
audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
|
512 |
+
opt_ts, audio_opt = [], []
|
513 |
+
|
514 |
+
if audio_pad.shape[0] > self.t_max:
|
515 |
+
audio_sum = np.zeros_like(audio)
|
516 |
+
|
517 |
+
for i in range(self.window):
|
518 |
+
audio_sum += audio_pad[i : i - self.window]
|
519 |
+
|
520 |
+
for t in range(self.t_center, audio.shape[0], self.t_center):
|
521 |
+
opt_ts.append(t - self.t_query + np.where(np.abs(audio_sum[t - self.t_query : t + self.t_query]) == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min())[0][0])
|
522 |
+
|
523 |
+
s = 0
|
524 |
+
t = None
|
525 |
+
|
526 |
+
audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
|
527 |
+
sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
|
528 |
+
p_len = audio_pad.shape[0] // self.window
|
529 |
+
|
530 |
+
if pitch_guidance:
|
531 |
+
pitch, pitchf = self.get_f0(audio_pad, p_len, pitch, f0_method, filter_radius, hop_length, f0_autotune, f0_autotune_strength)
|
532 |
+
pitch, pitchf = pitch[:p_len], pitchf[:p_len]
|
533 |
+
|
534 |
+
if self.device == "mps": pitchf = pitchf.astype(np.float32)
|
535 |
+
pitch, pitchf = torch.tensor(pitch, device=self.device).unsqueeze(0).long(), torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
|
536 |
+
|
537 |
+
for t in opt_ts:
|
538 |
+
t = t // self.window * self.window
|
539 |
+
audio_opt.append(self.voice_conversion(model, net_g, sid, audio_pad[s : t + self.t_pad2 + self.window], pitch[:, s // self.window : (t + self.t_pad2) // self.window] if pitch_guidance else None, pitchf[:, s // self.window : (t + self.t_pad2) // self.window] if pitch_guidance else None, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
|
540 |
+
s = t
|
541 |
+
|
542 |
+
audio_opt.append(self.voice_conversion(model, net_g, sid, audio_pad[t:], (pitch[:, t // self.window :] if t is not None else pitch) if pitch_guidance else None, (pitchf[:, t // self.window :] if t is not None else pitchf) if pitch_guidance else None, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
|
543 |
+
audio_opt = np.concatenate(audio_opt)
|
544 |
+
|
545 |
+
if volume_envelope != 1: audio_opt = change_rms(audio, self.sample_rate, audio_opt, tgt_sr, volume_envelope)
|
546 |
+
if resample_sr >= self.sample_rate and tgt_sr != resample_sr: audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr, res_type="soxr_vhq")
|
547 |
+
|
548 |
+
audio_max = np.abs(audio_opt).max() / 0.99
|
549 |
+
if audio_max > 1: audio_opt /= audio_max
|
550 |
+
|
551 |
+
if pitch_guidance: del pitch, pitchf
|
552 |
+
del sid
|
553 |
+
|
554 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
555 |
+
return audio_opt
|
556 |
+
|
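Two details of the pipeline above are easy to miss: long inputs are split at the quietest sample near each nominal chunk boundary (the audio_sum sliding window), and every chunk is converted with reflected padding that is trimmed off again before concatenation. A small sketch of the boundary search alone, with made-up window, stride and search-band sizes standing in for the class's window/t_center/t_query attributes:

    import numpy as np

    def find_cut_points(audio, window=160, center=48000, query=8000):
        # Sliding sum of `window` consecutive samples ~ a cheap local-energy envelope.
        padded = np.pad(audio, (window // 2, window // 2), mode="reflect")
        energy = np.zeros_like(audio)
        for i in range(window):
            energy += padded[i : i - window]
        cuts = []
        for t in range(center, audio.shape[0], center):
            band = np.abs(energy[t - query : t + query])
            cuts.append(t - query + int(band.argmin()))  # quietest sample near the boundary
        return cuts

    print(find_cut_points(np.random.randn(5 * 48000).astype(np.float32)))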
557 |
+
class VoiceConverter:
|
558 |
+
def __init__(self):
|
559 |
+
self.config = config
|
560 |
+
self.hubert_model = None
|
561 |
+
self.tgt_sr = None
|
562 |
+
self.net_g = None
|
563 |
+
self.vc = None
|
564 |
+
self.cpt = None
|
565 |
+
self.version = None
|
566 |
+
self.n_spk = None
|
567 |
+
self.use_f0 = None
|
568 |
+
self.loaded_model = None
|
569 |
+
self.vocoder = "Default"
|
570 |
+
self.checkpointing = False
|
571 |
+
|
572 |
+
def load_embedders(self, embedder_model):
|
573 |
+
try:
|
574 |
+
models, _, _ = checkpoint_utils.load_model_ensemble_and_task([os.path.join("assets", "models", "embedders", embedder_model + '.pt')], suffix="")
|
575 |
+
except Exception as e:
|
576 |
+
logger.error(translations["read_model_error"].format(e=e))
|
577 |
+
self.hubert_model = models[0].to(self.config.device).float().eval()
|
578 |
+
|
579 |
+
def convert_audio(self, audio_input_path, audio_output_path, model_path, index_path, embedder_model, pitch, f0_method, index_rate, volume_envelope, protect, hop_length, f0_autotune, f0_autotune_strength, filter_radius, clean_audio, clean_strength, export_format, resample_sr = 0, sid = 0, checkpointing = False):
|
580 |
+
try:
|
581 |
+
self.get_vc(model_path, sid)
|
582 |
+
audio = load_audio(audio_input_path)
|
583 |
+
self.checkpointing = checkpointing
|
584 |
+
|
585 |
+
audio_max = np.abs(audio).max() / 0.95
|
586 |
+
if audio_max > 1: audio /= audio_max
|
587 |
+
|
588 |
+
if not self.hubert_model:
|
589 |
+
if not os.path.exists(os.path.join("assets", "models", "embedders", embedder_model + '.pt')): raise FileNotFoundError(f"{translations['not_found'].format(name=translations['model'])}: {embedder_model}")
|
590 |
+
self.load_embedders(embedder_model)
|
591 |
+
|
592 |
+
if self.tgt_sr != resample_sr >= 16000: self.tgt_sr = resample_sr
|
593 |
+
target_sr = min([8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000], key=lambda x: abs(x - self.tgt_sr))
|
594 |
+
|
595 |
+
audio_output = self.vc.pipeline(model=self.hubert_model, net_g=self.net_g, sid=sid, audio=audio, pitch=pitch, f0_method=f0_method, file_index=(index_path.strip().strip('"').strip("\n").strip('"').strip().replace("trained", "added")), index_rate=index_rate, pitch_guidance=self.use_f0, filter_radius=filter_radius, tgt_sr=self.tgt_sr, resample_sr=target_sr, volume_envelope=volume_envelope, version=self.version, protect=protect, hop_length=hop_length, f0_autotune=f0_autotune, f0_autotune_strength=f0_autotune_strength)
|
596 |
+
|
597 |
+
if clean_audio:
|
598 |
+
from main.tools.noisereduce import reduce_noise
|
599 |
+
audio_output = reduce_noise(y=audio_output, sr=target_sr, prop_decrease=clean_strength)
|
600 |
+
|
601 |
+
sf.write(audio_output_path, audio_output, target_sr, format=export_format)
|
602 |
+
except Exception as e:
|
603 |
+
logger.error(translations["error_convert"].format(e=e))
|
604 |
+
|
605 |
+
import traceback
|
606 |
+
logger.debug(traceback.format_exc())
|
607 |
+
|
608 |
+
def get_vc(self, weight_root, sid):
|
609 |
+
if sid == "" or sid == []:
|
610 |
+
self.cleanup()
|
611 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
612 |
+
|
613 |
+
if not self.loaded_model or self.loaded_model != weight_root:
|
614 |
+
self.load_model(weight_root)
|
615 |
+
if self.cpt is not None: self.setup()
|
616 |
+
self.loaded_model = weight_root
|
617 |
+
|
618 |
+
def cleanup(self):
|
619 |
+
if self.hubert_model is not None:
|
620 |
+
del self.net_g, self.n_spk, self.vc, self.hubert_model, self.tgt_sr
|
621 |
+
self.hubert_model = self.net_g = self.n_spk = self.vc = self.tgt_sr = None
|
622 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
623 |
+
|
624 |
+
del self.net_g, self.cpt
|
625 |
+
if torch.cuda.is_available(): torch.cuda.empty_cache()
|
626 |
+
self.cpt = None
|
627 |
+
|
628 |
+
def load_model(self, weight_root):
|
629 |
+
self.cpt = (torch.load(weight_root, map_location="cpu") if os.path.isfile(weight_root) else None)
|
630 |
+
|
631 |
+
def setup(self):
|
632 |
+
if self.cpt is not None:
|
633 |
+
self.tgt_sr = self.cpt["config"][-1]
|
634 |
+
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0]
|
635 |
+
|
636 |
+
self.use_f0 = self.cpt.get("f0", 1)
|
637 |
+
self.version = self.cpt.get("version", "v1")
|
638 |
+
self.vocoder = self.cpt.get("vocoder", "Default")
|
639 |
+
|
640 |
+
self.text_enc_hidden_dim = 768 if self.version == "v2" else 256
|
641 |
+
self.net_g = Synthesizer(*self.cpt["config"], use_f0=self.use_f0, text_enc_hidden_dim=self.text_enc_hidden_dim, vocoder=self.vocoder, checkpointing=self.checkpointing)
|
642 |
+
del self.net_g.enc_q
|
643 |
+
|
644 |
+
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
|
645 |
+
self.net_g.eval().to(self.config.device).float()
|
646 |
+
|
647 |
+
self.vc = VC(self.tgt_sr, self.config)
|
648 |
+
self.n_spk = self.cpt["config"][-3]
|
649 |
+
|
650 |
+
if __name__ == "__main__": main()
|
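The retrieval step inside voice_conversion above fetches the 8 nearest training features from the FAISS index, weights them by inverse squared distance, and mixes the result back into the HuBERT features at index_rate. A self-contained sketch of that step with random data (768 dims matches the v2 feature size used here; the sizes and the blend strength are otherwise made up):

    import faiss
    import numpy as np

    dim, index_rate = 768, 0.75                               # v2 feature size; blend strength is arbitrary here
    big_npy = np.random.rand(10000, dim).astype(np.float32)   # stands in for the reconstructed training features
    index = faiss.IndexFlatL2(dim)
    index.add(big_npy)

    feats = np.random.rand(200, dim).astype(np.float32)       # stands in for the HuBERT features of one chunk

    score, ix = index.search(feats, 8)                        # 8 nearest training frames per input frame
    weight = np.square(1 / np.maximum(score, 1e-8))           # inverse-squared-distance weights (epsilon added here)
    weight /= weight.sum(axis=1, keepdims=True)
    retrieved = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)

    blended = retrieved * index_rate + feats * (1 - index_rate)  # index_rate controls how much is replaced
    print(blended.shape)                                         # (200, 768)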
main/inference/create_dataset.py
ADDED
@@ -0,0 +1,240 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import time
|
4 |
+
import yt_dlp
|
5 |
+
import shutil
|
6 |
+
import librosa
|
7 |
+
import logging
|
8 |
+
import argparse
|
9 |
+
import warnings
|
10 |
+
import logging.handlers
|
11 |
+
|
12 |
+
from soundfile import read, write
|
13 |
+
from distutils.util import strtobool
|
14 |
+
|
15 |
+
sys.path.append(os.getcwd())
|
16 |
+
|
17 |
+
from main.configs.config import Config
|
18 |
+
from main.library.utils import process_audio, merge_audio
|
19 |
+
|
20 |
+
translations = Config().translations
|
21 |
+
dataset_temp = os.path.join("dataset_temp")
|
22 |
+
logger = logging.getLogger(__name__)
|
23 |
+
|
24 |
+
if logger.hasHandlers(): logger.handlers.clear()
|
25 |
+
else:
|
26 |
+
console_handler = logging.StreamHandler()
|
27 |
+
console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
28 |
+
console_handler.setFormatter(console_formatter)
|
29 |
+
console_handler.setLevel(logging.INFO)
|
30 |
+
file_handler = logging.handlers.RotatingFileHandler(os.path.join("assets", "logs", "create_dataset.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
|
31 |
+
file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
32 |
+
file_handler.setFormatter(file_formatter)
|
33 |
+
file_handler.setLevel(logging.DEBUG)
|
34 |
+
logger.addHandler(console_handler)
|
35 |
+
logger.addHandler(file_handler)
|
36 |
+
logger.setLevel(logging.DEBUG)
|
37 |
+
|
38 |
+
def parse_arguments():
|
39 |
+
parser = argparse.ArgumentParser()
|
40 |
+
parser.add_argument("--input_audio", type=str, required=True)
|
41 |
+
parser.add_argument("--output_dataset", type=str, default="./dataset")
|
42 |
+
parser.add_argument("--sample_rate", type=int, default=44100)
|
43 |
+
parser.add_argument("--clean_dataset", type=lambda x: bool(strtobool(x)), default=False)
|
44 |
+
parser.add_argument("--clean_strength", type=float, default=0.7)
|
45 |
+
parser.add_argument("--separator_reverb", type=lambda x: bool(strtobool(x)), default=False)
|
46 |
+
parser.add_argument("--kim_vocal_version", type=int, default=2)
|
47 |
+
parser.add_argument("--overlap", type=float, default=0.25)
|
48 |
+
parser.add_argument("--segments_size", type=int, default=256)
|
49 |
+
parser.add_argument("--mdx_hop_length", type=int, default=1024)
|
50 |
+
parser.add_argument("--mdx_batch_size", type=int, default=1)
|
51 |
+
parser.add_argument("--denoise_mdx", type=lambda x: bool(strtobool(x)), default=False)
|
52 |
+
parser.add_argument("--skip", type=lambda x: bool(strtobool(x)), default=False)
|
53 |
+
parser.add_argument("--skip_start_audios", type=str, default="0")
|
54 |
+
parser.add_argument("--skip_end_audios", type=str, default="0")
|
55 |
+
|
56 |
+
return parser.parse_args()
|
57 |
+
|
58 |
+
def main():
|
59 |
+
pid_path = os.path.join("assets", "create_dataset_pid.txt")
|
60 |
+
with open(pid_path, "w") as pid_file:
|
61 |
+
pid_file.write(str(os.getpid()))
|
62 |
+
|
63 |
+
args = parse_arguments()
|
64 |
+
input_audio, output_dataset, sample_rate, clean_dataset, clean_strength, separator_reverb, kim_vocal_version, overlap, segments_size, hop_length, batch_size, denoise_mdx, skip, skip_start_audios, skip_end_audios = args.input_audio, args.output_dataset, args.sample_rate, args.clean_dataset, args.clean_strength, args.separator_reverb, args.kim_vocal_version, args.overlap, args.segments_size, args.mdx_hop_length, args.mdx_batch_size, args.denoise_mdx, args.skip, args.skip_start_audios, args.skip_end_audios
|
65 |
+
log_data = {translations['audio_path']: input_audio, translations['output_path']: output_dataset, translations['sr']: sample_rate, translations['clear_dataset']: clean_dataset, translations['dereveb_audio']: separator_reverb, translations['segments_size']: segments_size, translations['overlap']: overlap, "Hop length": hop_length, translations['batch_size']: batch_size, translations['denoise_mdx']: denoise_mdx, translations['skip']: skip}
|
66 |
+
|
67 |
+
if clean_dataset: log_data[translations['clean_strength']] = clean_strength
|
68 |
+
if skip:
|
69 |
+
log_data[translations['skip_start']] = skip_start_audios
|
70 |
+
log_data[translations['skip_end']] = skip_end_audios
|
71 |
+
|
72 |
+
for key, value in log_data.items():
|
73 |
+
logger.debug(f"{key}: {value}")
|
74 |
+
|
75 |
+
if kim_vocal_version not in [1, 2]: raise ValueError(translations["version_not_valid"])
|
76 |
+
start_time = time.time()
|
77 |
+
|
78 |
+
try:
|
79 |
+
paths = []
|
80 |
+
|
81 |
+
if not os.path.exists(dataset_temp): os.makedirs(dataset_temp, exist_ok=True)
|
82 |
+
urls = input_audio.replace(", ", ",").split(",")
|
83 |
+
|
84 |
+
for url in urls:
|
85 |
+
path = downloader(url, urls.index(url))
|
86 |
+
paths.append(path)
|
87 |
+
|
88 |
+
if skip:
|
89 |
+
skip_start_audios = skip_start_audios.replace(", ", ",").split(",")
|
90 |
+
skip_end_audios = skip_end_audios.replace(", ", ",").split(",")
|
91 |
+
|
92 |
+
if len(skip_start_audios) < len(paths) or len(skip_end_audios) < len(paths):
|
93 |
+
logger.warning(translations["skip<audio"])
|
94 |
+
sys.exit(1)
|
95 |
+
elif len(skip_start_audios) > len(paths) or len(skip_end_audios) > len(paths):
|
96 |
+
logger.warning(translations["skip>audio"])
|
97 |
+
sys.exit(1)
|
98 |
+
else:
|
99 |
+
for audio, skip_start_audio, skip_end_audio in zip(paths, skip_start_audios, skip_end_audios):
|
100 |
+
skip_start(audio, skip_start_audio)
|
101 |
+
skip_end(audio, skip_end_audio)
|
102 |
+
|
103 |
+
separator_paths = []
|
104 |
+
|
105 |
+
for audio in paths:
|
106 |
+
vocals = separator_music_main(audio, dataset_temp, segments_size, overlap, denoise_mdx, kim_vocal_version, hop_length, batch_size, sample_rate)
|
107 |
+
if separator_reverb: vocals = separator_reverb_audio(vocals, dataset_temp, segments_size, overlap, denoise_mdx, hop_length, batch_size, sample_rate)
|
108 |
+
separator_paths.append(vocals)
|
109 |
+
|
110 |
+
paths = separator_paths
|
111 |
+
processed_paths = []
|
112 |
+
|
113 |
+
for audio in paths:
|
114 |
+
cut_files, time_stamps = process_audio(logger, audio, os.path.dirname(audio))
|
115 |
+
processed_paths.append(merge_audio(cut_files, time_stamps, audio, os.path.splitext(audio)[0] + "_processed" + ".wav", "wav"))
|
116 |
+
|
117 |
+
paths = processed_paths
|
118 |
+
|
119 |
+
for audio_path in paths:
|
120 |
+
data, sample_rate = read(audio_path)
|
121 |
+
data = librosa.to_mono(data.T)
|
122 |
+
|
123 |
+
if clean_dataset:
|
124 |
+
from main.tools.noisereduce import reduce_noise
|
125 |
+
data = reduce_noise(y=data, prop_decrease=clean_strength)
|
126 |
+
|
127 |
+
write(audio_path, data, sample_rate)
|
128 |
+
except Exception as e:
|
129 |
+
logger.error(f"{translations['create_dataset_error']}: {e}")
|
130 |
+
|
131 |
+
import traceback
|
132 |
+
logger.error(traceback.format_exc())
|
133 |
+
finally:
|
134 |
+
for audio in paths:
|
135 |
+
shutil.move(audio, output_dataset)
|
136 |
+
|
137 |
+
if os.path.exists(dataset_temp): shutil.rmtree(dataset_temp, ignore_errors=True)
|
138 |
+
|
139 |
+
elapsed_time = time.time() - start_time
|
140 |
+
if os.path.exists(pid_path): os.remove(pid_path)
|
141 |
+
logger.info(translations["create_dataset_success"].format(elapsed_time=f"{elapsed_time:.2f}"))
|
142 |
+
|
143 |
+
def downloader(url, name):
|
144 |
+
with warnings.catch_warnings():
|
145 |
+
warnings.simplefilter("ignore")
|
146 |
+
|
147 |
+
ydl_opts = {"format": "bestaudio/best", "outtmpl": os.path.join(dataset_temp, f"{name}"), "postprocessors": [{"key": "FFmpegExtractAudio", "preferredcodec": "wav", "preferredquality": "192"}], "no_warnings": True, "noplaylist": True, "noplaylist": True, "verbose": False}
|
148 |
+
logger.info(f"{translations['starting_download']}: {url}...")
|
149 |
+
|
150 |
+
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
151 |
+
ydl.extract_info(url)
|
152 |
+
logger.info(f"{translations['download_success']}: {url}")
|
153 |
+
|
154 |
+
return os.path.join(dataset_temp, f"{name}" + ".wav")
|
155 |
+
|
156 |
+
def skip_start(input_file, seconds):
|
157 |
+
seconds = float(seconds)
data, sr = read(input_file)
|
158 |
+
total_duration = len(data) / sr
|
159 |
+
|
160 |
+
if seconds <= 0: logger.warning(translations["=<0"])
|
161 |
+
elif seconds >= total_duration: logger.warning(translations["skip_warning"].format(seconds=seconds, total_duration=f"{total_duration:.2f}"))
|
162 |
+
else:
|
163 |
+
logger.info(f"{translations['skip_start']}: {input_file}...")
|
164 |
+
write(input_file, data[int(seconds * sr):], sr)
|
165 |
+
|
166 |
+
logger.info(translations["skip_start_audio"].format(input_file=input_file))
|
167 |
+
|
168 |
+
def skip_end(input_file, seconds):
|
169 |
+
seconds = float(seconds)
data, sr = read(input_file)
|
170 |
+
total_duration = len(data) / sr
|
171 |
+
|
172 |
+
if seconds <= 0: logger.warning(translations["=<0"])
|
173 |
+
elif seconds > total_duration: logger.warning(translations["skip_warning"].format(seconds=seconds, total_duration=f"{total_duration:.2f}"))
|
174 |
+
else:
|
175 |
+
logger.info(f"{translations['skip_end']}: {input_file}...")
|
176 |
+
write(input_file, data[:-int(seconds * sr)], sr)
|
177 |
+
|
178 |
+
logger.info(translations["skip_end_audio"].format(input_file=input_file))
|
179 |
+
|
180 |
+
def separator_music_main(input, output, segments_size, overlap, denoise, version, hop_length, batch_size, sample_rate):
|
181 |
+
if not os.path.exists(input):
|
182 |
+
logger.warning(translations["input_not_valid"])
|
183 |
+
return None
|
184 |
+
|
185 |
+
if not os.path.exists(output):
|
186 |
+
logger.warning(translations["output_not_valid"])
|
187 |
+
return None
|
188 |
+
|
189 |
+
model = f"Kim_Vocal_{version}.onnx"
|
190 |
+
output_separator = separator_main(audio_file=input, model_filename=model, output_format="wav", output_dir=output, mdx_segment_size=segments_size, mdx_overlap=overlap, mdx_batch_size=batch_size, mdx_hop_length=hop_length, mdx_enable_denoise=denoise, sample_rate=sample_rate)
|
191 |
+
|
192 |
+
for f in output_separator:
|
193 |
+
path = os.path.join(output, f)
|
194 |
+
if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))
|
195 |
+
|
196 |
+
if '_(Instrumental)_' in f: os.rename(path, os.path.splitext(path)[0].replace("(", "").replace(")", "") + ".wav")
|
197 |
+
elif '_(Vocals)_' in f:
|
198 |
+
rename_file = os.path.splitext(path)[0].replace("(", "").replace(")", "") + ".wav"
|
199 |
+
os.rename(path, rename_file)
|
200 |
+
|
201 |
+
return rename_file
|
202 |
+
|
203 |
+
def separator_reverb_audio(input, output, segments_size, overlap, denoise, hop_length, batch_size, sample_rate):
|
204 |
+
if not os.path.exists(input):
|
205 |
+
logger.warning(translations["input_not_valid"])
|
206 |
+
return None
|
207 |
+
|
208 |
+
if not os.path.exists(output):
|
209 |
+
logger.warning(translations["output_not_valid"])
|
210 |
+
return None
|
211 |
+
|
212 |
+
logger.info(f"{translations['dereverb']}: {input}...")
|
213 |
+
output_dereverb = separator_main(audio_file=input, model_filename="Reverb_HQ_By_FoxJoy.onnx", output_format="wav", output_dir=output, mdx_segment_size=segments_size, mdx_overlap=overlap, mdx_batch_size=batch_size, mdx_hop_length=hop_length, mdx_enable_denoise=denoise, sample_rate=sample_rate)
|
214 |
+
|
215 |
+
for f in output_dereverb:
|
216 |
+
path = os.path.join(output, f)
|
217 |
+
if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))
|
218 |
+
|
219 |
+
if '_(Reverb)_' in f: os.rename(path, os.path.splitext(path)[0].replace("(", "").replace(")", "") + ".wav")
|
220 |
+
elif '_(No Reverb)_' in f:
|
221 |
+
rename_file = os.path.splitext(path)[0].replace("(", "").replace(")", "") + ".wav"
|
222 |
+
os.rename(path, rename_file)
|
223 |
+
|
224 |
+
logger.info(f"{translations['dereverb_success']}: {rename_file}")
|
225 |
+
return rename_file
|
226 |
+
|
227 |
+
def separator_main(audio_file=None, model_filename="Kim_Vocal_1.onnx", output_format="wav", output_dir=".", mdx_segment_size=256, mdx_overlap=0.25, mdx_batch_size=1, mdx_hop_length=1024, mdx_enable_denoise=True, sample_rate=44100):
|
228 |
+
from main.library.algorithm.separator import Separator
|
229 |
+
|
230 |
+
try:
|
231 |
+
separator = Separator(logger=logger, log_formatter=file_formatter, log_level=logging.INFO, output_dir=output_dir, output_format=output_format, output_bitrate=None, normalization_threshold=0.9, output_single_stem=None, invert_using_spec=False, sample_rate=sample_rate, mdx_params={"hop_length": mdx_hop_length, "segment_size": mdx_segment_size, "overlap": mdx_overlap, "batch_size": mdx_batch_size, "enable_denoise": mdx_enable_denoise})
|
232 |
+
separator.load_model(model_filename=model_filename)
|
233 |
+
return separator.separate(audio_file)
|
234 |
+
except:
|
235 |
+
logger.debug(translations["default_setting"])
|
236 |
+
separator = Separator(logger=logger, log_formatter=file_formatter, log_level=logging.INFO, output_dir=output_dir, output_format=output_format, output_bitrate=None, normalization_threshold=0.9, output_single_stem=None, invert_using_spec=False, sample_rate=44100, mdx_params={"hop_length": 1024, "segment_size": 256, "overlap": 0.25, "batch_size": 1, "enable_denoise": mdx_enable_denoise})
|
237 |
+
separator.load_model(model_filename=model_filename)
|
238 |
+
return separator.separate(audio_file)
|
239 |
+
|
240 |
+
if __name__ == "__main__": main()
|
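skip_start and skip_end above trim a fixed number of seconds from either end of a downloaded WAV in place; expressed on its own with soundfile and a hypothetical file name, the operation is just:

    import soundfile as sf

    def trim(path, start_seconds=0.0, end_seconds=0.0):
        data, sr = sf.read(path)
        start = int(start_seconds * sr)
        end = len(data) - int(end_seconds * sr)
        if 0 <= start < end:                  # only rewrite when something sensible remains
            sf.write(path, data[start:end], sr)

    trim("vocal_take.wav", start_seconds=3.0, end_seconds=1.5)  # hypothetical input file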
main/inference/create_index.py
ADDED
@@ -0,0 +1,100 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import faiss
|
4 |
+
import logging
|
5 |
+
import argparse
|
6 |
+
import logging.handlers
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
from multiprocessing import cpu_count
|
11 |
+
from sklearn.cluster import MiniBatchKMeans
|
12 |
+
|
13 |
+
sys.path.append(os.getcwd())
|
14 |
+
|
15 |
+
from main.configs.config import Config
|
16 |
+
translations = Config().translations
|
17 |
+
|
18 |
+
|
19 |
+
def parse_arguments():
|
20 |
+
parser = argparse.ArgumentParser()
|
21 |
+
parser.add_argument("--model_name", type=str, required=True)
|
22 |
+
parser.add_argument("--rvc_version", type=str, default="v2")
|
23 |
+
parser.add_argument("--index_algorithm", type=str, default="Auto")
|
24 |
+
|
25 |
+
return parser.parse_args()
|
26 |
+
|
27 |
+
def main():
|
28 |
+
args = parse_arguments()
|
29 |
+
|
30 |
+
exp_dir = os.path.join("assets", "logs", args.model_name)
|
31 |
+
version = args.rvc_version
|
32 |
+
index_algorithm = args.index_algorithm
|
33 |
+
logger = logging.getLogger(__name__)
|
34 |
+
|
35 |
+
if logger.hasHandlers(): logger.handlers.clear()
|
36 |
+
else:
|
37 |
+
console_handler = logging.StreamHandler()
|
38 |
+
console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
39 |
+
console_handler.setFormatter(console_formatter)
|
40 |
+
console_handler.setLevel(logging.INFO)
|
41 |
+
file_handler = logging.handlers.RotatingFileHandler(os.path.join(exp_dir, "create_index.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
|
42 |
+
file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
43 |
+
file_handler.setFormatter(file_formatter)
|
44 |
+
file_handler.setLevel(logging.DEBUG)
|
45 |
+
logger.addHandler(console_handler)
|
46 |
+
logger.addHandler(file_handler)
|
47 |
+
logger.setLevel(logging.DEBUG)
|
48 |
+
|
49 |
+
log_data = {translations['modelname']: args.model_name, translations['model_path']: exp_dir, translations['training_version']: version, translations['index_algorithm_info']: index_algorithm}
|
50 |
+
for key, value in log_data.items():
|
51 |
+
logger.debug(f"{key}: {value}")
|
52 |
+
|
53 |
+
try:
|
54 |
+
npys = []
|
55 |
+
|
56 |
+
feature_dir = os.path.join(exp_dir, f"{version}_extracted")
|
57 |
+
model_name = os.path.basename(exp_dir)
|
58 |
+
|
59 |
+
for name in sorted(os.listdir(feature_dir)):
|
60 |
+
npys.append(np.load(os.path.join(feature_dir, name)))
|
61 |
+
|
62 |
+
big_npy = np.concatenate(npys, axis=0)
|
63 |
+
big_npy_idx = np.arange(big_npy.shape[0])
|
64 |
+
|
65 |
+
np.random.shuffle(big_npy_idx)
|
66 |
+
big_npy = big_npy[big_npy_idx]
|
67 |
+
|
68 |
+
if big_npy.shape[0] > 2e5 and (index_algorithm == "Auto" or index_algorithm == "KMeans"): big_npy = (MiniBatchKMeans(n_clusters=10000, verbose=True, batch_size=256 * cpu_count(), compute_labels=False, init="random").fit(big_npy).cluster_centers_)
|
69 |
+
np.save(os.path.join(exp_dir, "total_fea.npy"), big_npy)
|
70 |
+
|
71 |
+
n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
|
72 |
+
index_trained = faiss.index_factory(256 if version == "v1" else 768, f"IVF{n_ivf},Flat")
|
73 |
+
|
74 |
+
index_ivf_trained = faiss.extract_index_ivf(index_trained)
|
75 |
+
index_ivf_trained.nprobe = 1
|
76 |
+
|
77 |
+
index_trained.train(big_npy)
|
78 |
+
faiss.write_index(index_trained, os.path.join(exp_dir, f"trained_IVF{n_ivf}_Flat_nprobe_{index_ivf_trained.nprobe}_{model_name}_{version}.index"))
|
79 |
+
|
80 |
+
index_added = faiss.index_factory(256 if version == "v1" else 768, f"IVF{n_ivf},Flat")
|
81 |
+
index_ivf_added = faiss.extract_index_ivf(index_added)
|
82 |
+
index_ivf_added.nprobe = 1
|
83 |
+
|
84 |
+
index_added.train(big_npy)
|
85 |
+
batch_size_add = 8192
|
86 |
+
|
87 |
+
for i in range(0, big_npy.shape[0], batch_size_add):
|
88 |
+
index_added.add(big_npy[i : i + batch_size_add])
|
89 |
+
|
90 |
+
index_filepath_added = os.path.join(exp_dir, f"added_IVF{n_ivf}_Flat_nprobe_{index_ivf_added.nprobe}_{model_name}_{version}.index")
|
91 |
+
faiss.write_index(index_added, index_filepath_added)
|
92 |
+
|
93 |
+
logger.info(f"{translations['save_index']} '{index_filepath_added}'")
|
94 |
+
except Exception as e:
|
95 |
+
logger.error(f"{translations['create_index_error']}: {e}")
|
96 |
+
|
97 |
+
import traceback
|
98 |
+
logger.debug(traceback.format_exc())
|
99 |
+
|
100 |
+
if __name__ == "__main__": main()
|
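create_index.py above trains an IVF,Flat FAISS index over the extracted features (256-dim for v1, 768-dim for v2), optionally shrinking very large feature sets with MiniBatchKMeans first, and adds vectors in 8192-row batches. A reduced sketch of the same build with random vectors and a hypothetical output path:

    import faiss
    import numpy as np

    dim = 768                                               # v2 feature size; v1 would use 256
    big_npy = np.random.rand(20000, dim).astype(np.float32)

    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    index = faiss.index_factory(dim, f"IVF{n_ivf},Flat")
    faiss.extract_index_ivf(index).nprobe = 1               # lists scanned per query at search time

    index.train(big_npy)                                    # learn the coarse quantizer centroids
    for i in range(0, big_npy.shape[0], 8192):              # add in batches, as the script does
        index.add(big_npy[i : i + 8192])

    faiss.write_index(index, "added_example.index")         # hypothetical output path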
main/inference/extract.py
ADDED
@@ -0,0 +1,450 @@
1 |
+
import os
|
2 |
+
import re
|
3 |
+
import sys
|
4 |
+
import time
|
5 |
+
import tqdm
|
6 |
+
import torch
|
7 |
+
import shutil
|
8 |
+
import librosa
|
9 |
+
import logging
|
10 |
+
import argparse
|
11 |
+
import warnings
|
12 |
+
import parselmouth
|
13 |
+
import logging.handlers
|
14 |
+
|
15 |
+
import numpy as np
|
16 |
+
import soundfile as sf
|
17 |
+
import torch.nn.functional as F
|
18 |
+
|
19 |
+
from random import shuffle
|
20 |
+
from multiprocessing import Pool
|
21 |
+
from distutils.util import strtobool
|
22 |
+
from fairseq import checkpoint_utils
|
23 |
+
from functools import partial
|
24 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
25 |
+
|
26 |
+
sys.path.append(os.getcwd())
|
27 |
+
|
28 |
+
from main.configs.config import Config
|
29 |
+
from main.library.predictors.FCPE import FCPE
|
30 |
+
from main.library.predictors.RMVPE import RMVPE
|
31 |
+
from main.library.predictors.WORLD import PYWORLD
|
32 |
+
from main.library.predictors.CREPE import predict, mean, median
|
33 |
+
from main.library.utils import check_predictors, check_embedders, load_audio
|
34 |
+
|
35 |
+
logger = logging.getLogger(__name__)
|
36 |
+
translations = Config().translations
|
37 |
+
logger.propagate = False
|
38 |
+
|
39 |
+
warnings.filterwarnings("ignore")
|
40 |
+
for l in ["torch", "faiss", "httpx", "fairseq", "httpcore", "faiss.loader", "numba.core", "urllib3"]:
|
41 |
+
logging.getLogger(l).setLevel(logging.ERROR)
|
42 |
+
|
43 |
+
def parse_arguments():
|
44 |
+
parser = argparse.ArgumentParser()
|
45 |
+
parser.add_argument("--model_name", type=str, required=True)
|
46 |
+
parser.add_argument("--rvc_version", type=str, default="v2")
|
47 |
+
parser.add_argument("--f0_method", type=str, default="rmvpe")
|
48 |
+
parser.add_argument("--pitch_guidance", type=lambda x: bool(strtobool(x)), default=True)
|
49 |
+
parser.add_argument("--hop_length", type=int, default=128)
|
50 |
+
parser.add_argument("--cpu_cores", type=int, default=2)
|
51 |
+
parser.add_argument("--gpu", type=str, default="-")
|
52 |
+
parser.add_argument("--sample_rate", type=int, required=True)
|
53 |
+
parser.add_argument("--embedder_model", type=str, default="contentvec_base")
|
54 |
+
|
55 |
+
return parser.parse_args()
|
56 |
+
|
57 |
+
def generate_config(rvc_version, sample_rate, model_path):
|
58 |
+
config_save_path = os.path.join(model_path, "config.json")
|
59 |
+
if not os.path.exists(config_save_path): shutil.copy(os.path.join("main", "configs", rvc_version, f"{sample_rate}.json"), config_save_path)
|
60 |
+
|
61 |
+
def generate_filelist(pitch_guidance, model_path, rvc_version, sample_rate):
|
62 |
+
gt_wavs_dir, feature_dir = os.path.join(model_path, "sliced_audios"), os.path.join(model_path, f"{rvc_version}_extracted")
|
63 |
+
f0_dir, f0nsf_dir = None, None
|
64 |
+
|
65 |
+
if pitch_guidance: f0_dir, f0nsf_dir = os.path.join(model_path, "f0"), os.path.join(model_path, "f0_voiced")
|
66 |
+
|
67 |
+
gt_wavs_files, feature_files = set(name.split(".")[0] for name in os.listdir(gt_wavs_dir)), set(name.split(".")[0] for name in os.listdir(feature_dir))
|
68 |
+
names = gt_wavs_files & feature_files & set(name.split(".")[0] for name in os.listdir(f0_dir)) & set(name.split(".")[0] for name in os.listdir(f0nsf_dir)) if pitch_guidance else gt_wavs_files & feature_files
|
69 |
+
|
70 |
+
options = []
|
71 |
+
mute_base_path = os.path.join("assets", "logs", "mute")
|
72 |
+
|
73 |
+
for name in names:
|
74 |
+
options.append(f"{gt_wavs_dir}/{name}.wav|{feature_dir}/{name}.npy|{f0_dir}/{name}.wav.npy|{f0nsf_dir}/{name}.wav.npy|0" if pitch_guidance else f"{gt_wavs_dir}/{name}.wav|{feature_dir}/{name}.npy|0")
|
75 |
+
|
76 |
+
mute_audio_path, mute_feature_path = os.path.join(mute_base_path, "sliced_audios", f"mute{sample_rate}.wav"), os.path.join(mute_base_path, f"{rvc_version}_extracted", "mute.npy")
|
77 |
+
|
78 |
+
for _ in range(2):
|
79 |
+
options.append(f"{mute_audio_path}|{mute_feature_path}|{os.path.join(mute_base_path, 'f0', 'mute.wav.npy')}|{os.path.join(mute_base_path, 'f0_voiced', 'mute.wav.npy')}|0" if pitch_guidance else f"{mute_audio_path}|{mute_feature_path}|0")
|
80 |
+
|
81 |
+
shuffle(options)
|
82 |
+
|
83 |
+
with open(os.path.join(model_path, "filelist.txt"), "w") as f:
|
84 |
+
f.write("\n".join(options))
|
85 |
+
|
86 |
+
def setup_paths(exp_dir, version = None):
|
87 |
+
wav_path = os.path.join(exp_dir, "sliced_audios_16k")
|
88 |
+
|
89 |
+
if version:
|
90 |
+
out_path = os.path.join(exp_dir, f"{version}_extracted")
|
91 |
+
os.makedirs(out_path, exist_ok=True)
|
92 |
+
|
93 |
+
return wav_path, out_path
|
94 |
+
else:
|
95 |
+
output_root1, output_root2 = os.path.join(exp_dir, "f0"), os.path.join(exp_dir, "f0_voiced")
|
96 |
+
os.makedirs(output_root1, exist_ok=True); os.makedirs(output_root2, exist_ok=True)
|
97 |
+
|
98 |
+
return wav_path, output_root1, output_root2
|
99 |
+
|
100 |
+
def read_wave(wav_path, normalize = False):
|
101 |
+
wav, sr = sf.read(wav_path)
|
102 |
+
assert sr == 16000, translations["sr_not_16000"]
|
103 |
+
|
104 |
+
feats = torch.from_numpy(wav).float()
|
105 |
+
|
106 |
+
if feats.dim() == 2: feats = feats.mean(-1)
|
107 |
+
feats = feats.view(1, -1)
|
108 |
+
|
109 |
+
if normalize: feats = F.layer_norm(feats, feats.shape)
|
110 |
+
return feats
|
111 |
+
|
112 |
+
def get_device(gpu_index):
|
113 |
+
if gpu_index == "cpu": return "cpu"
|
114 |
+
|
115 |
+
try:
|
116 |
+
index = int(gpu_index)
|
117 |
+
|
118 |
+
if index < torch.cuda.device_count(): return f"cuda:{index}"
|
119 |
+
else: logger.warning(translations["gpu_not_valid"])
|
120 |
+
except ValueError:
|
121 |
+
logger.warning(translations["gpu_not_valid"])
|
122 |
+
return "cpu"
|
123 |
+
|
124 |
+
class FeatureInput:
|
125 |
+
def __init__(self, sample_rate=16000, hop_size=160, device="cpu"):
|
126 |
+
self.fs = sample_rate
|
127 |
+
self.hop = hop_size
|
128 |
+
self.f0_bin = 256
|
129 |
+
self.f0_max = 1100.0
|
130 |
+
self.f0_min = 50.0
|
131 |
+
self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
|
132 |
+
self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
|
133 |
+
self.device = device
|
134 |
+
|
135 |
+
def get_providers(self):
|
136 |
+
import onnxruntime
|
137 |
+
|
138 |
+
ort_providers = onnxruntime.get_available_providers()
|
139 |
+
|
140 |
+
if "CUDAExecutionProvider" in ort_providers: providers = ["CUDAExecutionProvider"]
|
141 |
+
elif "CoreMLExecutionProvider" in ort_providers: providers = ["CoreMLExecutionProvider"]
|
142 |
+
else: providers = ["CPUExecutionProvider"]
|
143 |
+
|
144 |
+
return providers
|
145 |
+
|
146 |
+
def compute_f0_hybrid(self, methods_str, np_arr, hop_length):
|
147 |
+
methods_str = re.search(r"hybrid\[(.+)\]", methods_str)
|
148 |
+
if methods_str: methods = [method.strip() for method in methods_str.group(1).split("+")]
|
149 |
+
|
150 |
+
f0_computation_stack, resampled_stack = [], []
|
151 |
+
logger.debug(translations["hybrid_methods"].format(methods=methods))
|
152 |
+
|
153 |
+
for method in methods:
|
154 |
+
f0 = None
|
155 |
+
|
156 |
+
if method == "pm": f0 = self.get_pm(np_arr)
|
157 |
+
elif method == "dio": f0 = self.get_pyworld(np_arr, "dio")
|
158 |
+
elif method == "mangio-crepe-full": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "full")
|
159 |
+
elif method == "mangio-crepe-full-onnx": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "full", onnx=True)
|
160 |
+
elif method == "mangio-crepe-large": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "large")
|
161 |
+
elif method == "mangio-crepe-large-onnx": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "large", onnx=True)
|
162 |
+
elif method == "mangio-crepe-medium": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "medium")
|
163 |
+
elif method == "mangio-crepe-medium-onnx": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "medium", onnx=True)
|
164 |
+
elif method == "mangio-crepe-small": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "small")
|
165 |
+
elif method == "mangio-crepe-small-onnx": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "small", onnx=True)
|
166 |
+
elif method == "mangio-crepe-tiny": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "tiny")
|
167 |
+
elif method == "mangio-crepe-tiny-onnx": f0 = self.get_mangio_crepe(np_arr, int(hop_length), "tiny", onnx=True)
|
168 |
+
elif method == "crepe-full": f0 = self.get_crepe(np_arr, "full")
|
169 |
+
elif method == "crepe-full-onnx": f0 = self.get_crepe(np_arr, "full", onnx=True)
|
170 |
+
elif method == "crepe-large": f0 = self.get_crepe(np_arr, "large")
|
171 |
+
elif method == "crepe-large-onnx": f0 = self.get_crepe(np_arr, "large", onnx=True)
|
172 |
+
elif method == "crepe-medium": f0 = self.get_crepe(np_arr, "medium")
|
173 |
+
elif method == "crepe-medium-onnx": f0 = self.get_crepe(np_arr, "medium", onnx=True)
|
174 |
+
elif method == "crepe-small": f0 = self.get_crepe(np_arr, "small")
|
175 |
+
elif method == "crepe-small-onnx": f0 = self.get_crepe(np_arr, "small", onnx=True)
|
176 |
+
elif method == "crepe-tiny": f0 = self.get_crepe(np_arr, "tiny")
|
177 |
+
elif method == "crepe-tiny-onnx": f0 = self.get_crepe(np_arr, "tiny", onnx=True)
|
178 |
+
elif method == "fcpe": f0 = self.get_fcpe(np_arr, int(hop_length))
|
179 |
+
elif method == "fcpe-onnx": f0 = self.get_fcpe(np_arr, int(hop_length), onnx=True)
|
180 |
+
elif method == "fcpe-legacy": f0 = self.get_fcpe(np_arr, int(hop_length), legacy=True)
|
181 |
+
elif method == "fcpe-legacy-onnx": f0 = self.get_fcpe(np_arr, int(hop_length), onnx=True, legacy=True)
|
182 |
+
elif method == "rmvpe": f0 = self.get_rmvpe(np_arr)
|
183 |
+
elif method == "rmvpe-onnx": f0 = self.get_rmvpe(np_arr, onnx=True)
|
184 |
+
elif method == "rmvpe-legacy": f0 = self.get_rmvpe(np_arr, legacy=True)
|
185 |
+
elif method == "rmvpe-legacy-onnx": f0 = self.get_rmvpe(np_arr, legacy=True, onnx=True)
|
186 |
+
elif method == "harvest": f0 = self.get_pyworld(np_arr, "harvest")
|
187 |
+
elif method == "yin": f0 = self.get_yin(np_arr, int(hop_length))
|
188 |
+
elif method == "pyin": return self.get_pyin(np_arr, int(hop_length))
|
189 |
+
else: raise ValueError(translations["method_not_valid"])
|
190 |
+
|
191 |
+
f0_computation_stack.append(f0)
|
192 |
+
|
193 |
+
for f0 in f0_computation_stack:
|
194 |
+
resampled_stack.append(np.interp(np.linspace(0, len(f0), (np_arr.size // self.hop)), np.arange(len(f0)), f0))
|
195 |
+
|
196 |
+
return resampled_stack[0] if len(resampled_stack) == 1 else np.nanmedian(np.vstack(resampled_stack), axis=0)
|
197 |
+
|
198 |
+
def compute_f0(self, np_arr, f0_method, hop_length):
|
199 |
+
if f0_method == "pm": return self.get_pm(np_arr)
|
200 |
+
elif f0_method == "dio": return self.get_pyworld(np_arr, "dio")
|
201 |
+
elif f0_method == "mangio-crepe-full": return self.get_mangio_crepe(np_arr, int(hop_length), "full")
|
202 |
+
elif f0_method == "mangio-crepe-full-onnx": return self.get_mangio_crepe(np_arr, int(hop_length), "full", onnx=True)
|
203 |
+
elif f0_method == "mangio-crepe-large": return self.get_mangio_crepe(np_arr, int(hop_length), "large")
|
204 |
+
elif f0_method == "mangio-crepe-large-onnx": return self.get_mangio_crepe(np_arr, int(hop_length), "large", onnx=True)
|
205 |
+
elif f0_method == "mangio-crepe-medium": return self.get_mangio_crepe(np_arr, int(hop_length), "medium")
|
206 |
+
elif f0_method == "mangio-crepe-medium-onnx": return self.get_mangio_crepe(np_arr, int(hop_length), "medium", onnx=True)
|
207 |
+
elif f0_method == "mangio-crepe-small": return self.get_mangio_crepe(np_arr, int(hop_length), "small")
|
208 |
+
elif f0_method == "mangio-crepe-small-onnx": return self.get_mangio_crepe(np_arr, int(hop_length), "small", onnx=True)
|
209 |
+
elif f0_method == "mangio-crepe-tiny": return self.get_mangio_crepe(np_arr, int(hop_length), "tiny")
|
210 |
+
elif f0_method == "mangio-crepe-tiny-onnx": return self.get_mangio_crepe(np_arr, int(hop_length), "tiny", onnx=True)
|
211 |
+
elif f0_method == "crepe-full": return self.get_crepe(np_arr, "full")
|
212 |
+
elif f0_method == "crepe-full-onnx": return self.get_crepe(np_arr, "full", onnx=True)
|
213 |
+
elif f0_method == "crepe-large": return self.get_crepe(np_arr, "large")
|
214 |
+
elif f0_method == "crepe-large-onnx": return self.get_crepe(np_arr, "large", onnx=True)
|
215 |
+
elif f0_method == "crepe-medium": return self.get_crepe(np_arr, "medium")
|
216 |
+
elif f0_method == "crepe-medium-onnx": return self.get_crepe(np_arr, "medium", onnx=True)
|
217 |
+
elif f0_method == "crepe-small": return self.get_crepe(np_arr, "small")
|
218 |
+
elif f0_method == "crepe-small-onnx": return self.get_crepe(np_arr, "small", onnx=True)
|
219 |
+
elif f0_method == "crepe-tiny": return self.get_crepe(np_arr, "tiny")
|
220 |
+
elif f0_method == "crepe-tiny-onnx": return self.get_crepe(np_arr, "tiny", onnx=True)
|
221 |
+
elif f0_method == "fcpe": return self.get_fcpe(np_arr, int(hop_length))
|
222 |
+
elif f0_method == "fcpe-onnx": return self.get_fcpe(np_arr, int(hop_length), onnx=True)
|
223 |
+
elif f0_method == "fcpe-legacy": return self.get_fcpe(np_arr, int(hop_length), legacy=True)
|
224 |
+
elif f0_method == "fcpe-legacy-onnx": return self.get_fcpe(np_arr, int(hop_length), onnx=True, legacy=True)
|
225 |
+
elif f0_method == "rmvpe": return self.get_rmvpe(np_arr)
|
226 |
+
elif f0_method == "rmvpe-onnx": return self.get_rmvpe(np_arr, onnx=True)
|
227 |
+
elif f0_method == "rmvpe-legacy": return self.get_rmvpe(np_arr, legacy=True)
|
228 |
+
elif f0_method == "rmvpe-legacy-onnx": return self.get_rmvpe(np_arr, legacy=True, onnx=True)
|
229 |
+
elif f0_method == "harvest": return self.get_pyworld(np_arr, "harvest")
|
230 |
+
elif f0_method == "yin": return self.get_yin(np_arr, int(hop_length))
|
231 |
+
elif f0_method == "pyin": return self.get_pyin(np_arr, int(hop_length))
|
232 |
+
elif "hybrid" in f0_method: return self.compute_f0_hybrid(f0_method, np_arr, int(hop_length))
|
233 |
+
else: raise ValueError(translations["method_not_valid"])
|
234 |
+
|
235 |
+
def get_pm(self, x):
|
236 |
+
f0 = (parselmouth.Sound(x, self.fs).to_pitch_ac(time_step=(160 / 16000 * 1000) / 1000, voicing_threshold=0.6, pitch_floor=50, pitch_ceiling=1100).selected_array["frequency"])
|
237 |
+
pad_size = ((x.size // self.hop) - len(f0) + 1) // 2
|
238 |
+
|
239 |
+
if pad_size > 0 or (x.size // self.hop) - len(f0) - pad_size > 0: f0 = np.pad(f0, [[pad_size, (x.size // self.hop) - len(f0) - pad_size]], mode="constant")
|
240 |
+
return f0
|
241 |
+
|
242 |
+
def get_mangio_crepe(self, x, hop_length, model="full", onnx=False):
|
243 |
+
providers = self.get_providers() if onnx else None
|
244 |
+
|
245 |
+
audio = torch.from_numpy(x.astype(np.float32)).to(self.device)
|
246 |
+
audio /= torch.quantile(torch.abs(audio), 0.999)
|
247 |
+
audio = audio.unsqueeze(0)
|
248 |
+
|
249 |
+
source = predict(audio, self.fs, hop_length, self.f0_min, self.f0_max, model=model, batch_size=hop_length * 2, device=self.device, pad=True, providers=providers, onnx=onnx).squeeze(0).cpu().float().numpy()
|
250 |
+
source[source < 0.001] = np.nan
|
251 |
+
|
252 |
+
return np.nan_to_num(np.interp(np.arange(0, len(source) * (x.size // self.hop), len(source)) / (x.size // self.hop), np.arange(0, len(source)), source))
|
253 |
+
|
254 |
+
def get_crepe(self, x, model="full", onnx=False):
|
255 |
+
providers = self.get_providers() if onnx else None
|
256 |
+
|
257 |
+
f0, pd = predict(torch.tensor(np.copy(x))[None].float(), self.fs, 160, self.f0_min, self.f0_max, model, batch_size=512, device=self.device, return_periodicity=True, providers=providers, onnx=onnx)
|
258 |
+
f0, pd = mean(f0, 3), median(pd, 3)
|
259 |
+
f0[pd < 0.1] = 0
|
260 |
+
|
261 |
+
return f0[0].cpu().numpy()
|
262 |
+
|
263 |
+
def get_fcpe(self, x, hop_length, legacy=False, onnx=False):
|
264 |
+
providers = self.get_providers() if onnx else None
|
265 |
+
|
266 |
+
model_fcpe = FCPE(os.path.join("assets", "models", "predictors", "fcpe" + (".onnx" if onnx else ".pt")), hop_length=int(hop_length), f0_min=int(self.f0_min), f0_max=int(self.f0_max), dtype=torch.float32, device=self.device, sample_rate=self.fs, threshold=0.03, providers=providers, onnx=onnx) if legacy else FCPE(os.path.join("assets", "models", "predictors", "fcpe" + (".onnx" if onnx else ".pt")), hop_length=160, f0_min=0, f0_max=8000, dtype=torch.float32, device=self.device, sample_rate=self.fs, threshold=0.006, providers=providers, onnx=onnx)
|
267 |
+
f0 = model_fcpe.compute_f0(x, p_len=(x.size // self.hop))
|
268 |
+
|
269 |
+
del model_fcpe
|
270 |
+
return f0
|
271 |
+
|
272 |
+
def get_rmvpe(self, x, legacy=False, onnx=False):
|
273 |
+
providers = self.get_providers() if onnx else None
|
274 |
+
|
275 |
+
rmvpe_model = RMVPE(os.path.join("assets", "models", "predictors", "rmvpe" + (".onnx" if onnx else ".pt")), device=self.device, onnx=onnx, providers=providers)
|
276 |
+
f0 = rmvpe_model.infer_from_audio_with_pitch(x, thred=0.03, f0_min=self.f0_min, f0_max=self.f0_max) if legacy else rmvpe_model.infer_from_audio(x, thred=0.03)
|
277 |
+
|
278 |
+
del rmvpe_model
|
279 |
+
return f0
|
280 |
+
|
281 |
+
def get_pyworld(self, x, model="harvest"):
|
282 |
+
pw = PYWORLD()
|
283 |
+
|
284 |
+
if model == "harvest": f0, t = pw.harvest(x.astype(np.double), fs=self.fs, f0_ceil=self.f0_max, f0_floor=self.f0_min, frame_period=1000 * self.hop / self.fs)
|
285 |
+
elif model == "dio": f0, t = pw.dio(x.astype(np.double), fs=self.fs, f0_ceil=self.f0_max, f0_floor=self.f0_min, frame_period=1000 * self.hop / self.fs)
|
286 |
+
else: raise ValueError(translations["method_not_valid"])
|
287 |
+
|
288 |
+
return pw.stonemask(x.astype(np.double), self.fs, t, f0)
|
289 |
+
|
290 |
+
def get_yin(self, x, hop_length):
|
291 |
+
source = np.array(librosa.yin(x.astype(np.double), sr=self.fs, fmin=self.f0_min, fmax=self.f0_max, hop_length=hop_length))
|
292 |
+
source[source < 0.001] = np.nan
|
293 |
+
|
294 |
+
return np.nan_to_num(np.interp(np.arange(0, len(source) * (x.size // self.hop), len(source)) / (x.size // self.hop), np.arange(0, len(source)), source))
|
295 |
+
|
296 |
+
def get_pyin(self, x, hop_length):
|
297 |
+
f0, _, _ = librosa.pyin(x.astype(np.double), fmin=self.f0_min, fmax=self.f0_max, sr=self.fs, hop_length=hop_length)
|
298 |
+
|
299 |
+
source = np.array(f0)
|
300 |
+
source[source < 0.001] = np.nan
|
301 |
+
|
302 |
+
return np.nan_to_num(np.interp(np.arange(0, len(source) * (x.size // self.hop), len(source)) / (x.size // self.hop), np.arange(0, len(source)), source))
|
303 |
+
|
304 |
+
def coarse_f0(self, f0):
|
305 |
+
return np.rint(np.clip(((1127 * np.log(1 + f0 / 700)) - self.f0_mel_min) * (self.f0_bin - 2) / (self.f0_mel_max - self.f0_mel_min) + 1, 1, self.f0_bin - 1)).astype(int)
|
306 |
+
|
307 |
+
def process_file(self, file_info, f0_method, hop_length):
|
308 |
+
inp_path, opt_path1, opt_path2, np_arr = file_info
|
309 |
+
if os.path.exists(opt_path1 + ".npy") and os.path.exists(opt_path2 + ".npy"): return
|
310 |
+
|
311 |
+
try:
|
312 |
+
feature_pit = self.compute_f0(np_arr, f0_method, hop_length)
|
313 |
+
np.save(opt_path2, feature_pit, allow_pickle=False)
|
314 |
+
np.save(opt_path1, self.coarse_f0(feature_pit), allow_pickle=False)
|
315 |
+
except Exception as e:
|
316 |
+
raise RuntimeError(f"{translations['extract_file_error']} {inp_path}: {e}")
|
317 |
+
|
318 |
+
def process_files(self, files, f0_method, hop_length, pbar):
|
319 |
+
for file_info in files:
|
320 |
+
self.process_file(file_info, f0_method, hop_length)
|
321 |
+
pbar.update()
|
322 |
+
|
323 |
+
def run_pitch_extraction(exp_dir, f0_method, hop_length, num_processes, gpus):
|
324 |
+
input_root, *output_roots = setup_paths(exp_dir)
|
325 |
+
output_root1, output_root2 = output_roots if len(output_roots) == 2 else (output_roots[0], None)
|
326 |
+
paths = [(os.path.join(input_root, name), os.path.join(output_root1, name) if output_root1 else None, os.path.join(output_root2, name) if output_root2 else None, load_audio(os.path.join(input_root, name))) for name in sorted(os.listdir(input_root)) if "spec" not in name]
|
327 |
+
logger.info(translations["extract_f0_method"].format(num_processes=num_processes, f0_method=f0_method))
|
328 |
+
|
329 |
+
start_time = time.time()
|
330 |
+
|
331 |
+
if gpus != "-":
|
332 |
+
gpus = gpus.split("-")
|
333 |
+
process_partials = []
|
334 |
+
|
335 |
+
pbar = tqdm.tqdm(total=len(paths), desc=translations["extract_f0"], ncols=100, unit="p")
|
336 |
+
|
337 |
+
for idx, gpu in enumerate(gpus):
|
338 |
+
feature_input = FeatureInput(device=get_device(gpu))
|
339 |
+
process_partials.append((feature_input, paths[idx::len(gpus)]))
|
340 |
+
|
341 |
+
with ThreadPoolExecutor() as executor:
|
342 |
+
for future in as_completed([executor.submit(FeatureInput.process_files, feature_input, part_paths, f0_method, hop_length, pbar) for feature_input, part_paths in process_partials]):
|
343 |
+
pbar.update(1)
|
344 |
+
logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))
|
345 |
+
future.result()
|
346 |
+
|
347 |
+
pbar.close()
|
348 |
+
else:
|
349 |
+
with tqdm.tqdm(total=len(paths), desc=translations["extract_f0"], ncols=100, unit="p") as pbar:
|
350 |
+
with Pool(processes=num_processes) as pool:
|
351 |
+
for _ in pool.imap_unordered(partial(FeatureInput(device="cpu").process_file, f0_method=f0_method, hop_length=hop_length), paths):
|
352 |
+
pbar.update(1)
|
353 |
+
logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))
|
354 |
+
|
355 |
+
elapsed_time = time.time() - start_time
|
356 |
+
logger.info(translations["extract_f0_success"].format(elapsed_time=f"{elapsed_time:.2f}"))
|
357 |
+
|
358 |
+
def process_file_embedding(file, wav_path, out_path, model, device, version, saved_cfg):
|
359 |
+
out_file_path = os.path.join(out_path, file.replace("wav", "npy"))
|
360 |
+
if os.path.exists(out_file_path): return
|
361 |
+
|
362 |
+
feats = read_wave(os.path.join(wav_path, file), normalize=saved_cfg.task.normalize).to(device).float()
|
363 |
+
inputs = {"source": feats, "padding_mask": torch.BoolTensor(feats.shape).fill_(False).to(device), "output_layer": 9 if version == "v1" else 12}
|
364 |
+
|
365 |
+
with torch.no_grad():
|
366 |
+
model = model.to(device).float().eval()
|
367 |
+
logits = model.extract_features(**inputs)
|
368 |
+
feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
|
369 |
+
|
370 |
+
feats = feats.squeeze(0).float().cpu().numpy()
|
371 |
+
|
372 |
+
if not np.isnan(feats).any(): np.save(out_file_path, feats, allow_pickle=False)
|
373 |
+
else: logger.warning(f"{file} {translations['NaN']}")
|
374 |
+
|
375 |
+
def run_embedding_extraction(exp_dir, version, gpus, embedder_model):
|
376 |
+
wav_path, out_path = setup_paths(exp_dir, version)
|
377 |
+
logger.info(translations["start_extract_hubert"])
|
378 |
+
|
379 |
+
start_time = time.time()
|
380 |
+
|
381 |
+
try:
|
382 |
+
models, saved_cfg, _ = checkpoint_utils.load_model_ensemble_and_task([os.path.join("assets", "models", "embedders", embedder_model + '.pt')], suffix="")
|
383 |
+
except Exception as e:
|
384 |
+
raise ImportError(translations["read_model_error"].format(e=e))
|
385 |
+
|
386 |
+
devices = [get_device(gpu) for gpu in (gpus.split("-") if gpus != "-" else ["cpu"])]
|
387 |
+
paths = sorted([file for file in os.listdir(wav_path) if file.endswith(".wav")])
|
388 |
+
|
389 |
+
if not paths:
|
390 |
+
logger.warning(translations["not_found_audio_file"])
|
391 |
+
sys.exit(1)
|
392 |
+
|
393 |
+
pbar = tqdm.tqdm(total=len(paths) * len(devices), desc=translations["extract_hubert"], ncols=100, unit="p")
|
394 |
+
|
395 |
+
for task in [(file, wav_path, out_path, models[0], device, version, saved_cfg) for file in paths for device in devices]:
|
396 |
+
try:
|
397 |
+
process_file_embedding(*task)
|
398 |
+
except Exception as e:
|
399 |
+
raise RuntimeError(f"{translations['process_error']} {task[0]}: {e}")
|
400 |
+
|
401 |
+
pbar.update(1)
|
402 |
+
logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))
|
403 |
+
|
404 |
+
pbar.close()
|
405 |
+
elapsed_time = time.time() - start_time
|
406 |
+
logger.info(translations["extract_hubert_success"].format(elapsed_time=f"{elapsed_time:.2f}"))
|
407 |
+
|
408 |
+
if __name__ == "__main__":
|
409 |
+
args = parse_arguments()
|
410 |
+
exp_dir = os.path.join("assets", "logs", args.model_name)
|
411 |
+
f0_method, hop_length, num_processes, gpus, version, pitch_guidance, sample_rate, embedder_model = args.f0_method, args.hop_length, args.cpu_cores, args.gpu, args.rvc_version, args.pitch_guidance, args.sample_rate, args.embedder_model
|
412 |
+
|
413 |
+
check_predictors(f0_method)
|
414 |
+
check_embedders(embedder_model)
|
415 |
+
|
416 |
+
if logger.hasHandlers(): logger.handlers.clear()
|
417 |
+
else:
|
418 |
+
console_handler = logging.StreamHandler()
|
419 |
+
console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
420 |
+
console_handler.setFormatter(console_formatter)
|
421 |
+
console_handler.setLevel(logging.INFO)
|
422 |
+
file_handler = logging.handlers.RotatingFileHandler(os.path.join(exp_dir, "extract.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
|
423 |
+
file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
424 |
+
file_handler.setFormatter(file_formatter)
|
425 |
+
file_handler.setLevel(logging.DEBUG)
|
426 |
+
logger.addHandler(console_handler)
|
427 |
+
logger.addHandler(file_handler)
|
428 |
+
logger.setLevel(logging.DEBUG)
|
429 |
+
|
430 |
+
log_data = {translations['modelname']: args.model_name, translations['export_process']: exp_dir, translations['f0_method']: f0_method, translations['pretrain_sr']: sample_rate, translations['cpu_core']: num_processes, "Gpu": gpus, "Hop length": hop_length, translations['training_version']: version, translations['extract_f0']: pitch_guidance, translations['hubert_model']: embedder_model}
|
431 |
+
for key, value in log_data.items():
|
432 |
+
logger.debug(f"{key}: {value}")
|
433 |
+
|
434 |
+
pid_path = os.path.join(exp_dir, "extract_pid.txt")
|
435 |
+
with open(pid_path, "w") as pid_file:
|
436 |
+
pid_file.write(str(os.getpid()))
|
437 |
+
|
438 |
+
try:
|
439 |
+
run_pitch_extraction(exp_dir, f0_method, hop_length, num_processes, gpus)
|
440 |
+
run_embedding_extraction(exp_dir, version, gpus, embedder_model)
|
441 |
+
generate_config(version, sample_rate, exp_dir)
|
442 |
+
generate_filelist(pitch_guidance, exp_dir, version, sample_rate)
|
443 |
+
except Exception as e:
|
444 |
+
logger.error(f"{translations['extract_error']}: {e}")
|
445 |
+
|
446 |
+
import traceback
|
447 |
+
logger.debug(traceback.format_exc())
|
448 |
+
|
449 |
+
if os.path.exists(pid_path): os.remove(pid_path)
|
450 |
+
logger.info(f"{translations['extract_success']} {args.model_name}.")
|
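compute_f0_hybrid above runs each requested pitch tracker, resamples every contour to the common frame count, and merges them with a per-frame nan-median so that frames dropped by one method do not zero out the rest. The merging step in isolation, with made-up contours:

    import numpy as np

    n_frames = 200
    contours = [
        np.random.uniform(100.0, 300.0, 180),   # one tracker at its own hop size
        np.random.uniform(100.0, 300.0, 220),   # another tracker at a different hop size
    ]
    contours[1][50:70] = np.nan                 # pretend the second tracker dropped these frames

    resampled = [
        np.interp(np.linspace(0, len(f0), n_frames), np.arange(len(f0)), f0)
        for f0 in contours
    ]
    merged = np.nanmedian(np.vstack(resampled), axis=0)  # per-frame median, ignoring NaNs
    print(merged.shape)                                  # (200,)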
main/inference/preprocess.py
ADDED
@@ -0,0 +1,290 @@
import os
import sys
import time
import logging
import librosa
import argparse
import logging.handlers

import numpy as np
import soundfile as sf
import multiprocessing as mp

from tqdm import tqdm
from scipy import signal
from scipy.io import wavfile
from distutils.util import strtobool
from concurrent.futures import ProcessPoolExecutor, as_completed

sys.path.append(os.getcwd())

from main.configs.config import Config

logger = logging.getLogger(__name__)
for l in ["numba.core.byteflow", "numba.core.ssa", "numba.core.interpreter"]:
    logging.getLogger(l).setLevel(logging.ERROR)

OVERLAP, MAX_AMPLITUDE, ALPHA, HIGH_PASS_CUTOFF, SAMPLE_RATE_16K = 0.3, 0.9, 0.75, 48, 16000
translations = Config().translations

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True)
    parser.add_argument("--dataset_path", type=str, default="./dataset")
    parser.add_argument("--sample_rate", type=int, required=True)
    parser.add_argument("--cpu_cores", type=int, default=2)
    parser.add_argument("--cut_preprocess", type=lambda x: bool(strtobool(x)), default=True)
    parser.add_argument("--process_effects", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--clean_dataset", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--clean_strength", type=float, default=0.7)

    return parser.parse_args()

def load_audio(file, sample_rate):
    try:
        audio, sr = sf.read(file.strip(" ").strip('"').strip("\n").strip('"').strip(" "))

        if len(audio.shape) > 1: audio = librosa.to_mono(audio.T)
        if sr != sample_rate: audio = librosa.resample(audio, orig_sr=sr, target_sr=sample_rate, res_type="soxr_vhq")
    except Exception as e:
        raise RuntimeError(f"{translations['errors_loading_audio']}: {e}")

    return audio.flatten()

class Slicer:
    def __init__(self, sr, threshold = -40.0, min_length = 5000, min_interval = 300, hop_size = 20, max_sil_kept = 5000):
        if not min_length >= min_interval >= hop_size: raise ValueError(translations["min_length>=min_interval>=hop_size"])
        if not max_sil_kept >= hop_size: raise ValueError(translations["max_sil_kept>=hop_size"])

        min_interval = sr * min_interval / 1000
        self.threshold = 10 ** (threshold / 20.0)
        self.hop_size = round(sr * hop_size / 1000)
        self.win_size = min(round(min_interval), 4 * self.hop_size)
        self.min_length = round(sr * min_length / 1000 / self.hop_size)
        self.min_interval = round(min_interval / self.hop_size)
        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)

    def _apply_slice(self, waveform, begin, end):
        start_idx = begin * self.hop_size

        if len(waveform.shape) > 1: return waveform[:, start_idx:min(waveform.shape[1], end * self.hop_size)]
        else: return waveform[start_idx:min(waveform.shape[0], end * self.hop_size)]

    def slice(self, waveform):
        samples = waveform.mean(axis=0) if len(waveform.shape) > 1 else waveform
        if samples.shape[0] <= self.min_length: return [waveform]

        rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
        sil_tags = []
        silence_start, clip_start = None, 0

        for i, rms in enumerate(rms_list):
            if rms < self.threshold:
                if silence_start is None: silence_start = i
                continue

            if silence_start is None: continue

            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
            need_slice_middle = (i - silence_start >= self.min_interval and i - clip_start >= self.min_length)

            if not is_leading_silence and not need_slice_middle:
                silence_start = None
                continue

            if i - silence_start <= self.max_sil_kept:
                pos = rms_list[silence_start : i + 1].argmin() + silence_start

                sil_tags.append((0, pos) if silence_start == 0 else (pos, pos))
                clip_start = pos
            elif i - silence_start <= self.max_sil_kept * 2:
                pos = rms_list[i - self.max_sil_kept : silence_start + self.max_sil_kept + 1].argmin()

                pos += i - self.max_sil_kept
                pos_r = (rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept)

                if silence_start == 0:
                    sil_tags.append((0, pos_r))
                    clip_start = pos_r
                else:
                    sil_tags.append((min((rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start), pos), max(pos_r, pos)))
                    clip_start = max(pos_r, pos)
            else:
                pos_r = (rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept)

                sil_tags.append((0, pos_r) if silence_start == 0 else ((rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start), pos_r))
                clip_start = pos_r

            silence_start = None
        total_frames = rms_list.shape[0]
        if (silence_start is not None and total_frames - silence_start >= self.min_interval): sil_tags.append((rms_list[silence_start : min(total_frames, silence_start + self.max_sil_kept) + 1].argmin() + silence_start, total_frames + 1))

        if not sil_tags: return [waveform]
        else:
            chunks = []
            if sil_tags[0][0] > 0: chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))

            for i in range(len(sil_tags) - 1):
                chunks.append(self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]))

            if sil_tags[-1][1] < total_frames: chunks.append(self._apply_slice(waveform, sil_tags[-1][1], total_frames))
            return chunks

def get_rms(y, frame_length=2048, hop_length=512, pad_mode="constant"):
    y = np.pad(y, (int(frame_length // 2), int(frame_length // 2)), mode=pad_mode)
    axis = -1

    x_shape_trimmed = list(y.shape)
    x_shape_trimmed[axis] -= frame_length - 1

    xw = np.lib.stride_tricks.as_strided(y, shape=tuple(x_shape_trimmed) + tuple([frame_length]), strides=y.strides + tuple([y.strides[axis]]))
    xw = np.moveaxis(xw, -1, axis - 1 if axis < 0 else axis + 1)

    slices = [slice(None)] * xw.ndim
    slices[axis] = slice(0, None, hop_length)

    return np.sqrt(np.mean(np.abs(xw[tuple(slices)]) ** 2, axis=-2, keepdims=True))

class PreProcess:
    def __init__(self, sr, exp_dir, per):
        self.slicer = Slicer(sr=sr, threshold=-42, min_length=1500, min_interval=400, hop_size=15, max_sil_kept=500)
        self.sr = sr
        self.b_high, self.a_high = signal.butter(N=5, Wn=HIGH_PASS_CUTOFF, btype="high", fs=self.sr)
        self.per = per
        self.exp_dir = exp_dir
        self.device = "cpu"
        self.gt_wavs_dir = os.path.join(exp_dir, "sliced_audios")
        self.wavs16k_dir = os.path.join(exp_dir, "sliced_audios_16k")
        os.makedirs(self.gt_wavs_dir, exist_ok=True)
        os.makedirs(self.wavs16k_dir, exist_ok=True)

    def _normalize_audio(self, audio):
        tmp_max = np.abs(audio).max()
        if tmp_max > 2.5: return None
        return (audio / tmp_max * (MAX_AMPLITUDE * ALPHA)) + (1 - ALPHA) * audio

    def process_audio_segment(self, normalized_audio, sid, idx0, idx1):
        if normalized_audio is None:
            logger.debug(f"{sid}-{idx0}-{idx1}-filtered")
            return

        wavfile.write(os.path.join(self.gt_wavs_dir, f"{sid}_{idx0}_{idx1}.wav"), self.sr, normalized_audio.astype(np.float32))
        wavfile.write(os.path.join(self.wavs16k_dir, f"{sid}_{idx0}_{idx1}.wav"), SAMPLE_RATE_16K, librosa.resample(normalized_audio, orig_sr=self.sr, target_sr=SAMPLE_RATE_16K, res_type="soxr_vhq").astype(np.float32))

    def process_audio(self, path, idx0, sid, cut_preprocess, process_effects, clean_dataset, clean_strength):
        try:
            audio = load_audio(path, self.sr)

            if process_effects:
                audio = signal.lfilter(self.b_high, self.a_high, audio)
                audio = self._normalize_audio(audio)

            if clean_dataset:
                from main.tools.noisereduce import reduce_noise
                audio = reduce_noise(y=audio, sr=self.sr, prop_decrease=clean_strength)

            idx1 = 0
            if cut_preprocess:
                for audio_segment in self.slicer.slice(audio):
                    i = 0

                    while 1:
                        start = int(self.sr * (self.per - OVERLAP) * i)
                        i += 1

                        if len(audio_segment[start:]) > (self.per + OVERLAP) * self.sr:
                            self.process_audio_segment(audio_segment[start : start + int(self.per * self.sr)], sid, idx0, idx1)
                            idx1 += 1
                        else:
                            self.process_audio_segment(audio_segment[start:], sid, idx0, idx1)
                            idx1 += 1
                            break
            else: self.process_audio_segment(audio, sid, idx0, idx1)
        except Exception as e:
            raise RuntimeError(f"{translations['process_audio_error']}: {e}")

def process_file(args):
    pp, file, cut_preprocess, process_effects, clean_dataset, clean_strength = (args)
    file_path, idx0, sid = file
    pp.process_audio(file_path, idx0, sid, cut_preprocess, process_effects, clean_dataset, clean_strength)

def preprocess_training_set(input_root, sr, num_processes, exp_dir, per, cut_preprocess, process_effects, clean_dataset, clean_strength):
    start_time = time.time()

    pp = PreProcess(sr, exp_dir, per)
    logger.info(translations["start_preprocess"].format(num_processes=num_processes))
    files = []
    idx = 0

    for root, _, filenames in os.walk(input_root):
        try:
            sid = 0 if root == input_root else int(os.path.basename(root))

            for f in filenames:
                if f.lower().endswith(("wav", "mp3", "flac", "ogg", "opus", "m4a", "mp4", "aac", "alac", "wma", "aiff", "webm", "ac3")):
                    files.append((os.path.join(root, f), idx, sid))
                    idx += 1
        except ValueError:
            raise ValueError(f"{translations['not_integer']} '{os.path.basename(root)}'.")

    with tqdm(total=len(files), desc=translations["preprocess"], ncols=100, unit="f") as pbar:
        with ProcessPoolExecutor(max_workers=num_processes) as executor:
            futures = [executor.submit(process_file, (pp, file, cut_preprocess, process_effects, clean_dataset, clean_strength)) for file in files]
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    raise RuntimeError(f"{translations['process_error']}: {e}")
                pbar.update(1)
                logger.debug(pbar.format_meter(pbar.n, pbar.total, pbar.format_dict["elapsed"]))

    elapsed_time = time.time() - start_time
    logger.info(translations["preprocess_success"].format(elapsed_time=f"{elapsed_time:.2f}"))

if __name__ == "__main__":
    args = parse_arguments()
    experiment_directory = os.path.join("assets", "logs", args.model_name)
    num_processes = args.cpu_cores
    num_processes = mp.cpu_count() if num_processes is None else int(num_processes)
    dataset = args.dataset_path
    sample_rate = args.sample_rate
    cut_preprocess = args.cut_preprocess
    preprocess_effects = args.process_effects
    clean_dataset = args.clean_dataset
    clean_strength = args.clean_strength

    os.makedirs(experiment_directory, exist_ok=True)

    if logger.hasHandlers(): logger.handlers.clear()
    else:
        console_handler = logging.StreamHandler()
        console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        console_handler.setFormatter(console_formatter)
        console_handler.setLevel(logging.INFO)
        file_handler = logging.handlers.RotatingFileHandler(os.path.join(experiment_directory, "preprocess.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
        file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        file_handler.setFormatter(file_formatter)
        file_handler.setLevel(logging.DEBUG)
        logger.addHandler(console_handler)
        logger.addHandler(file_handler)
        logger.setLevel(logging.DEBUG)

    log_data = {translations['modelname']: args.model_name, translations['export_process']: experiment_directory, translations['dataset_folder']: dataset, translations['pretrain_sr']: sample_rate, translations['cpu_core']: num_processes, translations['split_audio']: cut_preprocess, translations['preprocess_effect']: preprocess_effects, translations['clear_audio']: clean_dataset}
    if clean_dataset: log_data[translations['clean_strength']] = clean_strength

    for key, value in log_data.items():
        logger.debug(f"{key}: {value}")

    pid_path = os.path.join(experiment_directory, "preprocess_pid.txt")
    with open(pid_path, "w") as pid_file:
        pid_file.write(str(os.getpid()))

    try:
        preprocess_training_set(dataset, sample_rate, num_processes, experiment_directory, 3.7, cut_preprocess, preprocess_effects, clean_dataset, clean_strength)
    except Exception as e:
        logger.error(f"{translations['process_audio_error']} {e}")
        import traceback
        logger.debug(traceback.format_exc())

    if os.path.exists(pid_path): os.remove(pid_path)
    logger.info(f"{translations['preprocess_model_success']} {args.model_name}")
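For orientation, a minimal sketch of how the pieces of preprocess.py above fit together when driven by hand (the dataset path and sample rate are illustrative, not taken from the diff):

    # load one file as mono float audio at the target rate, then silence-slice it
    audio = load_audio("dataset/0/sample.wav", 40000)
    slicer = Slicer(sr=40000, threshold=-42, min_length=1500, min_interval=400, hop_size=15, max_sil_kept=500)
    chunks = slicer.slice(audio)   # list of segments, exactly what PreProcess iterates over
    print(len(chunks))

In the script itself this is wrapped by PreProcess.process_audio, which optionally high-pass filters and normalizes the signal, optionally denoises it, cuts each segment into overlapping windows of `per` seconds, and writes both the native-rate and the 16 kHz copy of every window.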
main/inference/separator_music.py
ADDED
@@ -0,0 +1,290 @@
import os
import sys
import time
import logging
import argparse
import logging.handlers

from pydub import AudioSegment
from distutils.util import strtobool

sys.path.append(os.getcwd())

from main.configs.config import Config
from main.library.utils import pydub_convert
from main.library.algorithm.separator import Separator

translations = Config().translations
logger = logging.getLogger(__name__)

if logger.hasHandlers(): logger.handlers.clear()
else:
    console_handler = logging.StreamHandler()
    console_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    console_handler.setFormatter(console_formatter)
    console_handler.setLevel(logging.INFO)
    file_handler = logging.handlers.RotatingFileHandler(os.path.join("assets", "logs", "separator.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
    file_formatter = logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    file_handler.setFormatter(file_formatter)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

demucs_models = {"HT-Tuned": "htdemucs_ft.yaml", "HT-Normal": "htdemucs.yaml", "HD_MMI": "hdemucs_mmi.yaml", "HT_6S": "htdemucs_6s.yaml"}
mdx_models = {"Main_340": "UVR-MDX-NET_Main_340.onnx", "Main_390": "UVR-MDX-NET_Main_390.onnx", "Main_406": "UVR-MDX-NET_Main_406.onnx", "Main_427": "UVR-MDX-NET_Main_427.onnx", "Main_438": "UVR-MDX-NET_Main_438.onnx", "Inst_full_292": "UVR-MDX-NET-Inst_full_292.onnx", "Inst_HQ_1": "UVR-MDX-NET_Inst_HQ_1.onnx", "Inst_HQ_2": "UVR-MDX-NET_Inst_HQ_2.onnx", "Inst_HQ_3": "UVR-MDX-NET_Inst_HQ_3.onnx", "Inst_HQ_4": "UVR-MDX-NET-Inst_HQ_4.onnx", "Inst_HQ_5": "UVR-MDX-NET-Inst_HQ_5.onnx", "Kim_Vocal_1": "Kim_Vocal_1.onnx", "Kim_Vocal_2": "Kim_Vocal_2.onnx", "Kim_Inst": "Kim_Inst.onnx", "Inst_187_beta": "UVR-MDX-NET_Inst_187_beta.onnx", "Inst_82_beta": "UVR-MDX-NET_Inst_82_beta.onnx", "Inst_90_beta": "UVR-MDX-NET_Inst_90_beta.onnx", "Voc_FT": "UVR-MDX-NET-Voc_FT.onnx", "Crowd_HQ": "UVR-MDX-NET_Crowd_HQ_1.onnx", "MDXNET_9482": "UVR_MDXNET_9482.onnx", "Inst_1": "UVR-MDX-NET-Inst_1.onnx", "Inst_2": "UVR-MDX-NET-Inst_2.onnx", "Inst_3": "UVR-MDX-NET-Inst_3.onnx", "MDXNET_1_9703": "UVR_MDXNET_1_9703.onnx", "MDXNET_2_9682": "UVR_MDXNET_2_9682.onnx", "MDXNET_3_9662": "UVR_MDXNET_3_9662.onnx", "Inst_Main": "UVR-MDX-NET-Inst_Main.onnx", "MDXNET_Main": "UVR_MDXNET_Main.onnx"}
kara_models = {"Version-1": "UVR_MDXNET_KARA.onnx", "Version-2": "UVR_MDXNET_KARA_2.onnx"}

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, default="./audios")
    parser.add_argument("--format", type=str, default="wav")
    parser.add_argument("--shifts", type=int, default=2)
    parser.add_argument("--segments_size", type=int, default=256)
    parser.add_argument("--overlap", type=float, default=0.25)
    parser.add_argument("--mdx_hop_length", type=int, default=1024)
    parser.add_argument("--mdx_batch_size", type=int, default=1)
    parser.add_argument("--clean_audio", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--clean_strength", type=float, default=0.7)
    parser.add_argument("--model_name", type=str, default="HT-Normal")
    parser.add_argument("--kara_model", type=str, default="Version-1")
    parser.add_argument("--backing", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--mdx_denoise", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--reverb", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--backing_reverb", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--sample_rate", type=int, default=44100)

    return parser.parse_args()

def main():
    start_time = time.time()
    pid_path = os.path.join("assets", "separate_pid.txt")

    with open(pid_path, "w") as pid_file:
        pid_file.write(str(os.getpid()))

    try:
        args = parse_arguments()
        input_path, output_path, export_format, shifts, segments_size, overlap, hop_length, batch_size, clean_audio, clean_strength, model_name, kara_model, backing, mdx_denoise, reverb, backing_reverb, sample_rate = args.input_path, args.output_path, args.format, args.shifts, args.segments_size, args.overlap, args.mdx_hop_length, args.mdx_batch_size, args.clean_audio, args.clean_strength, args.model_name, args.kara_model, args.backing, args.mdx_denoise, args.reverb, args.backing_reverb, args.sample_rate

        if backing_reverb and not reverb:
            logger.warning(translations["turn_on_dereverb"])
            sys.exit(1)

        if backing_reverb and not backing:
            logger.warning(translations["turn_on_separator_backing"])
            sys.exit(1)

        log_data = {translations['audio_path']: input_path, translations['output_path']: output_path, translations['export_format']: export_format, translations['shift']: shifts, translations['segments_size']: segments_size, translations['overlap']: overlap, translations['modelname']: model_name, translations['denoise_mdx']: mdx_denoise, "Hop length": hop_length, translations['batch_size']: batch_size, translations['sr']: sample_rate}

        if clean_audio:
            log_data[translations['clear_audio']] = clean_audio
            log_data[translations['clean_strength']] = clean_strength

        if backing:
            log_data[translations['backing_model_ver']] = kara_model
            log_data[translations['separator_backing']] = backing

        if reverb:
            log_data[translations['dereveb_audio']] = reverb
            log_data[translations['dereveb_backing']] = backing_reverb

        for key, value in log_data.items():
            logger.debug(f"{key}: {value}")

        if model_name in ["HT-Tuned", "HT-Normal", "HD_MMI", "HT_6S"]: vocals, instruments = separator_music_demucs(input_path, output_path, export_format, shifts, overlap, segments_size, model_name, sample_rate)
        else: vocals, instruments = separator_music_mdx(input_path, output_path, export_format, segments_size, overlap, mdx_denoise, model_name, hop_length, batch_size, sample_rate)

        if backing: main_vocals, backing_vocals = separator_backing(vocals, output_path, export_format, segments_size, overlap, mdx_denoise, kara_model, hop_length, batch_size, sample_rate)
        if reverb: vocals_no_reverb, main_vocals_no_reverb, backing_vocals_no_reverb = separator_reverb(output_path, export_format, segments_size, overlap, mdx_denoise, reverb, backing_reverb, hop_length, batch_size, sample_rate)

        original_output = os.path.join(output_path, f"Original_Vocals_No_Reverb.{export_format}") if reverb else os.path.join(output_path, f"Original_Vocals.{export_format}")
        main_output = os.path.join(output_path, f"Main_Vocals_No_Reverb.{export_format}") if reverb and backing_reverb else os.path.join(output_path, f"Main_Vocals.{export_format}")
        backing_output = os.path.join(output_path, f"Backing_Vocals_No_Reverb.{export_format}") if reverb and backing_reverb else os.path.join(output_path, f"Backing_Vocals.{export_format}")

        if clean_audio:
            import soundfile as sf
            logger.info(f"{translations['clear_audio']}...")

            vocal_data, vocal_sr = sf.read(vocals_no_reverb if reverb else vocals)
            main_data, main_sr = sf.read(main_vocals_no_reverb if reverb and backing else main_vocals)
            backing_data, backing_sr = sf.read(backing_vocals_no_reverb if reverb and backing_reverb else backing_vocals)

            from main.tools.noisereduce import reduce_noise
            sf.write(original_output, reduce_noise(y=vocal_data, prop_decrease=clean_strength), vocal_sr, format=export_format)

            if backing:
                sf.write(main_output, reduce_noise(y=main_data, sr=main_sr, prop_decrease=clean_strength), main_sr, format=export_format)
                sf.write(backing_output, reduce_noise(y=backing_data, sr=backing_sr, prop_decrease=clean_strength), backing_sr, format=export_format)

            logger.info(translations["clean_audio_success"])
        return original_output, instruments, main_output, backing_output
    except Exception as e:
        logger.error(f"{translations['separator_error']}: {e}")
        import traceback
        logger.debug(traceback.format_exc())

    if os.path.exists(pid_path): os.remove(pid_path)

    elapsed_time = time.time() - start_time
    logger.info(translations["separator_success"].format(elapsed_time=f"{elapsed_time:.2f}"))

def separator_music_demucs(input, output, format, shifts, overlap, segments_size, demucs_model, sample_rate):
    if not os.path.exists(input):
        logger.warning(translations["input_not_valid"])
        sys.exit(1)

    if not os.path.exists(output):
        logger.warning(translations["output_not_valid"])
        sys.exit(1)

    for i in [f"Original_Vocals.{format}", f"Instruments.{format}"]:
        if os.path.exists(os.path.join(output, i)): os.remove(os.path.join(output, i))

    logger.info(f"{translations['separator_process_2']}...")
    demucs_output = separator_main(audio_file=input, model_filename=demucs_models.get(demucs_model), output_format=format, output_dir=output, demucs_segment_size=(segments_size / 2), demucs_shifts=shifts, demucs_overlap=overlap, sample_rate=sample_rate)

    for f in demucs_output:
        path = os.path.join(output, f)
        if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))

        if '_(Drums)_' in f: drums = path
        elif '_(Bass)_' in f: bass = path
        elif '_(Other)_' in f: other = path
        elif '_(Vocals)_' in f: os.rename(path, os.path.join(output, f"Original_Vocals.{format}"))

    pydub_convert(AudioSegment.from_file(drums)).overlay(pydub_convert(AudioSegment.from_file(bass))).overlay(pydub_convert(AudioSegment.from_file(other))).export(os.path.join(output, f"Instruments.{format}"), format=format)

    for f in [drums, bass, other]:
        if os.path.exists(f): os.remove(f)

    logger.info(translations["separator_success_2"])
    return os.path.join(output, f"Original_Vocals.{format}"), os.path.join(output, f"Instruments.{format}")

def separator_backing(input, output, format, segments_size, overlap, denoise, kara_model, hop_length, batch_size, sample_rate):
    if not os.path.exists(input):
        logger.warning(translations["input_not_valid"])
        sys.exit(1)

    if not os.path.exists(output):
        logger.warning(translations["output_not_valid"])
        sys.exit(1)

    for f in [f"Main_Vocals.{format}", f"Backing_Vocals.{format}"]:
        if os.path.exists(os.path.join(output, f)): os.remove(os.path.join(output, f))

    model_2 = kara_models.get(kara_model)
    logger.info(f"{translations['separator_process_backing']}...")

    backing_outputs = separator_main(audio_file=input, model_filename=model_2, output_format=format, output_dir=output, mdx_segment_size=segments_size, mdx_overlap=overlap, mdx_batch_size=batch_size, mdx_hop_length=hop_length, mdx_enable_denoise=denoise, sample_rate=sample_rate)
    main_output = os.path.join(output, f"Main_Vocals.{format}")
    backing_output = os.path.join(output, f"Backing_Vocals.{format}")

    for f in backing_outputs:
        path = os.path.join(output, f)
        if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))
        if '_(Instrumental)_' in f: os.rename(path, backing_output)
        elif '_(Vocals)_' in f: os.rename(path, main_output)

    logger.info(translations["separator_process_backing_success"])
    return main_output, backing_output

def separator_music_mdx(input, output, format, segments_size, overlap, denoise, mdx_model, hop_length, batch_size, sample_rate):
    if not os.path.exists(input):
        logger.warning(translations["input_not_valid"])
        sys.exit(1)

    if not os.path.exists(output):
        logger.warning(translations["output_not_valid"])
        sys.exit(1)

    for i in [f"Original_Vocals.{format}", f"Instruments.{format}"]:
        if os.path.exists(os.path.join(output, i)): os.remove(os.path.join(output, i))

    model_3 = mdx_models.get(mdx_model)
    logger.info(f"{translations['separator_process_2']}...")

    output_music = separator_main(audio_file=input, model_filename=model_3, output_format=format, output_dir=output, mdx_segment_size=segments_size, mdx_overlap=overlap, mdx_batch_size=batch_size, mdx_hop_length=hop_length, mdx_enable_denoise=denoise, sample_rate=sample_rate)
    original_output, instruments_output = os.path.join(output, f"Original_Vocals.{format}"), os.path.join(output, f"Instruments.{format}")

    for f in output_music:
        path = os.path.join(output, f)
        if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))
        if '_(Instrumental)_' in f: os.rename(path, instruments_output)
        elif '_(Vocals)_' in f: os.rename(path, original_output)

    logger.info(translations["separator_process_backing_success"])
    return original_output, instruments_output

def separator_reverb(output, format, segments_size, overlap, denoise, original, backing_reverb, hop_length, batch_size, sample_rate):
    if not os.path.exists(output):
        logger.warning(translations["output_not_valid"])
        sys.exit(1)

    for i in [f"Original_Vocals_Reverb.{format}", f"Main_Vocals_Reverb.{format}", f"Original_Vocals_No_Reverb.{format}", f"Main_Vocals_No_Reverb.{format}"]:
        if os.path.exists(os.path.join(output, i)): os.remove(os.path.join(output, i))

    dereveb_path = []

    if original:
        try:
            dereveb_path.append(os.path.join(output, [f for f in os.listdir(output) if 'Original_Vocals' in f][0]))
        except IndexError:
            logger.warning(translations["not_found_original_vocal"])
            sys.exit(1)

    if backing_reverb:
        try:
            dereveb_path.append(os.path.join(output, [f for f in os.listdir(output) if 'Main_Vocals' in f][0]))
        except IndexError:
            logger.warning(translations["not_found_main_vocal"])
            sys.exit(1)

    if backing_reverb:
        try:
            dereveb_path.append(os.path.join(output, [f for f in os.listdir(output) if 'Backing_Vocals' in f][0]))
        except IndexError:
            logger.warning(translations["not_found_backing_vocal"])
            sys.exit(1)

    for path in dereveb_path:
        if not os.path.exists(path):
            logger.warning(translations["not_found"].format(name=path))
            sys.exit(1)

        if "Original_Vocals" in path:
            reverb_path, no_reverb_path = os.path.join(output, f"Original_Vocals_Reverb.{format}"), os.path.join(output, f"Original_Vocals_No_Reverb.{format}")
            start_title, end_title = translations["process_original"], translations["process_original_success"]
        elif "Main_Vocals" in path:
            reverb_path, no_reverb_path = os.path.join(output, f"Main_Vocals_Reverb.{format}"), os.path.join(output, f"Main_Vocals_No_Reverb.{format}")
            start_title, end_title = translations["process_main"], translations["process_main_success"]
        elif "Backing_Vocals" in path:
            reverb_path, no_reverb_path = os.path.join(output, f"Backing_Vocals_Reverb.{format}"), os.path.join(output, f"Backing_Vocals_No_Reverb.{format}")
            start_title, end_title = translations["process_backing"], translations["process_backing_success"]

        logger.info(start_title)
        output_dereveb = separator_main(audio_file=path, model_filename="Reverb_HQ_By_FoxJoy.onnx", output_format=format, output_dir=output, mdx_segment_size=segments_size, mdx_overlap=overlap, mdx_batch_size=batch_size, mdx_hop_length=hop_length, mdx_enable_denoise=denoise, sample_rate=sample_rate)

        for f in output_dereveb:
            path = os.path.join(output, f)
            if not os.path.exists(path): logger.error(translations["not_found"].format(name=path))

            if '_(Reverb)_' in f: os.rename(path, reverb_path)
            elif '_(No Reverb)_' in f: os.rename(path, no_reverb_path)

        logger.info(end_title)
    return (os.path.join(output, f"Original_Vocals_No_Reverb.{format}") if original else None), (os.path.join(output, f"Main_Vocals_No_Reverb.{format}") if backing_reverb else None), (os.path.join(output, f"Backing_Vocals_No_Reverb.{format}") if backing_reverb else None)

def separator_main(audio_file=None, model_filename="UVR-MDX-NET_Main_340.onnx", output_format="wav", output_dir=".", mdx_segment_size=256, mdx_overlap=0.25, mdx_batch_size=1, mdx_hop_length=1024, mdx_enable_denoise=True, demucs_segment_size=256, demucs_shifts=2, demucs_overlap=0.25, sample_rate=44100):
    try:
        separator = Separator(logger=logger, log_formatter=file_formatter, log_level=logging.INFO, output_dir=output_dir, output_format=output_format, output_bitrate=None, normalization_threshold=0.9, output_single_stem=None, invert_using_spec=False, sample_rate=sample_rate, mdx_params={"hop_length": mdx_hop_length, "segment_size": mdx_segment_size, "overlap": mdx_overlap, "batch_size": mdx_batch_size, "enable_denoise": mdx_enable_denoise}, demucs_params={"segment_size": demucs_segment_size, "shifts": demucs_shifts, "overlap": demucs_overlap, "segments_enabled": True})
        separator.load_model(model_filename=model_filename)
        return separator.separate(audio_file)
    except:
        logger.debug(translations["default_setting"])
        separator = Separator(logger=logger, log_formatter=file_formatter, log_level=logging.INFO, output_dir=output_dir, output_format=output_format, output_bitrate=None, normalization_threshold=0.9, output_single_stem=None, invert_using_spec=False, sample_rate=44100, mdx_params={"hop_length": 1024, "segment_size": 256, "overlap": 0.25, "batch_size": 1, "enable_denoise": mdx_enable_denoise}, demucs_params={"segment_size": 128, "shifts": 2, "overlap": 0.25, "segments_enabled": True})
        separator.load_model(model_filename=model_filename)
        return separator.separate(audio_file)

if __name__ == "__main__": main()
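As a usage sketch only (file names illustrative, not taken from the diff): every separation pass in the script above goes through the same separator_main wrapper, only the model file and parameter set change.

    # Demucs pass: split a song into vocals / drums / bass / other
    outputs = separator_main(audio_file="audios/song.wav", model_filename=demucs_models["HT-Normal"], output_format="wav", output_dir="audios", demucs_segment_size=128, demucs_shifts=2, demucs_overlap=0.25, sample_rate=44100)
    # MDX karaoke pass: split the extracted vocals into main vs. backing vocals
    outputs = separator_main(audio_file="audios/Original_Vocals.wav", model_filename=kara_models["Version-1"], output_format="wav", output_dir="audios", mdx_segment_size=256, mdx_overlap=0.25, mdx_batch_size=1, mdx_hop_length=1024, mdx_enable_denoise=False, sample_rate=44100)

The returned list holds the written stem file names, which the helper functions then rename to the fixed Original_Vocals / Main_Vocals / Backing_Vocals / Instruments outputs.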
main/inference/train.py
ADDED
@@ -0,0 +1,1000 @@
import os
import sys
import glob
import json
import torch
import hashlib
import logging
import argparse
import datetime
import warnings
import logging.handlers

import numpy as np
import soundfile as sf
import matplotlib.pyplot as plt
import torch.distributed as dist
import torch.utils.data as tdata
import torch.multiprocessing as mp
import torch.utils.checkpoint as checkpoint

from tqdm import tqdm
from collections import OrderedDict
from random import randint, shuffle
from torch.cuda.amp import GradScaler, autocast
from torch.utils.tensorboard import SummaryWriter

from time import time as ttime
from torch.nn import functional as F
from distutils.util import strtobool
from librosa.filters import mel as librosa_mel_fn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils.parametrizations import spectral_norm, weight_norm

sys.path.append(os.getcwd())
from main.configs.config import Config
from main.library.algorithm.residuals import LRELU_SLOPE
from main.library.algorithm.synthesizers import Synthesizer
from main.library.algorithm.commons import get_padding, slice_segments, clip_grad_value

MATPLOTLIB_FLAG = False
translations = Config().translations
warnings.filterwarnings("ignore")
logging.getLogger("torch").setLevel(logging.ERROR)

class HParams:
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            self[k] = HParams(**v) if isinstance(v, dict) else v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return repr(self.__dict__)

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True)
    parser.add_argument("--rvc_version", type=str, default="v2")
    parser.add_argument("--save_every_epoch", type=int, required=True)
    parser.add_argument("--save_only_latest", type=lambda x: bool(strtobool(x)), default=True)
    parser.add_argument("--save_every_weights", type=lambda x: bool(strtobool(x)), default=True)
    parser.add_argument("--total_epoch", type=int, default=300)
    parser.add_argument("--sample_rate", type=int, required=True)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--gpu", type=str, default="0")
    parser.add_argument("--pitch_guidance", type=lambda x: bool(strtobool(x)), default=True)
    parser.add_argument("--g_pretrained_path", type=str, default="")
    parser.add_argument("--d_pretrained_path", type=str, default="")
    parser.add_argument("--overtraining_detector", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--overtraining_threshold", type=int, default=50)
    parser.add_argument("--cleanup", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--cache_data_in_gpu", type=lambda x: bool(strtobool(x)), default=False)
    parser.add_argument("--model_author", type=str)
    parser.add_argument("--vocoder", type=str, default="Default")
    parser.add_argument("--checkpointing", type=lambda x: bool(strtobool(x)), default=False)

    return parser.parse_args()

args = parse_arguments()
model_name, save_every_epoch, total_epoch, pretrainG, pretrainD, version, gpus, batch_size, sample_rate, pitch_guidance, save_only_latest, save_every_weights, cache_data_in_gpu, overtraining_detector, overtraining_threshold, cleanup, model_author, vocoder, checkpointing = args.model_name, args.save_every_epoch, args.total_epoch, args.g_pretrained_path, args.d_pretrained_path, args.rvc_version, args.gpu, args.batch_size, args.sample_rate, args.pitch_guidance, args.save_only_latest, args.save_every_weights, args.cache_data_in_gpu, args.overtraining_detector, args.overtraining_threshold, args.cleanup, args.model_author, args.vocoder, args.checkpointing

experiment_dir = os.path.join("assets", "logs", model_name)
training_file_path = os.path.join(experiment_dir, "training_data.json")
config_save_path = os.path.join(experiment_dir, "config.json")

os.environ["CUDA_VISIBLE_DEVICES"] = gpus.replace("-", ",")
n_gpus = len(gpus.split("-"))

torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False

lowest_value = {"step": 0, "value": float("inf"), "epoch": 0}
global_step, last_loss_gen_all, overtrain_save_epoch = 0, 0, 0
loss_gen_history, smoothed_loss_gen_history, loss_disc_history, smoothed_loss_disc_history = [], [], [], []

with open(config_save_path, "r") as f:
    config = json.load(f)

config = HParams(**config)
config.data.training_files = os.path.join(experiment_dir, "filelist.txt")
logger = logging.getLogger(__name__)

if logger.hasHandlers(): logger.handlers.clear()
else:
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
    console_handler.setLevel(logging.INFO)
    file_handler = logging.handlers.RotatingFileHandler(os.path.join(experiment_dir, "train.log"), maxBytes=5*1024*1024, backupCount=3, encoding='utf-8')
    file_handler.setFormatter(logging.Formatter(fmt="\n%(asctime)s.%(msecs)03d | %(levelname)s | %(module)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

log_data = {translations['modelname']: model_name, translations["save_every_epoch"]: save_every_epoch, translations["total_e"]: total_epoch, translations["dorg"].format(pretrainG=pretrainG, pretrainD=pretrainD): "", translations['training_version']: version, "Gpu": gpus, translations['batch_size']: batch_size, translations['pretrain_sr']: sample_rate, translations['training_f0']: pitch_guidance, translations['save_only_latest']: save_only_latest, translations['save_every_weights']: save_every_weights, translations['cache_in_gpu']: cache_data_in_gpu, translations['overtraining_detector']: overtraining_detector, translations['threshold']: overtraining_threshold, translations['cleanup_training']: cleanup, translations['memory_efficient_training']: checkpointing}
if model_author: log_data[translations["model_author"].format(model_author=model_author)] = ""
if vocoder != "Default": log_data[translations['vocoder']] = vocoder

for key, value in log_data.items():
    logger.debug(f"{key}: {value}" if value != "" else f"{key} {value}")

def main():
    global training_file_path, last_loss_gen_all, smoothed_loss_gen_history, loss_gen_history, loss_disc_history, smoothed_loss_disc_history, overtrain_save_epoch, model_author, vocoder, checkpointing

    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = str(randint(20000, 55555))

    if torch.cuda.is_available(): device, n_gpus = torch.device("cuda"), torch.cuda.device_count()
    elif torch.backends.mps.is_available(): device, n_gpus = torch.device("mps"), 1
    else: device, n_gpus = torch.device("cpu"), 1

    def start():
        children = []
        pid_data = {"process_pids": []}

        with open(config_save_path, "r") as pid_file:
            try:
                pid_data.update(json.load(pid_file))
            except json.JSONDecodeError:
                pass

        with open(config_save_path, "w") as pid_file:
            for i in range(n_gpus):
                subproc = mp.Process(target=run, args=(i, n_gpus, experiment_dir, pretrainG, pretrainD, pitch_guidance, total_epoch, save_every_weights, config, device, model_author, vocoder, checkpointing))
                children.append(subproc)

                subproc.start()
                pid_data["process_pids"].append(subproc.pid)

            json.dump(pid_data, pid_file, indent=4)

        for i in range(n_gpus):
            children[i].join()

    def load_from_json(file_path):
        if os.path.exists(file_path):
            with open(file_path, "r") as f:
                data = json.load(f)
                return (data.get("loss_disc_history", []), data.get("smoothed_loss_disc_history", []), data.get("loss_gen_history", []), data.get("smoothed_loss_gen_history", []))

        return [], [], [], []

    def continue_overtrain_detector(training_file_path):
        if overtraining_detector and os.path.exists(training_file_path): (loss_disc_history, smoothed_loss_disc_history, loss_gen_history, smoothed_loss_gen_history) = load_from_json(training_file_path)

    n_gpus = torch.cuda.device_count()

    if not torch.cuda.is_available() and torch.backends.mps.is_available(): n_gpus = 1
    if n_gpus < 1:
        logger.warning(translations["not_gpu"])
        n_gpus = 1

    if cleanup:
        for root, dirs, files in os.walk(experiment_dir, topdown=False):
            for name in files:
                file_path = os.path.join(root, name)
                _, file_extension = os.path.splitext(name)
                if (file_extension == ".0" or (name.startswith("D_") and file_extension == ".pth") or (name.startswith("G_") and file_extension == ".pth") or (file_extension == ".index")): os.remove(file_path)

            for name in dirs:
                if name == "eval":
                    folder_path = os.path.join(root, name)

                    for item in os.listdir(folder_path):
                        item_path = os.path.join(folder_path, item)
                        if os.path.isfile(item_path): os.remove(item_path)

                    os.rmdir(folder_path)

    continue_overtrain_detector(training_file_path)
    start()

def plot_spectrogram_to_numpy(spectrogram):
    global MATPLOTLIB_FLAG

    if not MATPLOTLIB_FLAG:
        plt.switch_backend("Agg")
        MATPLOTLIB_FLAG = True

    fig, ax = plt.subplots(figsize=(10, 2))

    plt.colorbar(ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none"), ax=ax)
    plt.xlabel("Frames")
    plt.ylabel("Channels")
    plt.tight_layout()
    fig.canvas.draw()
    plt.close(fig)

    return np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(fig.canvas.get_width_height()[::-1] + (3,))

def verify_checkpoint_shapes(checkpoint_path, model):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    checkpoint_state_dict = checkpoint["model"]
    try:
        model_state_dict = model.module.load_state_dict(checkpoint_state_dict) if hasattr(model, "module") else model.load_state_dict(checkpoint_state_dict)
    except RuntimeError:
        logger.error(translations["checkpointing_err"])
        sys.exit(1)
    else: del checkpoint, checkpoint_state_dict, model_state_dict

def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sample_rate=22050):
    for k, v in scalars.items():
        writer.add_scalar(k, v, global_step)

    for k, v in histograms.items():
        writer.add_histogram(k, v, global_step)

    for k, v in images.items():
        writer.add_image(k, v, global_step, dataformats="HWC")

    for k, v in audios.items():
        writer.add_audio(k, v, global_step, audio_sample_rate)

def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
    assert os.path.isfile(checkpoint_path), translations["not_found_checkpoint"].format(checkpoint_path=checkpoint_path)
    checkpoint_dict = replace_keys_in_dict(replace_keys_in_dict(torch.load(checkpoint_path, map_location="cpu"), ".weight_v", ".parametrizations.weight.original1"), ".weight_g", ".parametrizations.weight.original0")
    new_state_dict = {k: checkpoint_dict["model"].get(k, v) for k, v in (model.module.state_dict() if hasattr(model, "module") else model.state_dict()).items()}

    if hasattr(model, "module"): model.module.load_state_dict(new_state_dict, strict=False)
    else: model.load_state_dict(new_state_dict, strict=False)

    if optimizer and load_opt == 1: optimizer.load_state_dict(checkpoint_dict.get("optimizer", {}))
    logger.debug(translations["save_checkpoint"].format(checkpoint_path=checkpoint_path, checkpoint_dict=checkpoint_dict['iteration']))
    return (model, optimizer, checkpoint_dict.get("learning_rate", 0), checkpoint_dict["iteration"])

def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
    state_dict = (model.module.state_dict() if hasattr(model, "module") else model.state_dict())
    torch.save(replace_keys_in_dict(replace_keys_in_dict({"model": state_dict, "iteration": iteration, "optimizer": optimizer.state_dict(), "learning_rate": learning_rate}, ".parametrizations.weight.original1", ".weight_v"), ".parametrizations.weight.original0", ".weight_g"), checkpoint_path)
    logger.info(translations["save_model"].format(checkpoint_path=checkpoint_path, iteration=iteration))

def latest_checkpoint_path(dir_path, regex="G_*.pth"):
    checkpoints = sorted(glob.glob(os.path.join(dir_path, regex)), key=lambda f: int("".join(filter(str.isdigit, f))))
    return checkpoints[-1] if checkpoints else None

def load_wav_to_torch(full_path):
    data, sample_rate = sf.read(full_path, dtype='float32')
    return torch.FloatTensor(data.astype(np.float32)), sample_rate

def load_filepaths_and_text(filename, split="|"):
    with open(filename, encoding="utf-8") as f:
        return [line.strip().split(split) for line in f]

def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            loss += torch.mean(torch.abs(rl.float().detach() - gl.float()))
    return loss * 2

def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses, g_losses = [], []

    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg**2)
        loss += r_loss + g_loss
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())
    return loss, r_losses, g_losses

def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []

    for dg in disc_outputs:
        l = torch.mean((1 - dg.float()) ** 2)
        gen_losses.append(l)
        loss += l
    return loss, gen_losses

def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    return torch.sum(kl * z_mask) / torch.sum(z_mask)

class TextAudioLoaderMultiNSFsid(tdata.Dataset):
    def __init__(self, hparams):
        self.audiopaths_and_text = load_filepaths_and_text(hparams.training_files)
        self.max_wav_value = hparams.max_wav_value
        self.sample_rate = hparams.sample_rate
        self.filter_length = hparams.filter_length
        self.hop_length = hparams.hop_length
        self.win_length = hparams.win_length
        self.sample_rate = hparams.sample_rate
        self.min_text_len = getattr(hparams, "min_text_len", 1)
        self.max_text_len = getattr(hparams, "max_text_len", 5000)
        self._filter()

    def _filter(self):
        audiopaths_and_text_new, lengths = [], []
        for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
            if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
                audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
                lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))

        self.audiopaths_and_text = audiopaths_and_text_new
        self.lengths = lengths

    def get_sid(self, sid):
        try:
            sid = torch.LongTensor([int(sid)])
        except ValueError as e:
            logger.error(translations["sid_error"].format(sid=sid, e=e))
            sid = torch.LongTensor([0])
        return sid

    def get_audio_text_pair(self, audiopath_and_text):
        phone, pitch, pitchf = self.get_labels(audiopath_and_text[1], audiopath_and_text[2], audiopath_and_text[3])
        spec, wav = self.get_audio(audiopath_and_text[0])
        dv = self.get_sid(audiopath_and_text[4])
        len_phone = phone.size()[0]
        len_spec = spec.size()[-1]

        if len_phone != len_spec:
            len_min = min(len_phone, len_spec)
            len_wav = len_min * self.hop_length
            spec, wav, phone = spec[:, :len_min], wav[:, :len_wav], phone[:len_min, :]
            pitch, pitchf = pitch[:len_min], pitchf[:len_min]
        return (spec, wav, phone, pitch, pitchf, dv)

    def get_labels(self, phone, pitch, pitchf):
        phone = np.repeat(np.load(phone), 2, axis=0)
        n_num = min(phone.shape[0], 900)
        return torch.FloatTensor(phone[:n_num, :]), torch.LongTensor(np.load(pitch)[:n_num]), torch.FloatTensor(np.load(pitchf)[:n_num])

    def get_audio(self, filename):
        audio, sample_rate = load_wav_to_torch(filename)
        if sample_rate != self.sample_rate: raise ValueError(translations["sr_does_not_match"].format(sample_rate=sample_rate, sample_rate2=self.sample_rate))
        audio_norm = audio.unsqueeze(0)
        spec_filename = filename.replace(".wav", ".spec.pt")

        if os.path.exists(spec_filename):
            try:
                spec = torch.load(spec_filename)
            except Exception as e:
                logger.error(translations["spec_error"].format(spec_filename=spec_filename, e=e))
                spec = torch.squeeze(spectrogram_torch(audio_norm, self.filter_length, self.hop_length, self.win_length, center=False), 0)
                torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
        else:
            spec = torch.squeeze(spectrogram_torch(audio_norm, self.filter_length, self.hop_length, self.win_length, center=False), 0)
            torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
        return spec, audio_norm

    def __getitem__(self, index):
        return self.get_audio_text_pair(self.audiopaths_and_text[index])

    def __len__(self):
        return len(self.audiopaths_and_text)

class TextAudioCollateMultiNSFsid:
    def __init__(self, return_ids=False):
        self.return_ids = return_ids

    def __call__(self, batch):
        _, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True)
        spec_lengths, wave_lengths = torch.LongTensor(len(batch)), torch.LongTensor(len(batch))
404 |
+
spec_padded, wave_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max([x[0].size(1) for x in batch])), torch.FloatTensor(len(batch), 1, max([x[1].size(1) for x in batch]))
|
405 |
+
spec_padded.zero_()
|
406 |
+
wave_padded.zero_()
|
407 |
+
max_phone_len = max([x[2].size(0) for x in batch])
|
408 |
+
phone_lengths, phone_padded = torch.LongTensor(len(batch)), torch.FloatTensor(len(batch), max_phone_len, batch[0][2].shape[1])
|
409 |
+
pitch_padded, pitchf_padded = torch.LongTensor(len(batch), max_phone_len), torch.FloatTensor(len(batch), max_phone_len)
|
410 |
+
phone_padded.zero_()
|
411 |
+
pitch_padded.zero_()
|
412 |
+
pitchf_padded.zero_()
|
413 |
+
sid = torch.LongTensor(len(batch))
|
414 |
+
|
415 |
+
for i in range(len(ids_sorted_decreasing)):
|
416 |
+
row = batch[ids_sorted_decreasing[i]]
|
417 |
+
spec = row[0]
|
418 |
+
spec_padded[i, :, : spec.size(1)] = spec
|
419 |
+
spec_lengths[i] = spec.size(1)
|
420 |
+
wave = row[1]
|
421 |
+
wave_padded[i, :, : wave.size(1)] = wave
|
422 |
+
wave_lengths[i] = wave.size(1)
|
423 |
+
phone = row[2]
|
424 |
+
phone_padded[i, : phone.size(0), :] = phone
|
425 |
+
phone_lengths[i] = phone.size(0)
|
426 |
+
pitch = row[3]
|
427 |
+
pitch_padded[i, : pitch.size(0)] = pitch
|
428 |
+
pitchf = row[4]
|
429 |
+
pitchf_padded[i, : pitchf.size(0)] = pitchf
|
430 |
+
sid[i] = row[5]
|
431 |
+
return (phone_padded, phone_lengths, pitch_padded, pitchf_padded, spec_padded, spec_lengths, wave_padded, wave_lengths, sid)
|
432 |
+
|
433 |
+
class TextAudioLoader(tdata.Dataset):
|
434 |
+
def __init__(self, hparams):
|
435 |
+
self.audiopaths_and_text = load_filepaths_and_text(hparams.training_files)
|
436 |
+
self.max_wav_value = hparams.max_wav_value
|
437 |
+
self.sample_rate = hparams.sample_rate
|
438 |
+
self.filter_length = hparams.filter_length
|
439 |
+
self.hop_length = hparams.hop_length
|
440 |
+
self.win_length = hparams.win_length
|
441 |
+
self.sample_rate = hparams.sample_rate
|
442 |
+
self.min_text_len = getattr(hparams, "min_text_len", 1)
|
443 |
+
self.max_text_len = getattr(hparams, "max_text_len", 5000)
|
444 |
+
self._filter()
|
445 |
+
|
446 |
+
def _filter(self):
|
447 |
+
audiopaths_and_text_new, lengths = [], []
|
448 |
+
for entry in self.audiopaths_and_text:
|
449 |
+
if len(entry) >= 3:
|
450 |
+
audiopath, text, dv = entry[:3]
|
451 |
+
if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
|
452 |
+
audiopaths_and_text_new.append([audiopath, text, dv])
|
453 |
+
lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
|
454 |
+
|
455 |
+
self.audiopaths_and_text = audiopaths_and_text_new
|
456 |
+
self.lengths = lengths
|
457 |
+
|
458 |
+
def get_sid(self, sid):
|
459 |
+
try:
|
460 |
+
sid = torch.LongTensor([int(sid)])
|
461 |
+
except ValueError as e:
|
462 |
+
logger.error(translations["sid_error"].format(sid=sid, e=e))
|
463 |
+
sid = torch.LongTensor([0])
|
464 |
+
return sid
|
465 |
+
|
466 |
+
def get_audio_text_pair(self, audiopath_and_text):
|
467 |
+
phone = self.get_labels(audiopath_and_text[1])
|
468 |
+
spec, wav = self.get_audio(audiopath_and_text[0])
|
469 |
+
dv = self.get_sid(audiopath_and_text[2])
|
470 |
+
len_phone = phone.size()[0]
|
471 |
+
len_spec = spec.size()[-1]
|
472 |
+
|
473 |
+
if len_phone != len_spec:
|
474 |
+
len_min = min(len_phone, len_spec)
|
475 |
+
len_wav = len_min * self.hop_length
|
476 |
+
spec = spec[:, :len_min]
|
477 |
+
wav = wav[:, :len_wav]
|
478 |
+
phone = phone[:len_min, :]
|
479 |
+
return (spec, wav, phone, dv)
|
480 |
+
|
481 |
+
def get_labels(self, phone):
|
482 |
+
phone = np.repeat(np.load(phone), 2, axis=0)
|
483 |
+
return torch.FloatTensor(phone[:min(phone.shape[0], 900), :])
|
484 |
+
|
485 |
+
def get_audio(self, filename):
|
486 |
+
audio, sample_rate = load_wav_to_torch(filename)
|
487 |
+
if sample_rate != self.sample_rate: raise ValueError(translations["sr_does_not_match"].format(sample_rate=sample_rate, sample_rate2=self.sample_rate))
|
488 |
+
audio_norm = audio.unsqueeze(0)
|
489 |
+
spec_filename = filename.replace(".wav", ".spec.pt")
|
490 |
+
|
491 |
+
if os.path.exists(spec_filename):
|
492 |
+
try:
|
493 |
+
spec = torch.load(spec_filename)
|
494 |
+
except Exception as e:
|
495 |
+
logger.error(translations["spec_error"].format(spec_filename=spec_filename, e=e))
|
496 |
+
spec = torch.squeeze(spectrogram_torch(audio_norm, self.filter_length, self.hop_length, self.win_length, center=False), 0)
|
497 |
+
torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
|
498 |
+
else:
|
499 |
+
spec = torch.squeeze(spectrogram_torch(audio_norm, self.filter_length, self.hop_length, self.win_length, center=False), 0)
|
500 |
+
torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
|
501 |
+
return spec, audio_norm
|
502 |
+
|
503 |
+
def __getitem__(self, index):
|
504 |
+
return self.get_audio_text_pair(self.audiopaths_and_text[index])
|
505 |
+
|
506 |
+
def __len__(self):
|
507 |
+
return len(self.audiopaths_and_text)
|
508 |
+
|
509 |
+
class TextAudioCollate:
|
510 |
+
def __init__(self, return_ids=False):
|
511 |
+
self.return_ids = return_ids
|
512 |
+
|
513 |
+
def __call__(self, batch):
|
514 |
+
_, ids_sorted_decreasing = torch.sort(torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True)
|
515 |
+
spec_lengths, wave_lengths = torch.LongTensor(len(batch)), torch.LongTensor(len(batch))
|
516 |
+
spec_padded, wave_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max([x[0].size(1) for x in batch])), torch.FloatTensor(len(batch), 1, max([x[1].size(1) for x in batch]))
|
517 |
+
spec_padded.zero_()
|
518 |
+
wave_padded.zero_()
|
519 |
+
max_phone_len = max([x[2].size(0) for x in batch])
|
520 |
+
phone_lengths, phone_padded = torch.LongTensor(len(batch)), torch.FloatTensor(len(batch), max_phone_len, batch[0][2].shape[1])
|
521 |
+
phone_padded.zero_()
|
522 |
+
sid = torch.LongTensor(len(batch))
|
523 |
+
for i in range(len(ids_sorted_decreasing)):
|
524 |
+
row = batch[ids_sorted_decreasing[i]]
|
525 |
+
spec = row[0]
|
526 |
+
spec_padded[i, :, : spec.size(1)] = spec
|
527 |
+
spec_lengths[i] = spec.size(1)
|
528 |
+
wave = row[1]
|
529 |
+
wave_padded[i, :, : wave.size(1)] = wave
|
530 |
+
wave_lengths[i] = wave.size(1)
|
531 |
+
phone = row[2]
|
532 |
+
phone_padded[i, : phone.size(0), :] = phone
|
533 |
+
phone_lengths[i] = phone.size(0)
|
534 |
+
sid[i] = row[3]
|
535 |
+
return (phone_padded, phone_lengths, spec_padded, spec_lengths, wave_padded, wave_lengths, sid)
|
536 |
+
|
537 |
+
class DistributedBucketSampler(tdata.distributed.DistributedSampler):
|
538 |
+
def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
|
539 |
+
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
|
540 |
+
self.lengths = dataset.lengths
|
541 |
+
self.batch_size = batch_size
|
542 |
+
self.boundaries = boundaries
|
543 |
+
self.buckets, self.num_samples_per_bucket = self._create_buckets()
|
544 |
+
self.total_size = sum(self.num_samples_per_bucket)
|
545 |
+
self.num_samples = self.total_size // self.num_replicas
|
546 |
+
|
547 |
+
def _create_buckets(self):
|
548 |
+
buckets = [[] for _ in range(len(self.boundaries) - 1)]
|
549 |
+
for i in range(len(self.lengths)):
|
550 |
+
idx_bucket = self._bisect(self.lengths[i])
|
551 |
+
if idx_bucket != -1: buckets[idx_bucket].append(i)
|
552 |
+
|
553 |
+
for i in range(len(buckets) - 1, -1, -1):
|
554 |
+
if len(buckets[i]) == 0:
|
555 |
+
buckets.pop(i)
|
556 |
+
self.boundaries.pop(i + 1)
|
557 |
+
|
558 |
+
num_samples_per_bucket = []
|
559 |
+
for i in range(len(buckets)):
|
560 |
+
len_bucket = len(buckets[i])
|
561 |
+
total_batch_size = self.num_replicas * self.batch_size
|
562 |
+
num_samples_per_bucket.append(len_bucket + ((total_batch_size - (len_bucket % total_batch_size)) % total_batch_size))
|
563 |
+
return buckets, num_samples_per_bucket
|
564 |
+
|
565 |
+
def __iter__(self):
|
566 |
+
g = torch.Generator()
|
567 |
+
g.manual_seed(self.epoch)
|
568 |
+
indices, batches = [], []
|
569 |
+
if self.shuffle:
|
570 |
+
for bucket in self.buckets:
|
571 |
+
indices.append(torch.randperm(len(bucket), generator=g).tolist())
|
572 |
+
else:
|
573 |
+
for bucket in self.buckets:
|
574 |
+
indices.append(list(range(len(bucket))))
|
575 |
+
|
576 |
+
for i in range(len(self.buckets)):
|
577 |
+
bucket = self.buckets[i]
|
578 |
+
len_bucket = len(bucket)
|
579 |
+
ids_bucket = indices[i]
|
580 |
+
rem = self.num_samples_per_bucket[i] - len_bucket
|
581 |
+
ids_bucket = (ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[: (rem % len_bucket)])[self.rank :: self.num_replicas]
|
582 |
+
|
583 |
+
for j in range(len(ids_bucket) // self.batch_size):
|
584 |
+
batches.append([bucket[idx] for idx in ids_bucket[j * self.batch_size : (j + 1) * self.batch_size]])
|
585 |
+
|
586 |
+
if self.shuffle: batches = [batches[i] for i in torch.randperm(len(batches), generator=g).tolist()]
|
587 |
+
self.batches = batches
|
588 |
+
assert len(self.batches) * self.batch_size == self.num_samples
|
589 |
+
return iter(self.batches)
|
590 |
+
|
591 |
+
def _bisect(self, x, lo=0, hi=None):
|
592 |
+
if hi is None: hi = len(self.boundaries) - 1
|
593 |
+
|
594 |
+
if hi > lo:
|
595 |
+
mid = (hi + lo) // 2
|
596 |
+
if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: return mid
|
597 |
+
elif x <= self.boundaries[mid]: return self._bisect(x, lo, mid)
|
598 |
+
else: return self._bisect(x, mid + 1, hi)
|
599 |
+
else: return -1
|
600 |
+
|
601 |
+
def __len__(self):
|
602 |
+
return self.num_samples // self.batch_size
|
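Reviewer note: DistributedBucketSampler groups utterances into length buckets (the boundaries passed in run() are [100, 200, ..., 900]) and pads every bucket up to a multiple of num_replicas * batch_size so all ranks draw the same number of batches per epoch. A minimal sketch of the padding arithmetic with illustrative numbers:

    # ---- illustrative sketch, not part of the diff ----
    len_bucket = 13                               # utterances that fell into one bucket
    num_replicas, batch_size = 2, 4               # 2 GPUs, batch of 4
    total_batch_size = num_replicas * batch_size  # 8
    rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
    print(rem, len_bucket + rem)                  # 3 16 -> 2 batches of 4 per rank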
603 |
+
|
604 |
+
class MultiPeriodDiscriminator(torch.nn.Module):
|
605 |
+
def __init__(self, version, use_spectral_norm=False, checkpointing=False):
|
606 |
+
super(MultiPeriodDiscriminator, self).__init__()
|
607 |
+
self.checkpointing = checkpointing
|
608 |
+
periods = ([2, 3, 5, 7, 11, 17] if version == "v1" else [2, 3, 5, 7, 11, 17, 23, 37])
|
609 |
+
self.discriminators = torch.nn.ModuleList([DiscriminatorS(use_spectral_norm=use_spectral_norm, checkpointing=checkpointing)] + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm, checkpointing=checkpointing) for p in periods])
|
610 |
+
|
611 |
+
def forward(self, y, y_hat):
|
612 |
+
y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
|
613 |
+
for d in self.discriminators:
|
614 |
+
if self.training and self.checkpointing:
|
615 |
+
def forward_discriminator(d, y, y_hat):
|
616 |
+
y_d_r, fmap_r = d(y)
|
617 |
+
y_d_g, fmap_g = d(y_hat)
|
618 |
+
return y_d_r, fmap_r, y_d_g, fmap_g
|
619 |
+
y_d_r, fmap_r, y_d_g, fmap_g = checkpoint.checkpoint(forward_discriminator, d, y, y_hat, use_reentrant=False)
|
620 |
+
else:
|
621 |
+
y_d_r, fmap_r = d(y)
|
622 |
+
y_d_g, fmap_g = d(y_hat)
|
623 |
+
|
624 |
+
y_d_rs.append(y_d_r)
|
625 |
+
y_d_gs.append(y_d_g)
|
626 |
+
fmap_rs.append(fmap_r)
|
627 |
+
fmap_gs.append(fmap_g)
|
628 |
+
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
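Reviewer note: the bank is one DiscriminatorS followed by one DiscriminatorP per period, so v1 yields 7 score/feature-map lists per forward pass and v2 yields 9. A hedged usage sketch, assuming it runs inside this file where torch and the norm helpers are already imported; shapes are illustrative:

    # ---- illustrative sketch, not part of the diff ----
    mpd = MultiPeriodDiscriminator(version="v2")
    real = torch.randn(1, 1, 16384)               # (batch, 1, samples)
    fake = torch.randn(1, 1, 16384)
    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)
    print(len(y_d_rs))                            # 9 = 1 DiscriminatorS + 8 DiscriminatorP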
629 |
+
|
630 |
+
class DiscriminatorS(torch.nn.Module):
|
631 |
+
def __init__(self, use_spectral_norm=False, checkpointing=False):
|
632 |
+
super(DiscriminatorS, self).__init__()
|
633 |
+
self.checkpointing = checkpointing
|
634 |
+
norm_f = spectral_norm if use_spectral_norm else weight_norm
|
635 |
+
self.convs = torch.nn.ModuleList([norm_f(torch.nn.Conv1d(1, 16, 15, 1, padding=7)), norm_f(torch.nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)), norm_f(torch.nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)), norm_f(torch.nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)), norm_f(torch.nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), norm_f(torch.nn.Conv1d(1024, 1024, 5, 1, padding=2))])
|
636 |
+
self.conv_post = norm_f(torch.nn.Conv1d(1024, 1, 3, 1, padding=1))
|
637 |
+
self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)
|
638 |
+
|
639 |
+
def forward(self, x):
|
640 |
+
fmap = []
|
641 |
+
for conv in self.convs:
|
642 |
+
x = checkpoint.checkpoint(self.lrelu, checkpoint.checkpoint(conv, x, use_reentrant = False), use_reentrant = False) if self.training and self.checkpointing else self.lrelu(conv(x))
|
643 |
+
fmap.append(x)
|
644 |
+
|
645 |
+
x = self.conv_post(x)
|
646 |
+
fmap.append(x)
|
647 |
+
return torch.flatten(x, 1, -1), fmap
|
648 |
+
|
649 |
+
class DiscriminatorP(torch.nn.Module):
|
650 |
+
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, checkpointing=False):
|
651 |
+
super(DiscriminatorP, self).__init__()
|
652 |
+
self.period = period
|
653 |
+
self.checkpointing = checkpointing
|
654 |
+
norm_f = spectral_norm if use_spectral_norm else weight_norm
|
655 |
+
self.convs = torch.nn.ModuleList([norm_f(torch.nn.Conv2d(in_ch, out_ch, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))) for in_ch, out_ch in zip([1, 32, 128, 512, 1024], [32, 128, 512, 1024, 1024])])
|
656 |
+
self.conv_post = norm_f(torch.nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
657 |
+
self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)
|
658 |
+
|
659 |
+
def forward(self, x):
|
660 |
+
fmap = []
|
661 |
+
b, c, t = x.shape
|
662 |
+
if t % self.period != 0: x = torch.nn.functional.pad(x, (0, (self.period - (t % self.period))), "reflect")
|
663 |
+
x = x.view(b, c, -1, self.period)
|
664 |
+
for conv in self.convs:
|
665 |
+
x = checkpoint.checkpoint(self.lrelu, checkpoint.checkpoint(conv, x, use_reentrant = False), use_reentrant = False) if self.training and self.checkpointing else self.lrelu(conv(x))
|
666 |
+
fmap.append(x)
|
667 |
+
|
668 |
+
x = self.conv_post(x)
|
669 |
+
fmap.append(x)
|
670 |
+
return torch.flatten(x, 1, -1), fmap
|
671 |
+
|
672 |
+
class EpochRecorder:
|
673 |
+
def __init__(self):
|
674 |
+
self.last_time = ttime()
|
675 |
+
|
676 |
+
def record(self):
|
677 |
+
now_time = ttime()
|
678 |
+
elapsed_time = now_time - self.last_time
|
679 |
+
self.last_time = now_time
|
680 |
+
return translations["time_or_speed_training"].format(current_time=datetime.datetime.now().strftime("%H:%M:%S"), elapsed_time_str=str(datetime.timedelta(seconds=int(round(elapsed_time, 1)))))
|
681 |
+
|
682 |
+
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
|
683 |
+
return torch.log(torch.clamp(x, min=clip_val) * C)
|
684 |
+
|
685 |
+
def dynamic_range_decompression_torch(x, C=1):
|
686 |
+
return torch.exp(x) / C
|
687 |
+
|
688 |
+
def spectral_normalize_torch(magnitudes):
|
689 |
+
return dynamic_range_compression_torch(magnitudes)
|
690 |
+
|
691 |
+
def spectral_de_normalize_torch(magnitudes):
|
692 |
+
return dynamic_range_decompression_torch(magnitudes)
|
693 |
+
|
694 |
+
mel_basis, hann_window = {}, {}
|
695 |
+
|
696 |
+
def spectrogram_torch(y, n_fft, hop_size, win_size, center=False):
|
697 |
+
global hann_window
|
698 |
+
|
699 |
+
wnsize_dtype_device = str(win_size) + "_" + str(y.dtype) + "_" + str(y.device)
|
700 |
+
if wnsize_dtype_device not in hann_window: hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
|
701 |
+
spec = torch.stft(torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect").squeeze(1), n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], center=center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True)
|
702 |
+
return torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + 1e-6)
|
703 |
+
|
704 |
+
def spec_to_mel_torch(spec, n_fft, num_mels, sample_rate, fmin, fmax):
|
705 |
+
global mel_basis
|
706 |
+
|
707 |
+
fmax_dtype_device = str(fmax) + "_" + str(spec.dtype) + "_" + str(spec.device)
|
708 |
+
if fmax_dtype_device not in mel_basis: mel_basis[fmax_dtype_device] = torch.from_numpy(librosa_mel_fn(sr=sample_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)).to(dtype=spec.dtype, device=spec.device)
|
709 |
+
return spectral_normalize_torch(torch.matmul(mel_basis[fmax_dtype_device], spec))
|
710 |
+
|
711 |
+
def mel_spectrogram_torch(y, n_fft, num_mels, sample_rate, hop_size, win_size, fmin, fmax, center=False):
|
712 |
+
return spec_to_mel_torch(spectrogram_torch(y, n_fft, hop_size, win_size, center), n_fft, num_mels, sample_rate, fmin, fmax)
|
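Reviewer note: spectrogram_torch caches one Hann window per (win_size, dtype, device) and spec_to_mel_torch caches one mel filterbank per (fmax, dtype, device), so repeated calls reuse them. A hedged shape check, assuming it runs where these helpers are defined; the hyperparameters below are an assumption in the spirit of the bundled 48 kHz config, not read from it:

    # ---- illustrative sketch, not part of the diff ----
    y = torch.randn(1, 48000)                     # one second at 48 kHz, (batch, samples)
    mel = mel_spectrogram_torch(y, n_fft=2048, num_mels=128, sample_rate=48000,
                                hop_size=480, win_size=2048, fmin=0.0, fmax=None)
    print(mel.shape)                              # torch.Size([1, 128, 100]), one frame per hop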
713 |
+
|
714 |
+
def replace_keys_in_dict(d, old_key_part, new_key_part):
|
715 |
+
updated_dict = OrderedDict() if isinstance(d, OrderedDict) else {}
|
716 |
+
for key, value in d.items():
|
717 |
+
updated_dict[(key.replace(old_key_part, new_key_part) if isinstance(key, str) else key)] = (replace_keys_in_dict(value, old_key_part, new_key_part) if isinstance(value, dict) else value)
|
718 |
+
return updated_dict
|
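Reviewer note: replace_keys_in_dict is what keeps saved checkpoints loadable under the legacy naming: the torch parametrization keys are renamed back to .weight_g / .weight_v before torch.save (see save_checkpoint and extract_model). A small example with a hypothetical key name:

    # ---- illustrative sketch, not part of the diff ----
    state = {"dec.conv_pre.parametrizations.weight.original0": 1,
             "dec.conv_pre.parametrizations.weight.original1": 2}
    renamed = replace_keys_in_dict(
        replace_keys_in_dict(state, ".parametrizations.weight.original1", ".weight_v"),
        ".parametrizations.weight.original0", ".weight_g")
    print(sorted(renamed))   # ['dec.conv_pre.weight_g', 'dec.conv_pre.weight_v']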
719 |
+
|
720 |
+
def extract_model(ckpt, sr, pitch_guidance, name, model_path, epoch, step, version, hps, model_author, vocoder):
|
721 |
+
try:
|
722 |
+
logger.info(translations["savemodel"].format(model_dir=model_path, epoch=epoch, step=step))
|
723 |
+
os.makedirs(os.path.dirname(model_path), exist_ok=True)
|
724 |
+
|
725 |
+
opt = OrderedDict(weight={key: value.half() for key, value in ckpt.items() if "enc_q" not in key})
|
726 |
+
opt["config"] = [hps.data.filter_length // 2 + 1, 32, hps.model.inter_channels, hps.model.hidden_channels, hps.model.filter_channels, hps.model.n_heads, hps.model.n_layers, hps.model.kernel_size, hps.model.p_dropout, hps.model.resblock, hps.model.resblock_kernel_sizes, hps.model.resblock_dilation_sizes, hps.model.upsample_rates, hps.model.upsample_initial_channel, hps.model.upsample_kernel_sizes, hps.model.spk_embed_dim, hps.model.gin_channels, hps.data.sample_rate]
|
727 |
+
opt["epoch"] = f"{epoch}epoch"
|
728 |
+
opt["step"] = step
|
729 |
+
opt["sr"] = sr
|
730 |
+
opt["f0"] = int(pitch_guidance)
|
731 |
+
opt["version"] = version
|
732 |
+
opt["creation_date"] = datetime.datetime.now().isoformat()
|
733 |
+
opt["model_hash"] = hashlib.sha256(f"{str(ckpt)} {epoch} {step} {datetime.datetime.now().isoformat()}".encode()).hexdigest()
|
734 |
+
opt["model_name"] = name
|
735 |
+
opt["author"] = model_author
|
736 |
+
opt["vocoder"] = vocoder
|
737 |
+
|
738 |
+
torch.save(replace_keys_in_dict(replace_keys_in_dict(opt, ".parametrizations.weight.original1", ".weight_v"), ".parametrizations.weight.original0", ".weight_g"), model_path)
|
739 |
+
except Exception as e:
|
740 |
+
logger.error(f"{translations['extract_model_error']}: {e}")
|
741 |
+
|
742 |
+
def run(rank, n_gpus, experiment_dir, pretrainG, pretrainD, pitch_guidance, custom_total_epoch, custom_save_every_weights, config, device, model_author, vocoder, checkpointing):
|
743 |
+
global global_step
|
744 |
+
|
745 |
+
if rank == 0: writer_eval = SummaryWriter(log_dir=os.path.join(experiment_dir, "eval"))
|
746 |
+
else: writer_eval = None
|
747 |
+
|
748 |
+
dist.init_process_group(backend="gloo", init_method="env://", world_size=n_gpus, rank=rank)
|
749 |
+
torch.manual_seed(config.train.seed)
|
750 |
+
if torch.cuda.is_available(): torch.cuda.set_device(rank)
|
751 |
+
|
752 |
+
train_dataset = TextAudioLoaderMultiNSFsid(config.data)
|
753 |
+
train_loader = tdata.DataLoader(train_dataset, num_workers=4, shuffle=False, pin_memory=True, collate_fn=TextAudioCollateMultiNSFsid(), batch_sampler=DistributedBucketSampler(train_dataset, batch_size * n_gpus, [100, 200, 300, 400, 500, 600, 700, 800, 900], num_replicas=n_gpus, rank=rank, shuffle=True), persistent_workers=True, prefetch_factor=8)
|
754 |
+
|
755 |
+
net_g, net_d = Synthesizer(config.data.filter_length // 2 + 1, config.train.segment_size // config.data.hop_length, **config.model, use_f0=pitch_guidance, sr=sample_rate, vocoder=vocoder, checkpointing=checkpointing), MultiPeriodDiscriminator(version, config.model.use_spectral_norm, checkpointing=checkpointing)
|
756 |
+
|
757 |
+
if torch.cuda.is_available(): net_g, net_d = net_g.cuda(rank), net_d.cuda(rank)
|
758 |
+
else: net_g, net_d = net_g.to(device), net_d.to(device)
|
759 |
+
optim_g, optim_d = torch.optim.AdamW(net_g.parameters(), config.train.learning_rate, betas=config.train.betas, eps=config.train.eps), torch.optim.AdamW(net_d.parameters(), config.train.learning_rate, betas=config.train.betas, eps=config.train.eps)
|
760 |
+
net_g, net_d = (DDP(net_g, device_ids=[rank]), DDP(net_d, device_ids=[rank])) if torch.cuda.is_available() else (DDP(net_g), DDP(net_d))
|
761 |
+
|
762 |
+
try:
|
763 |
+
logger.info(translations["start_training"])
|
764 |
+
_, _, _, epoch_str = load_checkpoint(latest_checkpoint_path(experiment_dir, "D_*.pth"), net_d, optim_d)
|
765 |
+
_, _, _, epoch_str = load_checkpoint(latest_checkpoint_path(experiment_dir, "G_*.pth"), net_g, optim_g)
|
766 |
+
epoch_str += 1
|
767 |
+
global_step = (epoch_str - 1) * len(train_loader)
|
768 |
+
except:
|
769 |
+
epoch_str, global_step = 1, 0
|
770 |
+
|
771 |
+
if pretrainG != "" and pretrainG != "None":
|
772 |
+
if rank == 0:
|
773 |
+
verify_checkpoint_shapes(pretrainG, net_g)
|
774 |
+
logger.info(translations["import_pretrain"].format(dg="G", pretrain=pretrainG))
|
775 |
+
|
776 |
+
if hasattr(net_g, "module"): net_g.module.load_state_dict(torch.load(pretrainG, map_location="cpu")["model"])
|
777 |
+
else: net_g.load_state_dict(torch.load(pretrainG, map_location="cpu")["model"])
|
778 |
+
else: logger.warning(translations["not_using_pretrain"].format(dg="G"))
|
779 |
+
|
780 |
+
if pretrainD != "" and pretrainD != "None":
|
781 |
+
if rank == 0:
|
782 |
+
verify_checkpoint_shapes(pretrainD, net_d)
|
783 |
+
logger.info(translations["import_pretrain"].format(dg="D", pretrain=pretrainD))
|
784 |
+
|
785 |
+
if hasattr(net_d, "module"): net_d.module.load_state_dict(torch.load(pretrainD, map_location="cpu")["model"])
|
786 |
+
else: net_d.load_state_dict(torch.load(pretrainD, map_location="cpu")["model"])
|
787 |
+
else: logger.warning(translations["not_using_pretrain"].format(dg="D"))
|
788 |
+
|
789 |
+
scheduler_g, scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=config.train.lr_decay, last_epoch=epoch_str - 2), torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=config.train.lr_decay, last_epoch=epoch_str - 2)
|
790 |
+
|
791 |
+
optim_d.step()
|
792 |
+
optim_g.step()
|
793 |
+
|
794 |
+
scaler = GradScaler(enabled=False)
|
795 |
+
cache = []
|
796 |
+
|
797 |
+
for info in train_loader:
|
798 |
+
phone, phone_lengths, pitch, pitchf, _, _, _, _, sid = info
|
799 |
+
reference = (phone.cuda(rank, non_blocking=True), phone_lengths.cuda(rank, non_blocking=True), (pitch.cuda(rank, non_blocking=True) if pitch_guidance else None), (pitchf.cuda(rank, non_blocking=True) if pitch_guidance else None), sid.cuda(rank, non_blocking=True)) if device.type == "cuda" else (phone.to(device), phone_lengths.to(device), (pitch.to(device) if pitch_guidance else None), (pitchf.to(device) if pitch_guidance else None), sid.to(device))
|
800 |
+
break
|
801 |
+
|
802 |
+
for epoch in range(epoch_str, total_epoch + 1):
|
803 |
+
train_and_evaluate(rank, epoch, config, [net_g, net_d], [optim_g, optim_d], scaler, train_loader, writer_eval, cache, custom_save_every_weights, custom_total_epoch, device, reference, model_author, vocoder)
|
804 |
+
scheduler_g.step()
|
805 |
+
scheduler_d.step()
|
806 |
+
|
807 |
+
def train_and_evaluate(rank, epoch, hps, nets, optims, scaler, train_loader, writer, cache, custom_save_every_weights, custom_total_epoch, device, reference, model_author, vocoder):
|
808 |
+
global global_step, lowest_value, loss_disc, consecutive_increases_gen, consecutive_increases_disc
|
809 |
+
|
810 |
+
if epoch == 1:
|
811 |
+
lowest_value = {"step": 0, "value": float("inf"), "epoch": 0}
|
812 |
+
last_loss_gen_all, consecutive_increases_gen, consecutive_increases_disc = 0.0, 0, 0
|
813 |
+
|
814 |
+
net_g, net_d = nets
|
815 |
+
optim_g, optim_d = optims
|
816 |
+
train_loader.batch_sampler.set_epoch(epoch)
|
817 |
+
|
818 |
+
net_g.train()
|
819 |
+
net_d.train()
|
820 |
+
|
821 |
+
if device.type == "cuda" and cache_data_in_gpu:
|
822 |
+
data_iterator = cache
|
823 |
+
if cache == []:
|
824 |
+
for batch_idx, info in enumerate(train_loader):
|
825 |
+
cache.append((batch_idx, [tensor.cuda(rank, non_blocking=True) for tensor in info]))
|
826 |
+
else: shuffle(cache)
|
827 |
+
else: data_iterator = enumerate(train_loader)
|
828 |
+
|
829 |
+
with tqdm(total=len(train_loader), leave=False) as pbar:
|
830 |
+
for batch_idx, info in data_iterator:
|
831 |
+
if device.type == "cuda" and not cache_data_in_gpu: info = [tensor.cuda(rank, non_blocking=True) for tensor in info]
|
832 |
+
elif device.type != "cuda": info = [tensor.to(device) for tensor in info]
|
833 |
+
|
834 |
+
phone, phone_lengths, pitch, pitchf, spec, spec_lengths, wave, _, sid = info
|
835 |
+
pitch = pitch if pitch_guidance else None
|
836 |
+
pitchf = pitchf if pitch_guidance else None
|
837 |
+
|
838 |
+
with autocast(enabled=False):
|
839 |
+
model_output = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid)
|
840 |
+
y_hat, ids_slice, _, z_mask, (_, z_p, m_p, logs_p, _, logs_q) = model_output
|
841 |
+
|
842 |
+
mel = spec_to_mel_torch(spec, config.data.filter_length, config.data.n_mel_channels, config.data.sample_rate, config.data.mel_fmin, config.data.mel_fmax)
|
843 |
+
y_mel = slice_segments(mel, ids_slice, config.train.segment_size // config.data.hop_length, dim=3)
|
844 |
+
|
845 |
+
with autocast(enabled=False):
|
846 |
+
y_hat_mel = mel_spectrogram_torch(y_hat.float().squeeze(1), config.data.filter_length, config.data.n_mel_channels, config.data.sample_rate, config.data.hop_length, config.data.win_length, config.data.mel_fmin, config.data.mel_fmax)
|
847 |
+
|
848 |
+
wave = slice_segments(wave, ids_slice * config.data.hop_length, config.train.segment_size, dim=3)
|
849 |
+
y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach())
|
850 |
+
|
851 |
+
with autocast(enabled=False):
|
852 |
+
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
|
853 |
+
|
854 |
+
optim_d.zero_grad()
|
855 |
+
scaler.scale(loss_disc).backward()
|
856 |
+
scaler.unscale_(optim_d)
|
857 |
+
grad_norm_d = clip_grad_value(net_d.parameters(), None)
|
858 |
+
scaler.step(optim_d)
|
859 |
+
|
860 |
+
with autocast(enabled=False):
|
861 |
+
y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat)
|
862 |
+
with autocast(enabled=False):
|
863 |
+
loss_mel = F.l1_loss(y_mel, y_hat_mel) * config.train.c_mel
|
864 |
+
loss_kl = (kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * config.train.c_kl)
|
865 |
+
loss_fm = feature_loss(fmap_r, fmap_g)
|
866 |
+
loss_gen, losses_gen = generator_loss(y_d_hat_g)
|
867 |
+
loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
|
868 |
+
|
869 |
+
if loss_gen_all < lowest_value["value"]:
|
870 |
+
lowest_value["value"] = loss_gen_all
|
871 |
+
lowest_value["step"] = global_step
|
872 |
+
lowest_value["epoch"] = epoch
|
873 |
+
if epoch > lowest_value["epoch"]: logger.warning(translations["training_warning"])
|
874 |
+
|
875 |
+
optim_g.zero_grad()
|
876 |
+
scaler.scale(loss_gen_all).backward()
|
877 |
+
scaler.unscale_(optim_g)
|
878 |
+
grad_norm_g = clip_grad_value(net_g.parameters(), None)
|
879 |
+
scaler.step(optim_g)
|
880 |
+
scaler.update()
|
881 |
+
|
882 |
+
if rank == 0:
|
883 |
+
if global_step % config.train.log_interval == 0:
|
884 |
+
if loss_mel > 75: loss_mel = 75
|
885 |
+
if loss_kl > 9: loss_kl = 9
|
886 |
+
|
887 |
+
scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc, "learning_rate": optim_g.param_groups[0]["lr"], "grad/norm_d": grad_norm_d, "grad/norm_g": grad_norm_g, "loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl}
|
888 |
+
scalar_dict.update({f"loss/g/{i}": v for i, v in enumerate(losses_gen)})
|
889 |
+
scalar_dict.update({f"loss/d_r/{i}": v for i, v in enumerate(losses_disc_r)})
|
890 |
+
scalar_dict.update({f"loss/d_g/{i}": v for i, v in enumerate(losses_disc_g)})
|
891 |
+
|
892 |
+
with torch.no_grad():
|
893 |
+
o, *_ = net_g.module.infer(*reference) if hasattr(net_g, "module") else net_g.infer(*reference)
|
894 |
+
|
895 |
+
summarize(writer=writer, global_step=global_step, images={"slice/mel_org": plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), "slice/mel_gen": plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), "all/mel": plot_spectrogram_to_numpy(mel[0].data.cpu().numpy())}, scalars=scalar_dict, audios={f"gen/audio_{global_step:07d}": o[0, :, :]}, audio_sample_rate=config.data.sample_rate)
|
896 |
+
|
897 |
+
global_step += 1
|
898 |
+
pbar.update(1)
|
899 |
+
|
900 |
+
def check_overtraining(smoothed_loss_history, threshold, epsilon=0.004):
|
901 |
+
if len(smoothed_loss_history) < threshold + 1: return False
|
902 |
+
|
903 |
+
for i in range(-threshold, -1):
|
904 |
+
if smoothed_loss_history[i + 1] > smoothed_loss_history[i]: return True
|
905 |
+
if abs(smoothed_loss_history[i + 1] - smoothed_loss_history[i]) >= epsilon: return False
|
906 |
+
|
907 |
+
return True
|
908 |
+
|
909 |
+
def update_exponential_moving_average(smoothed_loss_history, new_value, smoothing=0.987):
|
910 |
+
smoothed_value = new_value if not smoothed_loss_history else (smoothing * smoothed_loss_history[-1] + (1 - smoothing) * new_value)
|
911 |
+
smoothed_loss_history.append(smoothed_value)
|
912 |
+
return smoothed_value
|
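Reviewer note: with smoothing = 0.987 the moving average reacts very slowly; starting from a smoothed loss of 30.0, a new raw value of 20.0 only moves it to 0.987 * 30.0 + 0.013 * 20.0 = 29.87, which is why the overtraining check above tolerates single-epoch spikes.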
913 |
+
|
914 |
+
def save_to_json(file_path, loss_disc_history, smoothed_loss_disc_history, loss_gen_history, smoothed_loss_gen_history):
|
915 |
+
with open(file_path, "w") as f:
|
916 |
+
json.dump({"loss_disc_history": loss_disc_history, "smoothed_loss_disc_history": smoothed_loss_disc_history, "loss_gen_history": loss_gen_history, "smoothed_loss_gen_history": smoothed_loss_gen_history}, f)
|
917 |
+
|
918 |
+
model_add, model_del = [], []
|
919 |
+
done = False
|
920 |
+
|
921 |
+
if rank == 0:
|
922 |
+
if epoch % save_every_epoch == 0:
|
923 |
+
checkpoint_suffix = f"{'latest' if save_only_latest else global_step}.pth"
|
924 |
+
save_checkpoint(net_g, optim_g, config.train.learning_rate, epoch, os.path.join(experiment_dir, "G_" + checkpoint_suffix))
|
925 |
+
save_checkpoint(net_d, optim_d, config.train.learning_rate, epoch, os.path.join(experiment_dir, "D_" + checkpoint_suffix))
|
926 |
+
if custom_save_every_weights: model_add.append(os.path.join("assets", "weights", f"{model_name}_{epoch}e_{global_step}s.pth"))
|
927 |
+
|
928 |
+
if overtraining_detector and epoch > 1:
|
929 |
+
current_loss_disc = float(loss_disc)
|
930 |
+
loss_disc_history.append(current_loss_disc)
|
931 |
+
smoothed_value_disc = update_exponential_moving_average(smoothed_loss_disc_history, current_loss_disc)
|
932 |
+
is_overtraining_disc = check_overtraining(smoothed_loss_disc_history, overtraining_threshold * 2)
|
933 |
+
|
934 |
+
if is_overtraining_disc: consecutive_increases_disc += 1
|
935 |
+
else: consecutive_increases_disc = 0
|
936 |
+
|
937 |
+
current_loss_gen = float(lowest_value["value"])
|
938 |
+
loss_gen_history.append(current_loss_gen)
|
939 |
+
smoothed_value_gen = update_exponential_moving_average(smoothed_loss_gen_history, current_loss_gen)
|
940 |
+
is_overtraining_gen = check_overtraining(smoothed_loss_gen_history, overtraining_threshold, 0.01)
|
941 |
+
|
942 |
+
if is_overtraining_gen: consecutive_increases_gen += 1
|
943 |
+
else: consecutive_increases_gen = 0
|
944 |
+
|
945 |
+
if epoch % save_every_epoch == 0: save_to_json(training_file_path, loss_disc_history, smoothed_loss_disc_history, loss_gen_history, smoothed_loss_gen_history)
|
946 |
+
|
947 |
+
if (is_overtraining_gen and consecutive_increases_gen == overtraining_threshold or is_overtraining_disc and consecutive_increases_disc == (overtraining_threshold * 2)):
|
948 |
+
logger.info(translations["overtraining_find"].format(epoch=epoch, smoothed_value_gen=f"{smoothed_value_gen:.3f}", smoothed_value_disc=f"{smoothed_value_disc:.3f}"))
|
949 |
+
done = True
|
950 |
+
else:
|
951 |
+
logger.info(translations["best_epoch"].format(epoch=epoch, smoothed_value_gen=f"{smoothed_value_gen:.3f}", smoothed_value_disc=f"{smoothed_value_disc:.3f}"))
|
952 |
+
for file in glob.glob(os.path.join("assets", "weights", f"{model_name}_*e_*s_best_epoch.pth")):
|
953 |
+
model_del.append(file)
|
954 |
+
|
955 |
+
model_add.append(os.path.join("assets", "weights", f"{model_name}_{epoch}e_{global_step}s_best_epoch.pth"))
|
956 |
+
|
957 |
+
if epoch >= custom_total_epoch:
|
958 |
+
logger.info(translations["success_training"].format(epoch=epoch, global_step=global_step, loss_gen_all=round(loss_gen_all.item(), 3)))
|
959 |
+
logger.info(translations["training_info"].format(lowest_value_rounded=round(float(lowest_value["value"]), 3), lowest_value_epoch=lowest_value['epoch'], lowest_value_step=lowest_value['step']))
|
960 |
+
|
961 |
+
pid_file_path = os.path.join(experiment_dir, "config.json")
|
962 |
+
|
963 |
+
with open(pid_file_path, "r") as pid_file:
|
964 |
+
pid_data = json.load(pid_file)
|
965 |
+
|
966 |
+
with open(pid_file_path, "w") as pid_file:
|
967 |
+
pid_data.pop("process_pids", None)
|
968 |
+
json.dump(pid_data, pid_file, indent=4)
|
969 |
+
|
970 |
+
model_add.append(os.path.join("assets", "weights", f"{model_name}_{epoch}e_{global_step}s.pth"))
|
971 |
+
done = True
|
972 |
+
|
973 |
+
for m in model_del:
|
974 |
+
os.remove(m)
|
975 |
+
|
976 |
+
if model_add:
|
977 |
+
ckpt = (net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict())
|
978 |
+
|
979 |
+
for m in model_add:
|
980 |
+
extract_model(ckpt=ckpt, sr=sample_rate, pitch_guidance=pitch_guidance == True, name=model_name, model_path=m, epoch=epoch, step=global_step, version=version, hps=hps, model_author=model_author, vocoder=vocoder)
|
981 |
+
|
982 |
+
lowest_value_rounded = round(float(lowest_value["value"]), 3)
|
983 |
+
epoch_recorder = EpochRecorder()
|
984 |
+
|
985 |
+
if epoch > 1 and overtraining_detector: logger.info(translations["model_training_info"].format(model_name=model_name, epoch=epoch, global_step=global_step, epoch_recorder=epoch_recorder.record(), lowest_value_rounded=lowest_value_rounded, lowest_value_epoch=lowest_value['epoch'], lowest_value_step=lowest_value['step'], remaining_epochs_gen=(overtraining_threshold - consecutive_increases_gen), remaining_epochs_disc=((overtraining_threshold * 2) - consecutive_increases_disc), smoothed_value_gen=f"{smoothed_value_gen:.3f}", smoothed_value_disc=f"{smoothed_value_disc:.3f}"))
|
986 |
+
elif epoch > 1 and overtraining_detector == False: logger.info(translations["model_training_info_2"].format(model_name=model_name, epoch=epoch, global_step=global_step, epoch_recorder=epoch_recorder.record(), lowest_value_rounded=lowest_value_rounded, lowest_value_epoch=lowest_value['epoch'], lowest_value_step=lowest_value['step']))
|
987 |
+
else: logger.info(translations["model_training_info_3"].format(model_name=model_name, epoch=epoch, global_step=global_step, epoch_recorder=epoch_recorder.record()))
|
988 |
+
|
989 |
+
last_loss_gen_all = loss_gen_all
|
990 |
+
if done: os._exit(0)
|
991 |
+
|
992 |
+
if __name__ == "__main__":
|
993 |
+
torch.multiprocessing.set_start_method("spawn")
|
994 |
+
try:
|
995 |
+
main()
|
996 |
+
except Exception as e:
|
997 |
+
logger.error(f"{translations['training_error']} {e}")
|
998 |
+
|
999 |
+
import traceback
|
1000 |
+
logger.debug(traceback.format_exc())
|
main/library/algorithm/commons.py
ADDED
@@ -0,0 +1,50 @@
1 |
+
import torch
|
2 |
+
|
3 |
+
|
4 |
+
def init_weights(m, mean=0.0, std=0.01):
|
5 |
+
if m.__class__.__name__.find("Conv") != -1: m.weight.data.normal_(mean, std)
|
6 |
+
|
7 |
+
def get_padding(kernel_size, dilation=1):
|
8 |
+
return int((kernel_size * dilation - dilation) / 2)
|
9 |
+
|
10 |
+
def convert_pad_shape(pad_shape):
|
11 |
+
return [item for sublist in pad_shape[::-1] for item in sublist]
|
12 |
+
|
13 |
+
def slice_segments(x, ids_str, segment_size = 4, dim = 2):
|
14 |
+
if dim == 2: ret = torch.zeros_like(x[:, :segment_size])
|
15 |
+
elif dim == 3: ret = torch.zeros_like(x[:, :, :segment_size])
|
16 |
+
for i in range(x.size(0)):
|
17 |
+
idx_str = ids_str[i].item()
|
18 |
+
idx_end = idx_str + segment_size
|
19 |
+
if dim == 2: ret[i] = x[i, idx_str:idx_end]
|
20 |
+
else: ret[i] = x[i, :, idx_str:idx_end]
|
21 |
+
return ret
|
22 |
+
|
23 |
+
def rand_slice_segments(x, x_lengths=None, segment_size=4):
|
24 |
+
b, _, t = x.size()
|
25 |
+
if x_lengths is None: x_lengths = t
|
26 |
+
ids_str = (torch.rand([b]).to(device=x.device) * (x_lengths - segment_size + 1)).to(dtype=torch.long)
|
27 |
+
return slice_segments(x, ids_str, segment_size, dim=3), ids_str
|
28 |
+
|
29 |
+
@torch.jit.script
|
30 |
+
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
|
31 |
+
n_channels_int = n_channels[0]
|
32 |
+
in_act = input_a + input_b
|
33 |
+
return torch.tanh(in_act[:, :n_channels_int, :]) * torch.sigmoid(in_act[:, n_channels_int:, :])
|
34 |
+
|
35 |
+
def convert_pad_shape(pad_shape):
|
36 |
+
return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist()
|
37 |
+
|
38 |
+
def sequence_mask(length, max_length = None):
|
39 |
+
if max_length is None: max_length = length.max()
|
40 |
+
return torch.arange(max_length, dtype=length.dtype, device=length.device).unsqueeze(0) < length.unsqueeze(1)
|
41 |
+
|
42 |
+
def clip_grad_value(parameters, clip_value, norm_type=2):
|
43 |
+
if isinstance(parameters, torch.Tensor): parameters = [parameters]
|
44 |
+
norm_type = float(norm_type)
|
45 |
+
if clip_value is not None: clip_value = float(clip_value)
|
46 |
+
total_norm = 0
|
47 |
+
for p in list(filter(lambda p: p.grad is not None, parameters)):
|
48 |
+
total_norm += (p.grad.data.norm(norm_type)).item() ** norm_type
|
49 |
+
if clip_value is not None: p.grad.data.clamp_(min=-clip_value, max=clip_value)
|
50 |
+
return total_norm ** (1.0 / norm_type)
|
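Reviewer note: rand_slice_segments is how the trainer crops matching random windows from latents and waveforms. A hedged shape sketch, assuming the repository root is on sys.path so the file imports as a package (the exact layout is an assumption):

    # ---- illustrative sketch, not part of the diff ----
    import torch
    from main.library.algorithm import commons

    x = torch.randn(2, 192, 400)                  # (batch, channels, frames)
    lengths = torch.tensor([400, 350])
    segments, ids = commons.rand_slice_segments(x, lengths, segment_size=32)
    print(segments.shape, ids.shape)              # torch.Size([2, 192, 32]) torch.Size([2])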
main/library/algorithm/modules.py
ADDED
@@ -0,0 +1,70 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import torch
|
4 |
+
|
5 |
+
sys.path.append(os.getcwd())
|
6 |
+
|
7 |
+
from .commons import fused_add_tanh_sigmoid_multiply
|
8 |
+
|
9 |
+
|
10 |
+
class WaveNet(torch.nn.Module):
|
11 |
+
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
|
12 |
+
super(WaveNet, self).__init__()
|
13 |
+
assert kernel_size % 2 == 1
|
14 |
+
self.hidden_channels = hidden_channels
|
15 |
+
self.kernel_size = (kernel_size,)
|
16 |
+
self.dilation_rate = dilation_rate
|
17 |
+
self.n_layers = n_layers
|
18 |
+
self.gin_channels = gin_channels
|
19 |
+
self.p_dropout = p_dropout
|
20 |
+
self.in_layers = torch.nn.ModuleList()
|
21 |
+
self.res_skip_layers = torch.nn.ModuleList()
|
22 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
23 |
+
|
24 |
+
if gin_channels != 0: self.cond_layer = torch.nn.utils.parametrizations.weight_norm(torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1), name="weight")
|
25 |
+
|
26 |
+
dilations = [dilation_rate**i for i in range(n_layers)]
|
27 |
+
paddings = [(kernel_size * d - d) // 2 for d in dilations]
|
28 |
+
|
29 |
+
for i in range(n_layers):
|
30 |
+
in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilations[i], padding=paddings[i])
|
31 |
+
in_layer = torch.nn.utils.parametrizations.weight_norm(in_layer, name="weight")
|
32 |
+
self.in_layers.append(in_layer)
|
33 |
+
|
34 |
+
res_skip_channels = (hidden_channels if i == n_layers - 1 else 2 * hidden_channels)
|
35 |
+
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
|
36 |
+
|
37 |
+
res_skip_layer = torch.nn.utils.parametrizations.weight_norm(res_skip_layer, name="weight")
|
38 |
+
self.res_skip_layers.append(res_skip_layer)
|
39 |
+
|
40 |
+
def forward(self, x, x_mask, g=None, **kwargs):
|
41 |
+
output = torch.zeros_like(x)
|
42 |
+
n_channels_tensor = torch.IntTensor([self.hidden_channels])
|
43 |
+
|
44 |
+
if g is not None: g = self.cond_layer(g)
|
45 |
+
|
46 |
+
for i in range(self.n_layers):
|
47 |
+
x_in = self.in_layers[i](x)
|
48 |
+
|
49 |
+
if g is not None:
|
50 |
+
cond_offset = i * 2 * self.hidden_channels
|
51 |
+
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
|
52 |
+
else: g_l = torch.zeros_like(x_in)
|
53 |
+
|
54 |
+
res_skip_acts = self.res_skip_layers[i](self.drop(fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)))
|
55 |
+
|
56 |
+
if i < self.n_layers - 1:
|
57 |
+
x = (x + (res_skip_acts[:, : self.hidden_channels, :])) * x_mask
|
58 |
+
output = output + res_skip_acts[:, self.hidden_channels :, :]
|
59 |
+
else: output = output + res_skip_acts
|
60 |
+
|
61 |
+
return output * x_mask
|
62 |
+
|
63 |
+
def remove_weight_norm(self):
|
64 |
+
if self.gin_channels != 0: torch.nn.utils.remove_weight_norm(self.cond_layer)
|
65 |
+
|
66 |
+
for l in self.in_layers:
|
67 |
+
torch.nn.utils.remove_weight_norm(l)
|
68 |
+
|
69 |
+
for l in self.res_skip_layers:
|
70 |
+
torch.nn.utils.remove_weight_norm(l)
|
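Reviewer note: WaveNet here is the non-causal gated-convolution stack shared by the posterior encoder and the flows, with optional global conditioning through cond_layer. A hedged usage sketch, assuming the package import resolves; the channel sizes are illustrative, not taken from a config:

    # ---- illustrative sketch, not part of the diff ----
    import torch
    from main.library.algorithm.modules import WaveNet

    net = WaveNet(hidden_channels=192, kernel_size=5, dilation_rate=1,
                  n_layers=16, gin_channels=256)
    x = torch.randn(1, 192, 300)                  # (batch, hidden_channels, frames)
    x_mask = torch.ones(1, 1, 300)                # 1 = valid frame
    g = torch.randn(1, 256, 1)                    # speaker embedding as global conditioning
    print(net(x, x_mask, g=g).shape)              # torch.Size([1, 192, 300])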
main/library/algorithm/mrf_hifigan.py
ADDED
@@ -0,0 +1,160 @@
1 |
+
import math
|
2 |
+
import torch
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
import torch.nn.functional as F
|
6 |
+
import torch.utils.checkpoint as checkpoint
|
7 |
+
|
8 |
+
from torch.nn.utils import remove_weight_norm
|
9 |
+
from torch.nn.utils.parametrizations import weight_norm
|
10 |
+
|
11 |
+
|
12 |
+
LRELU_SLOPE = 0.1
|
13 |
+
|
14 |
+
class MRFLayer(torch.nn.Module):
|
15 |
+
def __init__(self, channels, kernel_size, dilation):
|
16 |
+
super().__init__()
|
17 |
+
self.conv1 = weight_norm(torch.nn.Conv1d(channels, channels, kernel_size, padding=(kernel_size * dilation - dilation) // 2, dilation=dilation))
|
18 |
+
self.conv2 = weight_norm(torch.nn.Conv1d(channels, channels, kernel_size, padding=kernel_size // 2, dilation=1))
|
19 |
+
|
20 |
+
def forward(self, x):
|
21 |
+
return x + self.conv2(F.leaky_relu(self.conv1(F.leaky_relu(x, LRELU_SLOPE)), LRELU_SLOPE))
|
22 |
+
|
23 |
+
def remove_weight_norm(self):
|
24 |
+
remove_weight_norm(self.conv1)
|
25 |
+
remove_weight_norm(self.conv2)
|
26 |
+
|
27 |
+
class MRFBlock(torch.nn.Module):
|
28 |
+
def __init__(self, channels, kernel_size, dilations):
|
29 |
+
super().__init__()
|
30 |
+
self.layers = torch.nn.ModuleList()
|
31 |
+
|
32 |
+
for dilation in dilations:
|
33 |
+
self.layers.append(MRFLayer(channels, kernel_size, dilation))
|
34 |
+
|
35 |
+
def forward(self, x):
|
36 |
+
for layer in self.layers:
|
37 |
+
x = layer(x)
|
38 |
+
|
39 |
+
return x
|
40 |
+
|
41 |
+
def remove_weight_norm(self):
|
42 |
+
for layer in self.layers:
|
43 |
+
layer.remove_weight_norm()
|
44 |
+
|
45 |
+
class SineGenerator(torch.nn.Module):
|
46 |
+
def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0):
|
47 |
+
super(SineGenerator, self).__init__()
|
48 |
+
self.sine_amp = sine_amp
|
49 |
+
self.noise_std = noise_std
|
50 |
+
self.harmonic_num = harmonic_num
|
51 |
+
self.dim = self.harmonic_num + 1
|
52 |
+
self.sampling_rate = samp_rate
|
53 |
+
self.voiced_threshold = voiced_threshold
|
54 |
+
|
55 |
+
def _f02uv(self, f0):
|
56 |
+
return torch.ones_like(f0) * (f0 > self.voiced_threshold)
|
57 |
+
|
58 |
+
def _f02sine(self, f0_values):
|
59 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
60 |
+
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device)
|
61 |
+
|
62 |
+
rand_ini[:, 0] = 0
|
63 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
64 |
+
|
65 |
+
tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
66 |
+
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
67 |
+
|
68 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
69 |
+
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
70 |
+
|
71 |
+
return torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
|
72 |
+
|
73 |
+
def forward(self, f0):
|
74 |
+
with torch.no_grad():
|
75 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
76 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
77 |
+
|
78 |
+
for idx in np.arange(self.harmonic_num):
|
79 |
+
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
|
80 |
+
|
81 |
+
sine_waves = self._f02sine(f0_buf) * self.sine_amp
|
82 |
+
uv = self._f02uv(f0)
|
83 |
+
|
84 |
+
sine_waves = sine_waves * uv + ((uv * self.noise_std + (1 - uv) * self.sine_amp / 3) * torch.randn_like(sine_waves))
|
85 |
+
|
86 |
+
return sine_waves
|
87 |
+
|
88 |
+
class SourceModuleHnNSF(torch.nn.Module):
|
89 |
+
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshold=0):
|
90 |
+
super(SourceModuleHnNSF, self).__init__()
|
91 |
+
self.sine_amp = sine_amp
|
92 |
+
self.noise_std = add_noise_std
|
93 |
+
|
94 |
+
self.l_sin_gen = SineGenerator(sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold)
|
95 |
+
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
96 |
+
self.l_tanh = torch.nn.Tanh()
|
97 |
+
|
98 |
+
def forward(self, x):
|
99 |
+
return self.l_tanh(self.l_linear(self.l_sin_gen(x).to(dtype=self.l_linear.weight.dtype)))
|
100 |
+
|
101 |
+
class HiFiGANMRFGenerator(torch.nn.Module):
|
102 |
+
def __init__(self, in_channel, upsample_initial_channel, upsample_rates, upsample_kernel_sizes, resblock_kernel_sizes, resblock_dilations, gin_channels, sample_rate, harmonic_num, checkpointing=False):
|
103 |
+
super().__init__()
|
104 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
105 |
+
|
106 |
+
self.f0_upsample = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
|
107 |
+
self.m_source = SourceModuleHnNSF(sample_rate, harmonic_num)
|
108 |
+
|
109 |
+
self.conv_pre = weight_norm(torch.nn.Conv1d(in_channel, upsample_initial_channel, kernel_size=7, stride=1, padding=3))
|
110 |
+
self.checkpointing = checkpointing
|
111 |
+
|
112 |
+
self.upsamples = torch.nn.ModuleList()
|
113 |
+
self.noise_convs = torch.nn.ModuleList()
|
114 |
+
|
115 |
+
stride_f0s = [math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1 for i in range(len(upsample_rates))]
|
116 |
+
|
117 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
118 |
+
self.upsamples.append(weight_norm(torch.nn.ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), kernel_size=k, stride=u, padding=(((k - u) // 2) if u % 2 == 0 else (u // 2 + u % 2)), output_padding=u % 2)))
|
119 |
+
stride = stride_f0s[i]
|
120 |
+
|
121 |
+
kernel = 1 if stride == 1 else stride * 2 - stride % 2
|
122 |
+
self.noise_convs.append(torch.nn.Conv1d(1, upsample_initial_channel // (2 ** (i + 1)), kernel_size=kernel, stride=stride, padding=( 0 if stride == 1 else (kernel - stride) // 2)))
|
123 |
+
|
124 |
+
self.mrfs = torch.nn.ModuleList()
|
125 |
+
|
126 |
+
for i in range(len(self.upsamples)):
|
127 |
+
channel = upsample_initial_channel // (2 ** (i + 1))
|
128 |
+
self.mrfs.append(torch.nn.ModuleList([MRFBlock(channel, kernel_size=k, dilations=d) for k, d in zip(resblock_kernel_sizes, resblock_dilations)]))
|
129 |
+
|
130 |
+
self.conv_post = weight_norm(torch.nn.Conv1d(channel, 1, kernel_size=7, stride=1, padding=3))
|
131 |
+
if gin_channels != 0: self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
132 |
+
|
133 |
+
def forward(self, x, f0, g = None):
|
134 |
+
har_source = self.m_source(self.f0_upsample(f0[:, None, :]).transpose(-1, -2)).transpose(-1, -2)
|
135 |
+
|
136 |
+
x = self.conv_pre(x)
|
137 |
+
if g is not None: x = x + self.cond(g)
|
138 |
+
|
139 |
+
for ups, mrf, noise_conv in zip(self.upsamples, self.mrfs, self.noise_convs):
|
140 |
+
x = F.leaky_relu(x, LRELU_SLOPE)
|
141 |
+
x = checkpoint.checkpoint(ups, x, use_reentrant=False) if self.training and self.checkpointing else ups(x)
|
142 |
+
x += noise_conv(har_source)
|
143 |
+
|
144 |
+
def mrf_sum(x, layers):
|
145 |
+
return sum(layer(x) for layer in layers) / self.num_kernels
|
146 |
+
|
147 |
+
x = checkpoint.checkpoint(mrf_sum, x, mrf, use_reentrant=False) if self.training and self.checkpointing else mrf_sum(x, mrf)
|
148 |
+
|
149 |
+
return torch.tanh(self.conv_post(F.leaky_relu(x)))
|
150 |
+
|
151 |
+
def remove_weight_norm(self):
|
152 |
+
remove_weight_norm(self.conv_pre)
|
153 |
+
|
154 |
+
for up in self.upsamples:
|
155 |
+
remove_weight_norm(up)
|
156 |
+
|
157 |
+
for mrf in self.mrfs:
|
158 |
+
mrf.remove_weight_norm()
|
159 |
+
|
160 |
+
remove_weight_norm(self.conv_post)
|
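Reviewer note: HiFiGANMRFGenerator upsamples frame-rate features to waveform rate while injecting a harmonic source derived from f0 at every stage. A hedged shape sketch, assuming the package import resolves; the hyperparameters are illustrative, not read from a bundled config:

    # ---- illustrative sketch, not part of the diff ----
    import torch
    from main.library.algorithm.mrf_hifigan import HiFiGANMRFGenerator

    gen = HiFiGANMRFGenerator(
        in_channel=192, upsample_initial_channel=512,
        upsample_rates=[10, 8, 2, 2], upsample_kernel_sizes=[20, 16, 4, 4],
        resblock_kernel_sizes=[3, 7, 11], resblock_dilations=[[1, 3, 5]] * 3,
        gin_channels=256, sample_rate=32000, harmonic_num=8)

    x = torch.randn(1, 192, 50)                   # frame-level features
    f0 = torch.rand(1, 50) * 200 + 100            # per-frame pitch in Hz
    g = torch.randn(1, 256, 1)                    # speaker embedding
    print(gen(x, f0, g).shape)                    # torch.Size([1, 1, 16000]) = 50 frames * 320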
main/library/algorithm/refinegan.py
ADDED
@@ -0,0 +1,180 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import math
|
4 |
+
import torch
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import torch.nn.functional as F
|
8 |
+
import torch.utils.checkpoint as checkpoint
|
9 |
+
|
10 |
+
from torch.nn.utils.parametrizations import weight_norm
|
11 |
+
from torch.nn.utils.parametrize import remove_parametrizations
|
12 |
+
|
13 |
+
sys.path.append(os.getcwd())
|
14 |
+
|
15 |
+
from .commons import get_padding
|
16 |
+
|
17 |
+
class ResBlock(torch.nn.Module):
|
18 |
+
def __init__(self, *, in_channels, out_channels, kernel_size = 7, dilation = (1, 3, 5), leaky_relu_slope = 0.2):
|
19 |
+
super(ResBlock, self).__init__()
|
20 |
+
self.leaky_relu_slope = leaky_relu_slope
|
21 |
+
self.in_channels = in_channels
|
22 |
+
self.out_channels = out_channels
|
23 |
+
|
24 |
+
self.convs1 = torch.nn.ModuleList([weight_norm(torch.nn.Conv1d(in_channels=in_channels if idx == 0 else out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, dilation=d, padding=get_padding(kernel_size, d))) for idx, d in enumerate(dilation)])
|
25 |
+
self.convs1.apply(self.init_weights)
|
26 |
+
|
27 |
+
self.convs2 = torch.nn.ModuleList([weight_norm(torch.nn.Conv1d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, dilation=d, padding=get_padding(kernel_size, d))) for _, d in enumerate(dilation)])
|
28 |
+
self.convs2.apply(self.init_weights)
|
29 |
+
|
30 |
+
def forward(self, x):
|
31 |
+
for idx, (c1, c2) in enumerate(zip(self.convs1, self.convs2)):
|
32 |
+
xt = c2(F.leaky_relu(c1(F.leaky_relu(x, self.leaky_relu_slope)), self.leaky_relu_slope))
|
33 |
+
x = (xt + x) if idx != 0 or self.in_channels == self.out_channels else xt
|
34 |
+
|
35 |
+
return x
|
36 |
+
|
37 |
+
def remove_parametrizations(self):
|
38 |
+
for c1, c2 in zip(self.convs1, self.convs2):
|
39 |
+
remove_parametrizations(c1)
|
40 |
+
remove_parametrizations(c2)
|
41 |
+
|
42 |
+
def init_weights(self, m):
|
43 |
+
if type(m) == torch.nn.Conv1d:
|
44 |
+
m.weight.data.normal_(0, 0.01)
|
45 |
+
m.bias.data.fill_(0.0)
|
46 |
+
|
47 |
+
class AdaIN(torch.nn.Module):
|
48 |
+
def __init__(self, *, channels, leaky_relu_slope = 0.2):
|
49 |
+
super().__init__()
|
50 |
+
self.weight = torch.nn.Parameter(torch.ones(channels))
|
51 |
+
self.activation = torch.nn.LeakyReLU(leaky_relu_slope)
|
52 |
+
|
53 |
+
def forward(self, x):
|
54 |
+
return self.activation(x + (torch.randn_like(x) * self.weight[None, :, None]))
|
55 |
+
|
56 |
+
class ParallelResBlock(torch.nn.Module):
|
57 |
+
def __init__(self, *, in_channels, out_channels, kernel_sizes = (3, 7, 11), dilation = (1, 3, 5), leaky_relu_slope = 0.2):
|
58 |
+
super().__init__()
|
59 |
+
self.in_channels = in_channels
|
60 |
+
self.out_channels = out_channels
|
61 |
+
self.input_conv = torch.nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=1, padding=3)
|
62 |
+
self.blocks = torch.nn.ModuleList([torch.nn.Sequential(AdaIN(channels=out_channels), ResBlock(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, leaky_relu_slope=leaky_relu_slope), AdaIN(channels=out_channels)) for kernel_size in kernel_sizes])
|
63 |
+
|
64 |
+
def forward(self, x):
|
65 |
+
return torch.mean(torch.stack([block(self.input_conv(x)) for block in self.blocks]), dim=0)
|
66 |
+
|
67 |
+
def remove_parametrizations(self):
|
68 |
+
for block in self.blocks:
|
69 |
+
block[1].remove_parametrizations()
|
70 |
+
|
71 |
+
class SineGenerator(torch.nn.Module):
|
72 |
+
def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0):
|
73 |
+
super(SineGenerator, self).__init__()
|
74 |
+
self.sine_amp = sine_amp
|
75 |
+
self.noise_std = noise_std
|
76 |
+
self.harmonic_num = harmonic_num
|
77 |
+
self.dim = self.harmonic_num + 1
|
78 |
+
self.sampling_rate = samp_rate
|
79 |
+
self.voiced_threshold = voiced_threshold
|
80 |
+
self.merge = torch.nn.Sequential(torch.nn.Linear(self.dim, 1, bias=False), torch.nn.Tanh())
|
81 |
+
|
82 |
+
def _f02uv(self, f0):
|
83 |
+
return torch.ones_like(f0) * (f0 > self.voiced_threshold)
|
84 |
+
|
85 |
+
def _f02sine(self, f0_values):
|
86 |
+
rad_values = (f0_values / self.sampling_rate) % 1
|
87 |
+
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], device=f0_values.device)
|
88 |
+
|
89 |
+
rand_ini[:, 0] = 0
|
90 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
91 |
+
|
92 |
+
tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
93 |
+
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
94 |
+
|
95 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
96 |
+
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
97 |
+
|
98 |
+
return torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
|
99 |
+
|
100 |
+
def forward(self, f0):
|
101 |
+
with torch.no_grad():
|
102 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
103 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
104 |
+
|
105 |
+
for idx in np.arange(self.harmonic_num):
|
106 |
+
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
|
107 |
+
|
108 |
+
sine_waves = self._f02sine(f0_buf) * self.sine_amp
|
109 |
+
uv = self._f02uv(f0)
|
110 |
+
|
111 |
+
sine_waves = sine_waves * uv + (uv * self.noise_std + (1 - uv) * self.sine_amp / 3) * torch.randn_like(sine_waves)
|
112 |
+
sine_waves = sine_waves - sine_waves.mean(dim=1, keepdim=True)
|
113 |
+
|
114 |
+
return self.merge(sine_waves)
|
115 |
+
|
116 |
+
class RefineGANGenerator(torch.nn.Module):
|
117 |
+
def __init__(self, *, sample_rate = 44100, upsample_rates = (8, 8, 2, 2), leaky_relu_slope = 0.2, num_mels = 128, gin_channels = 256, checkpointing = False, upsample_initial_channel = 512):
|
118 |
+
super().__init__()
|
119 |
+
self.upsample_rates = upsample_rates
|
120 |
+
self.checkpointing = checkpointing
|
121 |
+
self.leaky_relu_slope = leaky_relu_slope
|
122 |
+
self.upp = np.prod(upsample_rates)
|
123 |
+
self.m_source = SineGenerator(sample_rate)
|
124 |
+
self.pre_conv = weight_norm(torch.nn.Conv1d(in_channels=1, out_channels=upsample_initial_channel // 2, kernel_size=7, stride=1, padding=3, bias=False))
|
125 |
+
channels = upsample_initial_channel
|
126 |
+
self.downsample_blocks = torch.nn.ModuleList([])
|
127 |
+
|
128 |
+
stride_f0s = [math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1 for i in range(len(upsample_rates))]
|
129 |
+
|
130 |
+
for i, _ in enumerate(upsample_rates):
|
131 |
+
stride = stride_f0s[i]
|
132 |
+
kernel = 1 if stride == 1 else stride * 2 - stride % 2
|
133 |
+
|
134 |
+
self.downsample_blocks.append(torch.nn.Conv1d(in_channels=1, out_channels=channels // 2 ** (i + 2), kernel_size=kernel, stride=stride, padding=0 if stride == 1 else (kernel - stride) // 2))
|
135 |
+
|
136 |
+
self.mel_conv = weight_norm(torch.nn.Conv1d(in_channels=num_mels, out_channels=channels // 2, kernel_size=7, stride=1, padding=3))
|
137 |
+
if gin_channels != 0: self.cond = torch.nn.Conv1d(256, channels // 2, 1)
|
138 |
+
|
139 |
+
self.upsample_blocks = torch.nn.ModuleList([])
|
140 |
+
self.upsample_conv_blocks = torch.nn.ModuleList([])
|
141 |
+
self.filters = torch.nn.ModuleList([])
|
142 |
+
|
143 |
+
for rate in upsample_rates:
|
144 |
+
new_channels = channels // 2
|
145 |
+
self.upsample_blocks.append(torch.nn.Upsample(scale_factor=rate, mode="linear"))
|
146 |
+
|
147 |
+
low_pass = torch.nn.Conv1d(channels, channels, kernel_size=15, padding=7, groups=channels, bias=False)
|
148 |
+
low_pass.weight.data.fill_(1.0 / 15)
|
149 |
+
|
150 |
+
self.filters.append(low_pass)
|
151 |
+
|
152 |
+
self.upsample_conv_blocks.append(ParallelResBlock(in_channels=channels + channels // 4, out_channels=new_channels, kernel_sizes=(3, 7, 11), dilation=(1, 3, 5), leaky_relu_slope=leaky_relu_slope))
|
153 |
+
channels = new_channels
|
154 |
+
|
155 |
+
self.conv_post = weight_norm(torch.nn.Conv1d(in_channels=channels, out_channels=1, kernel_size=7, stride=1, padding=3))
|
156 |
+
|
157 |
+
def forward(self, mel, f0, g = None):
|
158 |
+
har_source = self.m_source(f0.transpose(1, 2)).transpose(1, 2)
|
159 |
+
x = F.interpolate(self.pre_conv(har_source), size=mel.shape[-1], mode="linear")
|
160 |
+
|
161 |
+
mel = self.mel_conv(mel)
|
162 |
+
if g is not None: mel += self.cond(g)
|
163 |
+
|
164 |
+
x = torch.cat([mel, x], dim=1)
|
165 |
+
|
166 |
+
for ups, res, down, flt in zip(self.upsample_blocks, self.upsample_conv_blocks, self.downsample_blocks, self.filters):
|
167 |
+
x = checkpoint.checkpoint(res, torch.cat([checkpoint.checkpoint(flt, checkpoint.checkpoint(ups, x, use_reentrant=False), use_reentrant=False), down(har_source)], dim=1), use_reentrant=False) if self.training and self.checkpointing else res(torch.cat([flt(ups(x)), down(har_source)], dim=1))
|
168 |
+
|
169 |
+
return torch.tanh_(self.conv_post(F.leaky_relu_(x, self.leaky_relu_slope)))
|
170 |
+
|
171 |
+
def remove_parametrizations(self):
|
172 |
+
remove_parametrizations(self.pre_conv, "weight")
|
173 |
+
remove_parametrizations(self.mel_conv, "weight")
|
174 |
+
remove_parametrizations(self.conv_post, "weight")
|
175 |
+
|
176 |
+
for block in self.downsample_blocks:
|
177 |
+
pass  # downsample blocks are plain Conv1d layers without weight_norm, so there is nothing to remove
|
178 |
+
|
179 |
+
for block in self.upsample_conv_blocks:
|
180 |
+
block.remove_parametrizations()
|
main/library/algorithm/residuals.py
ADDED
@@ -0,0 +1,140 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import torch
|
4 |
+
|
5 |
+
from torch.nn.utils import remove_weight_norm
|
6 |
+
from torch.nn.utils.parametrizations import weight_norm
|
7 |
+
|
8 |
+
sys.path.append(os.getcwd())
|
9 |
+
|
10 |
+
from .modules import WaveNet
|
11 |
+
from .commons import get_padding, init_weights
|
12 |
+
|
13 |
+
|
14 |
+
LRELU_SLOPE = 0.1
|
15 |
+
|
16 |
+
def create_conv1d_layer(channels, kernel_size, dilation):
|
17 |
+
return weight_norm(torch.nn.Conv1d(channels, channels, kernel_size, 1, dilation=dilation, padding=get_padding(kernel_size, dilation)))
|
18 |
+
|
19 |
+
def apply_mask(tensor, mask):
|
20 |
+
return tensor * mask if mask is not None else tensor
|
21 |
+
|
22 |
+
class ResBlockBase(torch.nn.Module):
|
23 |
+
def __init__(self, channels, kernel_size, dilations):
|
24 |
+
super(ResBlockBase, self).__init__()
|
25 |
+
|
26 |
+
self.convs1 = torch.nn.ModuleList([create_conv1d_layer(channels, kernel_size, d) for d in dilations])
|
27 |
+
self.convs1.apply(init_weights)
|
28 |
+
|
29 |
+
self.convs2 = torch.nn.ModuleList([create_conv1d_layer(channels, kernel_size, 1) for _ in dilations])
|
30 |
+
self.convs2.apply(init_weights)
|
31 |
+
|
32 |
+
def forward(self, x, x_mask=None):
|
33 |
+
for c1, c2 in zip(self.convs1, self.convs2):
|
34 |
+
x = c2(apply_mask(torch.nn.functional.leaky_relu(c1(apply_mask(torch.nn.functional.leaky_relu(x, LRELU_SLOPE), x_mask)), LRELU_SLOPE), x_mask)) + x
|
35 |
+
|
36 |
+
return apply_mask(x, x_mask)
|
37 |
+
|
38 |
+
def remove_weight_norm(self):
|
39 |
+
for conv in self.convs1 + self.convs2:
|
40 |
+
remove_weight_norm(conv)
|
41 |
+
|
42 |
+
class ResBlock(ResBlockBase):
|
43 |
+
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
|
44 |
+
super(ResBlock, self).__init__(channels, kernel_size, dilation)
|
45 |
+
|
46 |
+
class Log(torch.nn.Module):
|
47 |
+
def forward(self, x, x_mask, reverse=False, **kwargs):
|
48 |
+
if not reverse:
|
49 |
+
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
|
50 |
+
return y, torch.sum(-y, [1, 2])
|
51 |
+
else: return torch.exp(x) * x_mask
|
52 |
+
|
53 |
+
class Flip(torch.nn.Module):
|
54 |
+
def forward(self, x, *args, reverse=False, **kwargs):
|
55 |
+
x = torch.flip(x, [1])
|
56 |
+
|
57 |
+
if not reverse: return x, torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
|
58 |
+
else: return x
|
59 |
+
|
60 |
+
class ElementwiseAffine(torch.nn.Module):
|
61 |
+
def __init__(self, channels):
|
62 |
+
super().__init__()
|
63 |
+
self.channels = channels
|
64 |
+
self.m = torch.nn.Parameter(torch.zeros(channels, 1))
|
65 |
+
self.logs = torch.nn.Parameter(torch.zeros(channels, 1))
|
66 |
+
|
67 |
+
def forward(self, x, x_mask, reverse=False, **kwargs):
|
68 |
+
if not reverse: return ((self.m + torch.exp(self.logs) * x) * x_mask), torch.sum(self.logs * x_mask, [1, 2])
|
69 |
+
else: return (x - self.m) * torch.exp(-self.logs) * x_mask
|
70 |
+
|
71 |
+
class ResidualCouplingBlock(torch.nn.Module):
|
72 |
+
def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0):
|
73 |
+
super(ResidualCouplingBlock, self).__init__()
|
74 |
+
self.channels = channels
|
75 |
+
self.hidden_channels = hidden_channels
|
76 |
+
self.kernel_size = kernel_size
|
77 |
+
self.dilation_rate = dilation_rate
|
78 |
+
self.n_layers = n_layers
|
79 |
+
self.n_flows = n_flows
|
80 |
+
self.gin_channels = gin_channels
|
81 |
+
self.flows = torch.nn.ModuleList()
|
82 |
+
|
83 |
+
for _ in range(n_flows):
|
84 |
+
self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
|
85 |
+
self.flows.append(Flip())
|
86 |
+
|
87 |
+
def forward(self, x, x_mask, g = None, reverse = False):
|
88 |
+
if not reverse:
|
89 |
+
for flow in self.flows:
|
90 |
+
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
91 |
+
else:
|
92 |
+
for flow in reversed(self.flows):
|
93 |
+
x = flow.forward(x, x_mask, g=g, reverse=reverse)
|
94 |
+
|
95 |
+
return x
|
96 |
+
|
97 |
+
def remove_weight_norm(self):
|
98 |
+
for i in range(self.n_flows):
|
99 |
+
self.flows[i * 2].remove_weight_norm()
|
100 |
+
|
101 |
+
def __prepare_scriptable__(self):
|
102 |
+
for i in range(self.n_flows):
|
103 |
+
for hook in self.flows[i * 2]._forward_pre_hooks.values():
|
104 |
+
if (hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" and hook.__class__.__name__ == "WeightNorm"): torch.nn.utils.remove_weight_norm(self.flows[i * 2])
|
105 |
+
|
106 |
+
return self
|
107 |
+
|
108 |
+
class ResidualCouplingLayer(torch.nn.Module):
|
109 |
+
def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=0, gin_channels=0, mean_only=False):
|
110 |
+
assert channels % 2 == 0, "Channels/2"
|
111 |
+
super().__init__()
|
112 |
+
self.channels = channels
|
113 |
+
self.hidden_channels = hidden_channels
|
114 |
+
self.kernel_size = kernel_size
|
115 |
+
self.dilation_rate = dilation_rate
|
116 |
+
self.n_layers = n_layers
|
117 |
+
self.half_channels = channels // 2
|
118 |
+
self.mean_only = mean_only
|
119 |
+
|
120 |
+
self.pre = torch.nn.Conv1d(self.half_channels, hidden_channels, 1)
|
121 |
+
self.enc = WaveNet(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
|
122 |
+
self.post = torch.nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
|
123 |
+
|
124 |
+
self.post.weight.data.zero_()
|
125 |
+
self.post.bias.data.zero_()
|
126 |
+
|
127 |
+
def forward(self, x, x_mask, g=None, reverse=False):
|
128 |
+
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
|
129 |
+
stats = self.post(self.enc((self.pre(x0) * x_mask), x_mask, g=g)) * x_mask
|
130 |
+
|
131 |
+
if not self.mean_only: m, logs = torch.split(stats, [self.half_channels] * 2, 1)
|
132 |
+
else:
|
133 |
+
m = stats
|
134 |
+
logs = torch.zeros_like(m)
|
135 |
+
|
136 |
+
if not reverse: return torch.cat([x0, (m + x1 * torch.exp(logs) * x_mask)], 1), torch.sum(logs, [1, 2])
|
137 |
+
else: return torch.cat([x0, ((x1 - m) * torch.exp(-logs) * x_mask)], 1)
|
138 |
+
|
139 |
+
def remove_weight_norm(self):
|
140 |
+
self.enc.remove_weight_norm()
|
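As a quick illustration of how the coupling flow in residuals.py is used (a sketch under assumed sizes, not part of the upload; channels must be even and the WaveNet module from modules.py is pulled in internally):

# Hypothetical usage sketch for ResidualCouplingBlock; the sizes mirror typical VITS/RVC configs.
import torch
from main.library.algorithm.residuals import ResidualCouplingBlock

flow = ResidualCouplingBlock(channels=192, hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=3)
z = torch.randn(2, 192, 100)              # latent [batch, channels, frames]
mask = torch.ones(2, 1, 100)              # frame-level mask
z_p = flow(z, mask)                       # forward pass (training direction)
z_rec = flow(z_p, mask, reverse=True)     # inverse pass (inference direction)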
main/library/algorithm/separator.py
ADDED
@@ -0,0 +1,330 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import time
|
4 |
+
import yaml
|
5 |
+
import torch
|
6 |
+
import codecs
|
7 |
+
import hashlib
|
8 |
+
import logging
|
9 |
+
import platform
|
10 |
+
import warnings
|
11 |
+
import requests
|
12 |
+
import onnxruntime
|
13 |
+
|
14 |
+
from importlib import metadata, import_module
|
15 |
+
|
16 |
+
now_dir = os.getcwd()
|
17 |
+
sys.path.append(now_dir)
|
18 |
+
|
19 |
+
from main.configs.config import Config
|
20 |
+
translations = Config().translations
|
21 |
+
|
22 |
+
class Separator:
|
23 |
+
def __init__(self, logger=logging.getLogger(__name__), log_level=logging.INFO, log_formatter=None, model_file_dir="assets/models/uvr5", output_dir=None, output_format="wav", output_bitrate=None, normalization_threshold=0.9, output_single_stem=None, invert_using_spec=False, sample_rate=44100, mdx_params={"hop_length": 1024, "segment_size": 256, "overlap": 0.25, "batch_size": 1, "enable_denoise": False}, demucs_params={"segment_size": "Default", "shifts": 2, "overlap": 0.25, "segments_enabled": True}):
|
24 |
+
self.logger = logger
|
25 |
+
self.log_level = log_level
|
26 |
+
self.log_formatter = log_formatter
|
27 |
+
self.log_handler = logging.StreamHandler()
|
28 |
+
|
29 |
+
if self.log_formatter is None: self.log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(module)s - %(message)s")
|
30 |
+
self.log_handler.setFormatter(self.log_formatter)
|
31 |
+
|
32 |
+
if not self.logger.hasHandlers(): self.logger.addHandler(self.log_handler)
|
33 |
+
if log_level > logging.DEBUG: warnings.filterwarnings("ignore")
|
34 |
+
|
35 |
+
self.logger.info(translations["separator_info"].format(output_dir=output_dir, output_format=output_format))
|
36 |
+
self.model_file_dir = model_file_dir
|
37 |
+
|
38 |
+
if output_dir is None:
|
39 |
+
output_dir = now_dir
|
40 |
+
self.logger.info(translations["output_dir_is_none"])
|
41 |
+
|
42 |
+
self.output_dir = output_dir
|
43 |
+
|
44 |
+
os.makedirs(self.model_file_dir, exist_ok=True)
|
45 |
+
os.makedirs(self.output_dir, exist_ok=True)
|
46 |
+
|
47 |
+
self.output_format = output_format
|
48 |
+
self.output_bitrate = output_bitrate
|
49 |
+
|
50 |
+
if self.output_format is None: self.output_format = "wav"
|
51 |
+
self.normalization_threshold = normalization_threshold
|
52 |
+
if normalization_threshold <= 0 or normalization_threshold > 1: raise ValueError(translations[">0or=1"])
|
53 |
+
|
54 |
+
self.output_single_stem = output_single_stem
|
55 |
+
if output_single_stem is not None: self.logger.debug(translations["output_single"].format(output_single_stem=output_single_stem))
|
56 |
+
|
57 |
+
self.invert_using_spec = invert_using_spec
|
58 |
+
if self.invert_using_spec: self.logger.debug(translations["step2"])
|
59 |
+
|
60 |
+
self.sample_rate = int(sample_rate)
|
61 |
+
self.arch_specific_params = {"MDX": mdx_params, "Demucs": demucs_params}
|
62 |
+
self.torch_device = None
|
63 |
+
self.torch_device_cpu = None
|
64 |
+
self.torch_device_mps = None
|
65 |
+
self.onnx_execution_provider = None
|
66 |
+
self.model_instance = None
|
67 |
+
self.model_is_uvr_vip = False
|
68 |
+
self.model_friendly_name = None
|
69 |
+
self.setup_accelerated_inferencing_device()
|
70 |
+
|
71 |
+
def setup_accelerated_inferencing_device(self):
|
72 |
+
system_info = self.get_system_info()
|
73 |
+
self.log_onnxruntime_packages()
|
74 |
+
self.setup_torch_device(system_info)
|
75 |
+
|
76 |
+
def get_system_info(self):
|
77 |
+
os_name = platform.system()
|
78 |
+
os_version = platform.version()
|
79 |
+
self.logger.info(f"{translations['os']}: {os_name} {os_version}")
|
80 |
+
system_info = platform.uname()
|
81 |
+
self.logger.info(translations["platform_info"].format(system_info=system_info, node=system_info.node, release=system_info.release, machine=system_info.machine, processor=system_info.processor))
|
82 |
+
python_version = platform.python_version()
|
83 |
+
self.logger.info(f"{translations['name_ver'].format(name='python')}: {python_version}")
|
84 |
+
pytorch_version = torch.__version__
|
85 |
+
self.logger.info(f"{translations['name_ver'].format(name='pytorch')}: {pytorch_version}")
|
86 |
+
|
87 |
+
return system_info
|
88 |
+
|
89 |
+
def log_onnxruntime_packages(self):
|
90 |
+
onnxruntime_gpu_package = self.get_package_distribution("onnxruntime-gpu")
|
91 |
+
onnxruntime_cpu_package = self.get_package_distribution("onnxruntime")
|
92 |
+
|
93 |
+
if onnxruntime_gpu_package is not None: self.logger.info(f"{translations['install_onnx'].format(pu='GPU')}: {onnxruntime_gpu_package.version}")
|
94 |
+
if onnxruntime_cpu_package is not None: self.logger.info(f"{translations['install_onnx'].format(pu='CPU')}: {onnxruntime_cpu_package.version}")
|
95 |
+
|
96 |
+
def setup_torch_device(self, system_info):
|
97 |
+
hardware_acceleration_enabled = False
|
98 |
+
ort_providers = onnxruntime.get_available_providers()
|
99 |
+
self.torch_device_cpu = torch.device("cpu")
|
100 |
+
|
101 |
+
if torch.cuda.is_available():
|
102 |
+
self.configure_cuda(ort_providers)
|
103 |
+
hardware_acceleration_enabled = True
|
104 |
+
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available() and system_info.processor == "arm":
|
105 |
+
self.configure_mps(ort_providers)
|
106 |
+
hardware_acceleration_enabled = True
|
107 |
+
|
108 |
+
if not hardware_acceleration_enabled:
|
109 |
+
self.logger.info(translations["running_in_cpu"])
|
110 |
+
self.torch_device = self.torch_device_cpu
|
111 |
+
self.onnx_execution_provider = ["CPUExecutionProvider"]
|
112 |
+
|
113 |
+
def configure_cuda(self, ort_providers):
|
114 |
+
self.logger.info(translations["running_in_cuda"])
|
115 |
+
self.torch_device = torch.device("cuda")
|
116 |
+
|
117 |
+
if "CUDAExecutionProvider" in ort_providers:
|
118 |
+
self.logger.info(translations["onnx_have"].format(have='CUDAExecutionProvider'))
|
119 |
+
self.onnx_execution_provider = ["CUDAExecutionProvider"]
|
120 |
+
else: self.logger.warning(translations["onnx_not_have"].format(have='CUDAExecutionProvider'))
|
121 |
+
|
122 |
+
def configure_mps(self, ort_providers):
|
123 |
+
self.logger.info(translations["set_torch_mps"])
|
124 |
+
self.torch_device_mps = torch.device("mps")
|
125 |
+
self.torch_device = self.torch_device_mps
|
126 |
+
|
127 |
+
if "CoreMLExecutionProvider" in ort_providers:
|
128 |
+
self.logger.info(translations["onnx_have"].format(have='CoreMLExecutionProvider'))
|
129 |
+
self.onnx_execution_provider = ["CoreMLExecutionProvider"]
|
130 |
+
else: self.logger.warning(translations["onnx_not_have"].format(have='CoreMLExecutionProvider'))
|
131 |
+
|
132 |
+
def get_package_distribution(self, package_name):
|
133 |
+
try:
|
134 |
+
return metadata.distribution(package_name)
|
135 |
+
except metadata.PackageNotFoundError:
|
136 |
+
self.logger.debug(translations["python_not_install"].format(package_name=package_name))
|
137 |
+
return None
|
138 |
+
|
139 |
+
def get_model_hash(self, model_path):
|
140 |
+
self.logger.debug(translations["hash"].format(model_path=model_path))
|
141 |
+
|
142 |
+
try:
|
143 |
+
with open(model_path, "rb") as f:
|
144 |
+
f.seek(-10000 * 1024, 2)
|
145 |
+
return hashlib.md5(f.read()).hexdigest()
|
146 |
+
except IOError as e:
|
147 |
+
self.logger.error(translations["ioerror"].format(e=e))
|
148 |
+
return hashlib.md5(open(model_path, "rb").read()).hexdigest()
|
149 |
+
|
150 |
+
def download_file_if_not_exists(self, url, output_path):
|
151 |
+
if os.path.isfile(output_path):
|
152 |
+
self.logger.debug(translations["cancel_download"].format(output_path=output_path))
|
153 |
+
return
|
154 |
+
|
155 |
+
self.logger.debug(translations["download_model"].format(url=url, output_path=output_path))
|
156 |
+
response = requests.get(url, stream=True, timeout=300)
|
157 |
+
|
158 |
+
if response.status_code == 200:
|
159 |
+
from tqdm import tqdm
|
160 |
+
|
161 |
+
progress_bar = tqdm(total=int(response.headers.get("content-length", 0)), ncols=100, unit="byte")
|
162 |
+
|
163 |
+
with open(output_path, "wb") as f:
|
164 |
+
for chunk in response.iter_content(chunk_size=8192):
|
165 |
+
progress_bar.update(len(chunk))
|
166 |
+
f.write(chunk)
|
167 |
+
|
168 |
+
progress_bar.close()
|
169 |
+
else: raise RuntimeError(translations["download_error"].format(url=url, status_code=response.status_code))
|
170 |
+
|
171 |
+
def print_uvr_vip_message(self):
|
172 |
+
if self.model_is_uvr_vip:
|
173 |
+
self.logger.warning(translations["vip_model"].format(model_friendly_name=self.model_friendly_name))
|
174 |
+
self.logger.warning(translations["vip_print"])
|
175 |
+
|
176 |
+
def list_supported_model_files(self):
|
177 |
+
response = requests.get(codecs.decode("uggcf://uhttvatsnpr.pb/NauC/Ivrganzrfr-EIP-Cebwrpg/enj/znva/wfba/hie_zbqryf.wfba", "rot13"))
|
178 |
+
response.raise_for_status()
|
179 |
+
model_downloads_list = response.json()
|
180 |
+
self.logger.debug(translations["load_download_json"])
|
181 |
+
|
182 |
+
return {"MDX": {**model_downloads_list["mdx_download_list"], **model_downloads_list["mdx_download_vip_list"]}, "Demucs": {key: value for key, value in model_downloads_list["demucs_download_list"].items() if key.startswith("Demucs v4")}}
|
183 |
+
|
184 |
+
def download_model_files(self, model_filename):
|
185 |
+
model_path = os.path.join(self.model_file_dir, model_filename)
|
186 |
+
supported_model_files_grouped = self.list_supported_model_files()
|
187 |
+
|
188 |
+
yaml_config_filename = None
|
189 |
+
self.logger.debug(translations["search_model"].format(model_filename=model_filename))
|
190 |
+
|
191 |
+
for model_type, model_list in supported_model_files_grouped.items():
|
192 |
+
for model_friendly_name, model_download_list in model_list.items():
|
193 |
+
self.model_is_uvr_vip = "VIP" in model_friendly_name
|
194 |
+
model_repo_url_prefix = codecs.decode("uggcf://uhttvatsnpr.pb/NauC/Ivrganzrfr-EIP-Cebwrpg/erfbyir/znva/hie5_zbqryf", "rot13")
|
195 |
+
|
196 |
+
if isinstance(model_download_list, str) and model_download_list == model_filename:
|
197 |
+
self.logger.debug(translations["single_model"].format(model_friendly_name=model_friendly_name))
|
198 |
+
self.model_friendly_name = model_friendly_name
|
199 |
+
|
200 |
+
try:
|
201 |
+
self.download_file_if_not_exists(f"{model_repo_url_prefix}/MDX/{model_filename}", model_path)
|
202 |
+
except RuntimeError:
|
203 |
+
self.logger.warning(translations["not_found_model"])
|
204 |
+
self.download_file_if_not_exists(f"{model_repo_url_prefix}/Demucs/{model_filename}", model_path)
|
205 |
+
|
206 |
+
self.print_uvr_vip_message()
|
207 |
+
self.logger.debug(translations["single_model_path"].format(model_path=model_path))
|
208 |
+
|
209 |
+
return model_filename, model_type, model_friendly_name, model_path, yaml_config_filename
|
210 |
+
elif isinstance(model_download_list, dict):
|
211 |
+
this_model_matches_input_filename = False
|
212 |
+
|
213 |
+
for file_name, file_url in model_download_list.items():
|
214 |
+
if file_name == model_filename or file_url == model_filename:
|
215 |
+
self.logger.debug(translations["find_model"].format(model_filename=model_filename, model_friendly_name=model_friendly_name))
|
216 |
+
this_model_matches_input_filename = True
|
217 |
+
|
218 |
+
if this_model_matches_input_filename:
|
219 |
+
self.logger.debug(translations["find_models"].format(model_friendly_name=model_friendly_name))
|
220 |
+
self.model_friendly_name = model_friendly_name
|
221 |
+
self.print_uvr_vip_message()
|
222 |
+
|
223 |
+
for config_key, config_value in model_download_list.items():
|
224 |
+
self.logger.debug(f"{translations['find_path']}: {config_key} -> {config_value}")
|
225 |
+
|
226 |
+
if config_value.startswith("http"): self.download_file_if_not_exists(config_value, os.path.join(self.model_file_dir, config_key))
|
227 |
+
elif config_key.endswith(".ckpt"):
|
228 |
+
try:
|
229 |
+
self.download_file_if_not_exists(f"{model_repo_url_prefix}/Demucs/{config_key}", os.path.join(self.model_file_dir, config_key))
|
230 |
+
except RuntimeError:
|
231 |
+
self.logger.warning(translations["not_found_model_warehouse"])
|
232 |
+
|
233 |
+
if model_filename.endswith(".yaml"):
|
234 |
+
self.logger.warning(translations["yaml_warning"].format(model_filename=model_filename))
|
235 |
+
self.logger.warning(translations["yaml_warning_2"].format(config_key=config_key))
|
236 |
+
self.logger.warning(translations["yaml_warning_3"])
|
237 |
+
|
238 |
+
model_filename = config_key
|
239 |
+
model_path = os.path.join(self.model_file_dir, f"{model_filename}")
|
240 |
+
|
241 |
+
yaml_config_filename = config_value
|
242 |
+
yaml_config_filepath = os.path.join(self.model_file_dir, yaml_config_filename)
|
243 |
+
|
244 |
+
try:
|
245 |
+
self.download_file_if_not_exists(f"{model_repo_url_prefix}/mdx_c_configs/{yaml_config_filename}", yaml_config_filepath)
|
246 |
+
except RuntimeError:
|
247 |
+
self.logger.debug(translations["yaml_debug"])
|
248 |
+
else: self.download_file_if_not_exists(f"{model_repo_url_prefix}/Demucs/{config_value}", os.path.join(self.model_file_dir, config_value))
|
249 |
+
|
250 |
+
self.logger.debug(translations["download_model_friendly"].format(model_friendly_name=model_friendly_name, model_path=model_path))
|
251 |
+
return model_filename, model_type, model_friendly_name, model_path, yaml_config_filename
|
252 |
+
|
253 |
+
raise ValueError(translations["not_found_model_2"].format(model_filename=model_filename))
|
254 |
+
|
255 |
+
def load_model_data_from_yaml(self, yaml_config_filename):
|
256 |
+
model_data_yaml_filepath = os.path.join(self.model_file_dir, yaml_config_filename) if not os.path.exists(yaml_config_filename) else yaml_config_filename
|
257 |
+
self.logger.debug(translations["load_yaml"].format(model_data_yaml_filepath=model_data_yaml_filepath))
|
258 |
+
|
259 |
+
model_data = yaml.load(open(model_data_yaml_filepath, encoding="utf-8"), Loader=yaml.FullLoader)
|
260 |
+
self.logger.debug(translations["load_yaml_2"].format(model_data=model_data))
|
261 |
+
|
262 |
+
if "roformer" in model_data_yaml_filepath: model_data["is_roformer"] = True
|
263 |
+
return model_data
|
264 |
+
|
265 |
+
def load_model_data_using_hash(self, model_path):
|
266 |
+
self.logger.debug(translations["hash_md5"])
|
267 |
+
model_hash = self.get_model_hash(model_path)
|
268 |
+
|
269 |
+
self.logger.debug(translations["model_hash"].format(model_path=model_path, model_hash=model_hash))
|
270 |
+
mdx_model_data_path = codecs.decode("uggcf://uhttvatsnpr.pb/NauC/Ivrganzrfr-EIP-Cebwrpg/enj/znva/wfba/zbqry_qngn.wfba", "rot13")
|
271 |
+
self.logger.debug(translations["mdx_data"].format(mdx_model_data_path=mdx_model_data_path))
|
272 |
+
|
273 |
+
response = requests.get(mdx_model_data_path)
|
274 |
+
response.raise_for_status()
|
275 |
+
|
276 |
+
mdx_model_data_object = response.json()
|
277 |
+
self.logger.debug(translations["load_mdx"])
|
278 |
+
|
279 |
+
if model_hash in mdx_model_data_object: model_data = mdx_model_data_object[model_hash]
|
280 |
+
else: raise ValueError(translations["model_not_support"].format(model_hash=model_hash))
|
281 |
+
|
282 |
+
self.logger.debug(translations["uvr_json"].format(model_hash=model_hash, model_data=model_data))
|
283 |
+
return model_data
|
284 |
+
|
285 |
+
def load_model(self, model_filename):
|
286 |
+
self.logger.info(translations["loading_model"].format(model_filename=model_filename))
|
287 |
+
load_model_start_time = time.perf_counter()
|
288 |
+
model_filename, model_type, model_friendly_name, model_path, yaml_config_filename = self.download_model_files(model_filename)
|
289 |
+
self.logger.debug(translations["download_model_friendly_2"].format(model_friendly_name=model_friendly_name, model_path=model_path))
|
290 |
+
|
291 |
+
if model_path.lower().endswith(".yaml"): yaml_config_filename = model_path
|
292 |
+
|
293 |
+
common_params = {"logger": self.logger, "log_level": self.log_level, "torch_device": self.torch_device, "torch_device_cpu": self.torch_device_cpu, "torch_device_mps": self.torch_device_mps, "onnx_execution_provider": self.onnx_execution_provider, "model_name": model_filename.split(".")[0], "model_path": model_path, "model_data": self.load_model_data_from_yaml(yaml_config_filename) if yaml_config_filename is not None else self.load_model_data_using_hash(model_path), "output_format": self.output_format, "output_bitrate": self.output_bitrate, "output_dir": self.output_dir, "normalization_threshold": self.normalization_threshold, "output_single_stem": self.output_single_stem, "invert_using_spec": self.invert_using_spec, "sample_rate": self.sample_rate}
|
294 |
+
separator_classes = {"MDX": "mdx_separator.MDXSeparator", "Demucs": "demucs_separator.DemucsSeparator"}
|
295 |
+
|
296 |
+
if model_type not in self.arch_specific_params or model_type not in separator_classes: raise ValueError(translations["model_type_not_support"].format(model_type=model_type))
|
297 |
+
if model_type == "Demucs" and sys.version_info < (3, 10): raise Exception(translations["demucs_not_support_python<3.10"])
|
298 |
+
|
299 |
+
self.logger.debug(f"{translations['import_module']} {model_type}: {separator_classes[model_type]}")
|
300 |
+
module_name, class_name = separator_classes[model_type].split(".")
|
301 |
+
separator_class = getattr(import_module(f"main.library.architectures.{module_name}"), class_name)
|
302 |
+
|
303 |
+
self.logger.debug(f"{translations['initialization']} {model_type}: {separator_class}")
|
304 |
+
self.model_instance = separator_class(common_config=common_params, arch_config=self.arch_specific_params[model_type])
|
305 |
+
|
306 |
+
self.logger.debug(translations["loading_model_success"])
|
307 |
+
self.logger.info(f"{translations['loading_model_duration']}: {time.strftime('%H:%M:%S', time.gmtime(int(time.perf_counter() - load_model_start_time)))}")
|
308 |
+
|
309 |
+
def separate(self, audio_file_path):
|
310 |
+
self.logger.info(f"{translations['starting_separator']}: {audio_file_path}")
|
311 |
+
separate_start_time = time.perf_counter()
|
312 |
+
|
313 |
+
self.logger.debug(translations["normalization"].format(normalization_threshold=self.normalization_threshold))
|
314 |
+
output_files = self.model_instance.separate(audio_file_path)
|
315 |
+
|
316 |
+
self.model_instance.clear_gpu_cache()
|
317 |
+
self.model_instance.clear_file_specific_paths()
|
318 |
+
|
319 |
+
self.print_uvr_vip_message()
|
320 |
+
|
321 |
+
self.logger.debug(translations["separator_success_3"])
|
322 |
+
self.logger.info(f"{translations['separator_duration']}: {time.strftime('%H:%M:%S', time.gmtime(int(time.perf_counter() - separate_start_time)))}")
|
323 |
+
return output_files
|
324 |
+
|
325 |
+
def download_model_and_data(self, model_filename):
|
326 |
+
self.logger.info(translations["loading_separator_model"].format(model_filename=model_filename))
|
327 |
+
model_filename, model_type, model_friendly_name, model_path, yaml_config_filename = self.download_model_files(model_filename)
|
328 |
+
|
329 |
+
if model_path.lower().endswith(".yaml"): yaml_config_filename = model_path
|
330 |
+
self.logger.info(translations["downloading_model"].format(model_type=model_type, model_friendly_name=model_friendly_name, model_path=model_path, model_data_dict_size=len(self.load_model_data_from_yaml(yaml_config_filename) if yaml_config_filename is not None else self.load_model_data_using_hash(model_path))))
|
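A short end-to-end sketch of the Separator API above (the model filename and paths below are illustrative examples, not values taken from this upload):

# Hypothetical usage sketch; the UVR model name must exist in the downloadable model list.
from main.library.algorithm.separator import Separator

separator = Separator(output_dir="audios", output_format="wav")
separator.load_model("UVR-MDX-NET-Voc_FT.onnx")    # resolves, downloads and instantiates the MDX separator
stems = separator.separate("dataset/song.wav")     # returns the paths of the separated stems
print(stems)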
main/library/algorithm/synthesizers.py
ADDED
@@ -0,0 +1,450 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import math
|
4 |
+
import torch
|
5 |
+
|
6 |
+
import torch.nn.functional as F
|
7 |
+
import torch.utils.checkpoint as checkpoint
|
8 |
+
|
9 |
+
from torch.nn.utils import remove_weight_norm
|
10 |
+
from torch.nn.utils.parametrizations import weight_norm
|
11 |
+
|
12 |
+
sys.path.append(os.getcwd())
|
13 |
+
|
14 |
+
from .modules import WaveNet
|
15 |
+
from .refinegan import RefineGANGenerator
|
16 |
+
from .mrf_hifigan import HiFiGANMRFGenerator
|
17 |
+
from .residuals import ResidualCouplingBlock, ResBlock, LRELU_SLOPE
|
18 |
+
from .commons import init_weights, slice_segments, rand_slice_segments, sequence_mask, convert_pad_shape
|
19 |
+
|
20 |
+
class Generator(torch.nn.Module):
|
21 |
+
def __init__(self, initial_channel, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
|
22 |
+
super(Generator, self).__init__()
|
23 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
24 |
+
self.num_upsamples = len(upsample_rates)
|
25 |
+
self.conv_pre = torch.nn.Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
|
26 |
+
self.ups_and_resblocks = torch.nn.ModuleList()
|
27 |
+
|
28 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
29 |
+
self.ups_and_resblocks.append(weight_norm(torch.nn.ConvTranspose1d(upsample_initial_channel // (2**i), upsample_initial_channel // (2 ** (i + 1)), k, u, padding=(k - u) // 2)))
|
30 |
+
ch = upsample_initial_channel // (2 ** (i + 1))
|
31 |
+
|
32 |
+
for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
|
33 |
+
self.ups_and_resblocks.append(ResBlock(ch, k, d))
|
34 |
+
|
35 |
+
self.conv_post = torch.nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
36 |
+
self.ups_and_resblocks.apply(init_weights)
|
37 |
+
if gin_channels != 0: self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
38 |
+
|
39 |
+
def forward(self, x, g = None):
|
40 |
+
x = self.conv_pre(x)
|
41 |
+
if g is not None: x = x + self.cond(g)
|
42 |
+
resblock_idx = 0
|
43 |
+
|
44 |
+
for _ in range(self.num_upsamples):
|
45 |
+
x = self.ups_and_resblocks[resblock_idx](F.leaky_relu(x, LRELU_SLOPE))
|
46 |
+
resblock_idx += 1
|
47 |
+
xs = 0
|
48 |
+
|
49 |
+
for _ in range(self.num_kernels):
|
50 |
+
xs += self.ups_and_resblocks[resblock_idx](x)
|
51 |
+
resblock_idx += 1
|
52 |
+
|
53 |
+
x = xs / self.num_kernels
|
54 |
+
|
55 |
+
return torch.tanh(self.conv_post(F.leaky_relu(x)))
|
56 |
+
|
57 |
+
def __prepare_scriptable__(self):
|
58 |
+
for l in self.ups_and_resblocks:
|
59 |
+
for hook in l._forward_pre_hooks.values():
|
60 |
+
if (hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" and hook.__class__.__name__ == "WeightNorm"): torch.nn.utils.remove_weight_norm(l)
|
61 |
+
|
62 |
+
return self
|
63 |
+
|
64 |
+
def remove_weight_norm(self):
|
65 |
+
for l in self.ups_and_resblocks:
|
66 |
+
remove_weight_norm(l)
|
67 |
+
|
68 |
+
class SineGen(torch.nn.Module):
|
69 |
+
def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voiced_threshold=0, flag_for_pulse=False):
|
70 |
+
super(SineGen, self).__init__()
|
71 |
+
self.sine_amp = sine_amp
|
72 |
+
self.noise_std = noise_std
|
73 |
+
self.harmonic_num = harmonic_num
|
74 |
+
self.dim = self.harmonic_num + 1
|
75 |
+
self.sample_rate = samp_rate
|
76 |
+
self.voiced_threshold = voiced_threshold
|
77 |
+
|
78 |
+
def _f02uv(self, f0):
|
79 |
+
return torch.ones_like(f0) * (f0 > self.voiced_threshold)
|
80 |
+
|
81 |
+
def forward(self, f0, upp):
|
82 |
+
with torch.no_grad():
|
83 |
+
f0 = f0[:, None].transpose(1, 2)
|
84 |
+
|
85 |
+
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
86 |
+
f0_buf[:, :, 0] = f0[:, :, 0]
|
87 |
+
f0_buf[:, :, 1:] = (f0_buf[:, :, 0:1] * torch.arange(2, self.harmonic_num + 2, device=f0.device)[None, None, :])
|
88 |
+
|
89 |
+
rad_values = (f0_buf / float(self.sample_rate)) % 1
|
90 |
+
rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device)
|
91 |
+
rand_ini[:, 0] = 0
|
92 |
+
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
93 |
+
|
94 |
+
tmp_over_one = torch.cumsum(rad_values, 1)
|
95 |
+
tmp_over_one *= upp
|
96 |
+
tmp_over_one = F.interpolate(tmp_over_one.transpose(2, 1), scale_factor=float(upp), mode="linear", align_corners=True).transpose(2, 1)
|
97 |
+
|
98 |
+
rad_values = F.interpolate(rad_values.transpose(2, 1), scale_factor=float(upp), mode="nearest").transpose(2, 1)
|
99 |
+
tmp_over_one %= 1
|
100 |
+
|
101 |
+
cumsum_shift = torch.zeros_like(rad_values)
|
102 |
+
cumsum_shift[:, 1:, :] = ((tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0) * -1.0
|
103 |
+
|
104 |
+
uv = F.interpolate(self._f02uv(f0).transpose(2, 1), scale_factor=float(upp), mode="nearest").transpose(2, 1)
|
105 |
+
sine_waves = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * torch.pi) * self.sine_amp
|
106 |
+
sine_waves = sine_waves * uv + ((uv * self.noise_std + (1 - uv) * self.sine_amp / 3) * torch.randn_like(sine_waves))
|
107 |
+
|
108 |
+
return sine_waves
|
109 |
+
|
110 |
+
class SourceModuleHnNSF(torch.nn.Module):
|
111 |
+
def __init__(self, sample_rate, harmonic_num=0, sine_amp=0.1, add_noise_std=0.003, voiced_threshod=0):
|
112 |
+
super(SourceModuleHnNSF, self).__init__()
|
113 |
+
self.sine_amp = sine_amp
|
114 |
+
self.noise_std = add_noise_std
|
115 |
+
self.l_sin_gen = SineGen(sample_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod)
|
116 |
+
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
117 |
+
self.l_tanh = torch.nn.Tanh()
|
118 |
+
|
119 |
+
def forward(self, x, upsample_factor = 1):
|
120 |
+
return self.l_tanh(self.l_linear(self.l_sin_gen(x, upsample_factor).to(dtype=self.l_linear.weight.dtype)))
|
121 |
+
|
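For orientation, a minimal sketch (assumed import path, made-up sample rate, pitch and upsampling factor) of how SourceModuleHnNSF above turns a frame-level f0 contour into the harmonic excitation consumed by GeneratorNSF below:

# Hypothetical illustration only; 40000 Hz with an upsampling factor of 400 is an assumption.
import torch
from main.library.algorithm.synthesizers import SourceModuleHnNSF

source = SourceModuleHnNSF(sample_rate=40000, harmonic_num=0)
f0 = torch.full((1, 50), 220.0)                   # 50 frames of a steady 220 Hz pitch
excitation = source(f0, upsample_factor=400)      # -> [1, 50 * 400, 1] sine-plus-noise excitation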
122 |
+
class GeneratorNSF(torch.nn.Module):
|
123 |
+
def __init__(self, initial_channel, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels, sr, checkpointing = False):
|
124 |
+
super(GeneratorNSF, self).__init__()
|
125 |
+
self.num_kernels = len(resblock_kernel_sizes)
|
126 |
+
self.num_upsamples = len(upsample_rates)
|
127 |
+
self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
|
128 |
+
self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0)
|
129 |
+
self.conv_pre = torch.nn.Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
|
130 |
+
self.checkpointing = checkpointing
|
131 |
+
self.ups = torch.nn.ModuleList()
|
132 |
+
self.noise_convs = torch.nn.ModuleList()
|
133 |
+
channels = [upsample_initial_channel // (2 ** (i + 1)) for i in range(len(upsample_rates))]
|
134 |
+
stride_f0s = [math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1 for i in range(len(upsample_rates))]
|
135 |
+
|
136 |
+
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
137 |
+
self.ups.append(weight_norm(torch.nn.ConvTranspose1d(upsample_initial_channel // (2**i), channels[i], k, u, padding=(k - u) // 2)))
|
138 |
+
self.noise_convs.append(torch.nn.Conv1d(1, channels[i], kernel_size=(stride_f0s[i] * 2 if stride_f0s[i] > 1 else 1), stride=stride_f0s[i], padding=(stride_f0s[i] // 2 if stride_f0s[i] > 1 else 0)))
|
139 |
+
|
140 |
+
self.resblocks = torch.nn.ModuleList([ResBlock(channels[i], k, d) for i in range(len(self.ups)) for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes)])
|
141 |
+
self.conv_post = torch.nn.Conv1d(channels[-1], 1, 7, 1, padding=3, bias=False)
|
142 |
+
self.ups.apply(init_weights)
|
143 |
+
|
144 |
+
if gin_channels != 0: self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
145 |
+
|
146 |
+
self.upp = math.prod(upsample_rates)
|
147 |
+
self.lrelu_slope = LRELU_SLOPE
|
148 |
+
|
149 |
+
def forward(self, x, f0, g = None):
|
150 |
+
har_source = self.m_source(f0, self.upp).transpose(1, 2)
|
151 |
+
x = self.conv_pre(x)
|
152 |
+
if g is not None: x = x + self.cond(g)
|
153 |
+
|
154 |
+
for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
|
155 |
+
x = F.leaky_relu(x, self.lrelu_slope)
|
156 |
+
x = checkpoint.checkpoint(ups, x, use_reentrant=False) if self.training and self.checkpointing else ups(x)
|
157 |
+
x += noise_convs(har_source)
|
158 |
+
|
159 |
+
def resblock_forward(x, blocks):
|
160 |
+
return sum(block(x) for block in blocks) / len(blocks)
|
161 |
+
|
162 |
+
blocks = self.resblocks[i * self.num_kernels:(i + 1) * self.num_kernels]
|
163 |
+
x = checkpoint.checkpoint(resblock_forward, x, blocks, use_reentrant=False) if self.training and self.checkpointing else resblock_forward(x, blocks)
|
164 |
+
|
165 |
+
return torch.tanh(self.conv_post(F.leaky_relu(x)))
|
166 |
+
|
167 |
+
def remove_weight_norm(self):
|
168 |
+
for l in self.ups:
|
169 |
+
remove_weight_norm(l)
|
170 |
+
|
171 |
+
for l in self.resblocks:
|
172 |
+
l.remove_weight_norm()
|
173 |
+
|
174 |
+
class LayerNorm(torch.nn.Module):
|
175 |
+
def __init__(self, channels, eps=1e-5):
|
176 |
+
super().__init__()
|
177 |
+
self.eps = eps
|
178 |
+
self.gamma = torch.nn.Parameter(torch.ones(channels))
|
179 |
+
self.beta = torch.nn.Parameter(torch.zeros(channels))
|
180 |
+
|
181 |
+
def forward(self, x):
|
182 |
+
x = x.transpose(1, -1)
|
183 |
+
return F.layer_norm(x, (x.size(-1),), self.gamma, self.beta, self.eps).transpose(1, -1)
|
184 |
+
|
185 |
+
class MultiHeadAttention(torch.nn.Module):
|
186 |
+
def __init__(self, channels, out_channels, n_heads, p_dropout=0.0, window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
|
187 |
+
super().__init__()
|
188 |
+
assert channels % n_heads == 0
|
189 |
+
self.channels = channels
|
190 |
+
self.out_channels = out_channels
|
191 |
+
self.n_heads = n_heads
|
192 |
+
self.p_dropout = p_dropout
|
193 |
+
self.window_size = window_size
|
194 |
+
self.heads_share = heads_share
|
195 |
+
self.block_length = block_length
|
196 |
+
self.proximal_bias = proximal_bias
|
197 |
+
self.proximal_init = proximal_init
|
198 |
+
self.attn = None
|
199 |
+
self.k_channels = channels // n_heads
|
200 |
+
self.conv_q = torch.nn.Conv1d(channels, channels, 1)
|
201 |
+
self.conv_k = torch.nn.Conv1d(channels, channels, 1)
|
202 |
+
self.conv_v = torch.nn.Conv1d(channels, channels, 1)
|
203 |
+
self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
|
204 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
205 |
+
|
206 |
+
if window_size is not None:
|
207 |
+
n_heads_rel = 1 if heads_share else n_heads
|
208 |
+
rel_stddev = self.k_channels**-0.5
|
209 |
+
self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
210 |
+
self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
|
211 |
+
|
212 |
+
torch.nn.init.xavier_uniform_(self.conv_q.weight)
|
213 |
+
torch.nn.init.xavier_uniform_(self.conv_k.weight)
|
214 |
+
torch.nn.init.xavier_uniform_(self.conv_v.weight)
|
215 |
+
|
216 |
+
if proximal_init:
|
217 |
+
with torch.no_grad():
|
218 |
+
self.conv_k.weight.copy_(self.conv_q.weight)
|
219 |
+
self.conv_k.bias.copy_(self.conv_q.bias)
|
220 |
+
|
221 |
+
def forward(self, x, c, attn_mask=None):
|
222 |
+
q, k, v = self.conv_q(x), self.conv_k(c), self.conv_v(c)
|
223 |
+
x, self.attn = self.attention(q, k, v, mask=attn_mask)
|
224 |
+
return self.conv_o(x)
|
225 |
+
|
226 |
+
def attention(self, query, key, value, mask=None):
|
227 |
+
b, d, t_s, t_t = (*key.size(), query.size(2))
|
228 |
+
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
|
229 |
+
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
|
230 |
+
|
231 |
+
scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
|
232 |
+
if self.window_size is not None:
|
233 |
+
assert (t_s == t_t), "(t_s == t_t)"
|
234 |
+
scores = scores + self._relative_position_to_absolute_position(self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), self._get_relative_embeddings(self.emb_rel_k, t_s)))
|
235 |
+
|
236 |
+
if self.proximal_bias:
|
237 |
+
assert t_s == t_t, "t_s == t_t"
|
238 |
+
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
|
239 |
+
|
240 |
+
if mask is not None:
|
241 |
+
scores = scores.masked_fill(mask == 0, -1e4)
|
242 |
+
if self.block_length is not None:
|
243 |
+
assert (t_s == t_t), "(t_s == t_t)"
|
244 |
+
scores = scores.masked_fill((torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)) == 0, -1e4)
|
245 |
+
|
246 |
+
p_attn = self.drop(F.softmax(scores, dim=-1))
|
247 |
+
output = torch.matmul(p_attn, value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3))
|
248 |
+
|
249 |
+
if self.window_size is not None: output = output + self._matmul_with_relative_values(self._absolute_position_to_relative_position(p_attn), self._get_relative_embeddings(self.emb_rel_v, t_s))
|
250 |
+
return (output.transpose(2, 3).contiguous().view(b, d, t_t)), p_attn
|
251 |
+
|
252 |
+
def _matmul_with_relative_values(self, x, y):
|
253 |
+
return torch.matmul(x, y.unsqueeze(0))
|
254 |
+
|
255 |
+
def _matmul_with_relative_keys(self, x, y):
|
256 |
+
return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
|
257 |
+
|
258 |
+
def _get_relative_embeddings(self, relative_embeddings, length):
|
259 |
+
pad_length = max(length - (self.window_size + 1), 0)
|
260 |
+
slice_start_position = max((self.window_size + 1) - length, 0)
|
261 |
+
return (F.pad(relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) if pad_length > 0 else relative_embeddings)[:, slice_start_position:(slice_start_position + 2 * length - 1)]
|
262 |
+
|
263 |
+
def _relative_position_to_absolute_position(self, x):
|
264 |
+
batch, heads, length, _ = x.size()
|
265 |
+
return F.pad(F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])).view([batch, heads, length * 2 * length]), convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])).view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :]
|
266 |
+
|
267 |
+
def _absolute_position_to_relative_position(self, x):
|
268 |
+
batch, heads, length, _ = x.size()
|
269 |
+
return F.pad(F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])).view([batch, heads, length**2 + length * (length - 1)]), convert_pad_shape([[0, 0], [0, 0], [length, 0]])).view([batch, heads, length, 2 * length])[:, :, :, 1:]
|
270 |
+
|
271 |
+
def _attention_bias_proximal(self, length):
|
272 |
+
r = torch.arange(length, dtype=torch.float32)
|
273 |
+
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs((torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)))), 0), 0)
|
274 |
+
|
275 |
+
class FFN(torch.nn.Module):
|
276 |
+
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False):
|
277 |
+
super().__init__()
|
278 |
+
self.in_channels = in_channels
|
279 |
+
self.out_channels = out_channels
|
280 |
+
self.filter_channels = filter_channels
|
281 |
+
self.kernel_size = kernel_size
|
282 |
+
self.p_dropout = p_dropout
|
283 |
+
self.activation = activation
|
284 |
+
self.causal = causal
|
285 |
+
self.padding = self._causal_padding if causal else self._same_padding
|
286 |
+
self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size)
|
287 |
+
self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size)
|
288 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
289 |
+
|
290 |
+
def forward(self, x, x_mask):
|
291 |
+
x = self.conv_1(self.padding(x * x_mask))
|
292 |
+
return self.conv_2(self.padding(self.drop(((x * torch.sigmoid(1.702 * x)) if self.activation == "gelu" else torch.relu(x))) * x_mask)) * x_mask
|
293 |
+
|
294 |
+
def _causal_padding(self, x):
|
295 |
+
if self.kernel_size == 1: return x
|
296 |
+
|
297 |
+
return F.pad(x, convert_pad_shape([[0, 0], [0, 0], [(self.kernel_size - 1), 0]]))
|
298 |
+
|
299 |
+
def _same_padding(self, x):
|
300 |
+
if self.kernel_size == 1: return x
|
301 |
+
|
302 |
+
return F.pad(x, convert_pad_shape([[0, 0], [0, 0], [((self.kernel_size - 1) // 2), (self.kernel_size // 2)]]))
|
303 |
+
|
304 |
+
class Encoder(torch.nn.Module):
|
305 |
+
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, window_size=10, **kwargs):
|
306 |
+
super().__init__()
|
307 |
+
self.hidden_channels = hidden_channels
|
308 |
+
self.filter_channels = filter_channels
|
309 |
+
self.n_heads = n_heads
|
310 |
+
self.n_layers = n_layers
|
311 |
+
self.kernel_size = kernel_size
|
312 |
+
self.p_dropout = p_dropout
|
313 |
+
self.window_size = window_size
|
314 |
+
self.drop = torch.nn.Dropout(p_dropout)
|
315 |
+
self.attn_layers = torch.nn.ModuleList()
|
316 |
+
self.norm_layers_1 = torch.nn.ModuleList()
|
317 |
+
self.ffn_layers = torch.nn.ModuleList()
|
318 |
+
self.norm_layers_2 = torch.nn.ModuleList()
|
319 |
+
|
320 |
+
for _ in range(self.n_layers):
|
321 |
+
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
|
322 |
+
self.norm_layers_1.append(LayerNorm(hidden_channels))
|
323 |
+
|
324 |
+
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
|
325 |
+
self.norm_layers_2.append(LayerNorm(hidden_channels))
|
326 |
+
|
327 |
+
def forward(self, x, x_mask):
|
328 |
+
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
|
329 |
+
x = x * x_mask
|
330 |
+
|
331 |
+
for i in range(self.n_layers):
|
332 |
+
x = self.norm_layers_1[i](x + self.drop(self.attn_layers[i](x, x, attn_mask)))
|
333 |
+
x = self.norm_layers_2[i](x + self.drop(self.ffn_layers[i](x, x_mask)))
|
334 |
+
|
335 |
+
return x * x_mask
|
336 |
+
|
337 |
+
class TextEncoder(torch.nn.Module):
|
338 |
+
def __init__(self, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, embedding_dim, f0=True):
|
339 |
+
super(TextEncoder, self).__init__()
|
340 |
+
self.out_channels = out_channels
|
341 |
+
self.hidden_channels = hidden_channels
|
342 |
+
self.filter_channels = filter_channels
|
343 |
+
self.n_heads = n_heads
|
344 |
+
self.n_layers = n_layers
|
345 |
+
self.kernel_size = kernel_size
|
346 |
+
self.p_dropout = float(p_dropout)
|
347 |
+
self.emb_phone = torch.nn.Linear(embedding_dim, hidden_channels)
|
348 |
+
self.lrelu = torch.nn.LeakyReLU(0.1, inplace=True)
|
349 |
+
if f0: self.emb_pitch = torch.nn.Embedding(256, hidden_channels)
|
350 |
+
self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout))
|
351 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
352 |
+
|
353 |
+
def forward(self, phone, pitch, lengths):
|
354 |
+
x = self.emb_phone(phone) if pitch is None else (self.emb_phone(phone) + self.emb_pitch(pitch))
|
355 |
+
x = torch.transpose(self.lrelu((x * math.sqrt(self.hidden_channels))), 1, -1)
|
356 |
+
x_mask = torch.unsqueeze(sequence_mask(lengths, x.size(2)), 1).to(x.dtype)
|
357 |
+
m, logs = torch.split((self.proj(self.encoder(x * x_mask, x_mask)) * x_mask), self.out_channels, dim=1)
|
358 |
+
return m, logs, x_mask
|
359 |
+
|
360 |
+
class PosteriorEncoder(torch.nn.Module):
|
361 |
+
def __init__(self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0):
|
362 |
+
super(PosteriorEncoder, self).__init__()
|
363 |
+
self.in_channels = in_channels
|
364 |
+
self.out_channels = out_channels
|
365 |
+
self.hidden_channels = hidden_channels
|
366 |
+
self.kernel_size = kernel_size
|
367 |
+
self.dilation_rate = dilation_rate
|
368 |
+
self.n_layers = n_layers
|
369 |
+
self.gin_channels = gin_channels
|
370 |
+
self.pre = torch.nn.Conv1d(in_channels, hidden_channels, 1)
|
371 |
+
self.enc = WaveNet(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
|
372 |
+
self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
373 |
+
|
374 |
+
def forward(self, x, x_lengths, g = None):
|
375 |
+
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
376 |
+
m, logs = torch.split((self.proj(self.enc((self.pre(x) * x_mask), x_mask, g=g)) * x_mask), self.out_channels, dim=1)
|
377 |
+
return ((m + torch.randn_like(m) * torch.exp(logs)) * x_mask), m, logs, x_mask
|
378 |
+
|
379 |
+
def remove_weight_norm(self):
|
380 |
+
self.enc.remove_weight_norm()
|
381 |
+
|
382 |
+
class Synthesizer(torch.nn.Module):
    def __init__(self, spec_channels, segment_size, inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, spk_embed_dim, gin_channels, sr, use_f0, text_enc_hidden_dim=768, vocoder="Default", checkpointing = False, **kwargs):
        super(Synthesizer, self).__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = float(p_dropout)
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        self.spk_embed_dim = spk_embed_dim
        self.use_f0 = use_f0
        self.enc_p = TextEncoder(inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, float(p_dropout), text_enc_hidden_dim, f0=use_f0)

        if use_f0:
            if vocoder == "RefineGAN": self.dec = RefineGANGenerator(sample_rate=sr, upsample_rates=upsample_rates, num_mels=inter_channels, checkpointing=checkpointing)
            elif vocoder == "MRF HiFi-GAN": self.dec = HiFiGANMRFGenerator(in_channel=inter_channels, upsample_initial_channel=upsample_initial_channel, upsample_rates=upsample_rates, upsample_kernel_sizes=upsample_kernel_sizes, resblock_kernel_sizes=resblock_kernel_sizes, resblock_dilations=resblock_dilation_sizes, gin_channels=gin_channels, sample_rate=sr, harmonic_num=8, checkpointing=checkpointing)
            else: self.dec = GeneratorNSF(inter_channels, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels, sr=sr, checkpointing=checkpointing)
        else: self.dec = Generator(inter_channels, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)

        self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
        self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels)
        self.emb_g = torch.nn.Embedding(self.spk_embed_dim, gin_channels)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    @torch.jit.ignore
    def forward(self, phone, phone_lengths, pitch = None, pitchf = None, y = None, y_lengths = None, ds = None):
        g = self.emb_g(ds).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)

        if y is not None:
            z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
            z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size)
            return (self.dec(z_slice, slice_segments(pitchf, ids_slice, self.segment_size, 2), g=g) if self.use_f0 else self.dec(z_slice, g=g)), ids_slice, x_mask, y_mask, (z, self.flow(z, y_mask, g=g), m_p, logs_p, m_q, logs_q)
        else: return None, None, x_mask, None, (None, None, m_p, logs_p, None, None)

    @torch.jit.export
    def infer(self, phone, phone_lengths, pitch = None, nsff0 = None, sid = None, rate = None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask

        if rate is not None:
            assert isinstance(rate, torch.Tensor)
            head = int(z_p.shape[2] * (1.0 - rate.item()))
            z_p = z_p[:, :, head:]
            x_mask = x_mask[:, :, head:]
            if self.use_f0: nsff0 = nsff0[:, head:]

        if self.use_f0:
            z = self.flow(z_p, x_mask, g=g, reverse=True)
            o = self.dec(z * x_mask, nsff0, g=g)
        else:
            z = self.flow(z_p, x_mask, g=g, reverse=True)
            o = self.dec(z * x_mask, g=g)

        return o, x_mask, (z, z_p, m_p, logs_p)
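
# Editorial usage sketch (not part of the original file): how Synthesizer.infer is
# typically driven for an f0-conditioned model. Every constructor argument below is an
# assumption mirroring values like those in main/configs/v2/40000.json, the input
# tensors are random stand-ins for embedder features and extracted f0 (so the output
# is noise, not speech), and _sketch_synthesizer_infer is illustrative only.
def _sketch_synthesizer_infer():
    import torch
    net_g = Synthesizer(
        spec_channels=1025, segment_size=32, inter_channels=192, hidden_channels=192,
        filter_channels=768, n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.0,
        resblock="1", resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[10, 10, 2, 2], upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4], spk_embed_dim=109, gin_channels=256,
        sr=40000, use_f0=True,
    ).eval()
    phone = torch.randn(1, 200, 768)            # content features (batch, frames, text_enc_hidden_dim)
    phone_lengths = torch.tensor([200])
    pitch = torch.randint(1, 255, (1, 200))     # coarse f0 bins for TextEncoder.emb_pitch
    nsff0 = 100.0 + 300.0 * torch.rand(1, 200)  # f0 in Hz for the NSF decoder
    sid = torch.tensor([0])                     # speaker index looked up by emb_g
    with torch.no_grad():
        audio, _, _ = net_g.infer(phone, phone_lengths, pitch, nsff0, sid)
    return audio                                # waveform of roughly frames * prod(upsample_rates) samples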