kevinwang676 committed
Commit ca53f73 · verified · 1 Parent(s): 2115f08

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. GPT_SoVITS/BigVGAN/LICENSE +21 -0
  2. GPT_SoVITS/BigVGAN/README.md +266 -0
  3. GPT_SoVITS/BigVGAN/activations.py +122 -0
  4. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py +0 -0
  5. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py +69 -0
  6. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp +23 -0
  7. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu +246 -0
  8. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h +29 -0
  9. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py +82 -0
  10. GPT_SoVITS/BigVGAN/alias_free_activation/cuda/type_shim.h +92 -0
  11. GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py +6 -0
  12. GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py +30 -0
  13. GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py +99 -0
  14. GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py +48 -0
  15. GPT_SoVITS/BigVGAN/bigvgan.py +461 -0
  16. GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json +45 -0
  17. GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json +45 -0
  18. GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json +45 -0
  19. GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json +45 -0
  20. GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json +61 -0
  21. GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json +61 -0
  22. GPT_SoVITS/BigVGAN/configs/bigvgan_v2_24khz_100band_256x.json +61 -0
  23. GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json +61 -0
  24. GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json +61 -0
  25. GPT_SoVITS/BigVGAN/discriminators.py +625 -0
  26. GPT_SoVITS/BigVGAN/env.py +18 -0
  27. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_1 +21 -0
  28. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_2 +21 -0
  29. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_3 +201 -0
  30. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_4 +29 -0
  31. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_5 +16 -0
  32. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_6 +21 -0
  33. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_7 +21 -0
  34. GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_8 +21 -0
  35. GPT_SoVITS/BigVGAN/inference.py +85 -0
  36. GPT_SoVITS/BigVGAN/inference_e2e.py +100 -0
  37. GPT_SoVITS/BigVGAN/loss.py +238 -0
  38. GPT_SoVITS/BigVGAN/meldataset.py +370 -0
  39. GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep +1 -0
  40. GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md +4 -0
  41. GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md +13 -0
  42. GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md +126 -0
  43. GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md +14 -0
  44. GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md +6 -0
  45. GPT_SoVITS/BigVGAN/requirements.txt +13 -0
  46. GPT_SoVITS/BigVGAN/tests/test_activation.py +62 -0
  47. GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py +62 -0
  48. GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py +215 -0
  49. GPT_SoVITS/BigVGAN/train.py +716 -0
  50. GPT_SoVITS/BigVGAN/utils0.py +99 -0
GPT_SoVITS/BigVGAN/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 NVIDIA CORPORATION.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/README.md ADDED
@@ -0,0 +1,266 @@
1
+ ## BigVGAN: A Universal Neural Vocoder with Large-Scale Training
2
+
3
+ #### Sang-gil Lee, Wei Ping, Boris Ginsburg, Bryan Catanzaro, Sungroh Yoon
4
+
5
+ [[Paper]](https://arxiv.org/abs/2206.04658) - [[Code]](https://github.com/NVIDIA/BigVGAN) - [[Showcase]](https://bigvgan-demo.github.io/) - [[Project Page]](https://research.nvidia.com/labs/adlr/projects/bigvgan/) - [[Weights]](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a) - [[Demo]](https://huggingface.co/spaces/nvidia/BigVGAN)
6
+
7
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/bigvgan-a-universal-neural-vocoder-with-large/speech-synthesis-on-libritts)](https://paperswithcode.com/sota/speech-synthesis-on-libritts?p=bigvgan-a-universal-neural-vocoder-with-large)
8
+
9
+ <center><img src="https://user-images.githubusercontent.com/15963413/218609148-881e39df-33af-4af9-ab95-1427c4ebf062.png" width="800"></center>
10
+
11
+ ## News
12
+ - **Sep 2024 (v2.4):**
13
+ - We have updated the pretrained checkpoints trained for 5M steps. This is the final release of the BigVGAN-v2 checkpoints.
14
+
15
+ - **Jul 2024 (v2.3):**
16
+ - General refactoring and code improvements for readability.
17
+ - Fully fused CUDA kernel of anti-aliased activation (upsampling + activation + downsampling) with an inference speed benchmark.
18
+
19
+ - **Jul 2024 (v2.2):** The repository now includes an interactive local demo using gradio.
20
+
21
+ - **Jul 2024 (v2.1):** BigVGAN is now integrated with 🤗 Hugging Face Hub with easy access to inference using pretrained checkpoints. We also provide an interactive demo on Hugging Face Spaces.
22
+
23
+ - **Jul 2024 (v2):** We release BigVGAN-v2 along with pretrained checkpoints. Below are the highlights:
24
+ - Custom CUDA kernel for inference: we provide a fused upsampling + activation kernel written in CUDA for accelerated inference. Our tests show a 1.5 - 3x speedup on a single A100 GPU.
25
+ - Improved discriminator and loss: BigVGAN-v2 is trained using a [multi-scale sub-band CQT discriminator](https://arxiv.org/abs/2311.14957) and a [multi-scale mel spectrogram loss](https://arxiv.org/abs/2306.06546).
26
+ - Larger training data: BigVGAN-v2 is trained using datasets containing diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
27
+ - We provide pretrained checkpoints of BigVGAN-v2 using diverse audio configurations, supporting up to 44 kHz sampling rate and 512x upsampling ratio.
28
+
29
+ ## Installation
30
+
31
+ The codebase has been tested on Python `3.10` and PyTorch `2.3.1` conda packages with either `pytorch-cuda=12.1` or `pytorch-cuda=11.8`. Below is an example command to create the conda environment:
32
+
33
+ ```shell
34
+ conda create -n bigvgan python=3.10 pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
35
+ conda activate bigvgan
36
+ ```
37
+
38
+ Clone the repository and install dependencies:
39
+
40
+ ```shell
41
+ git clone https://github.com/NVIDIA/BigVGAN
42
+ cd BigVGAN
43
+ pip install -r requirements.txt
44
+ ```
45
+
46
+ ## Inference Quickstart using 🤗 Hugging Face Hub
47
+
48
+ The example below shows how to use BigVGAN: load the pretrained BigVGAN generator from the Hugging Face Hub, compute a mel spectrogram from an input waveform, and generate a synthesized waveform using that mel spectrogram as the model's input.
49
+
50
+ ```python
51
+ device = 'cuda'
52
+
53
+ import torch
54
+ import bigvgan
55
+ import librosa
56
+ from meldataset import get_mel_spectrogram
57
+
58
+ # instantiate the model. You can optionally set use_cuda_kernel=True for faster inference.
59
+ model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x', use_cuda_kernel=False)
60
+
61
+ # remove weight norm in the model and set to eval mode
62
+ model.remove_weight_norm()
63
+ model = model.eval().to(device)
64
+
65
+ # load wav file and compute mel spectrogram
66
+ wav_path = '/path/to/your/audio.wav'
67
+ wav, sr = librosa.load(wav_path, sr=model.h.sampling_rate, mono=True) # wav is np.ndarray with shape [T_time] and values in [-1, 1]
68
+ wav = torch.FloatTensor(wav).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time]
69
+
70
+ # compute mel spectrogram from the ground truth audio
71
+ mel = get_mel_spectrogram(wav, model.h).to(device) # mel is FloatTensor with shape [B(1), C_mel, T_frame]
72
+
73
+ # generate waveform from mel
74
+ with torch.inference_mode():
75
+ wav_gen = model(mel) # wav_gen is FloatTensor with shape [B(1), 1, T_time] and values in [-1, 1]
76
+ wav_gen_float = wav_gen.squeeze(0).cpu() # wav_gen_float is FloatTensor with shape [1, T_time]
77
+
78
+ # you can convert the generated waveform to 16 bit linear PCM
79
+ wav_gen_int16 = (wav_gen_float * 32767.0).numpy().astype('int16') # wav_gen_int16 is np.ndarray with shape [1, T_time] and int16 dtype
80
+ ```
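+
+ To write the generated waveform to disk, a minimal sketch (assuming `scipy` is installed; the output filename is a placeholder, and the variables come from the snippet above):
+
+ ```python
+ # optional: save the 16-bit PCM waveform generated above as a .wav file
+ from scipy.io import wavfile
+
+ wavfile.write('bigvgan_output.wav', model.h.sampling_rate, wav_gen_int16[0])  # mono int16
+ ```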
81
+
82
+ ## Local gradio demo <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a>
83
+
84
+ You can run a local gradio demo with the command below:
85
+
86
+ ```shell
87
+ pip install -r demo/requirements.txt
88
+ python demo/app.py
89
+ ```
90
+
91
+ ## Training
92
+
93
+ Create symbolic links to the root of the dataset. The codebase uses filelists with paths relative to the dataset root. Below are example commands for the LibriTTS dataset:
94
+
95
+ ```shell
96
+ cd filelists/LibriTTS && \
97
+ ln -s /path/to/your/LibriTTS/train-clean-100 train-clean-100 && \
98
+ ln -s /path/to/your/LibriTTS/train-clean-360 train-clean-360 && \
99
+ ln -s /path/to/your/LibriTTS/train-other-500 train-other-500 && \
100
+ ln -s /path/to/your/LibriTTS/dev-clean dev-clean && \
101
+ ln -s /path/to/your/LibriTTS/dev-other dev-other && \
102
+ ln -s /path/to/your/LibriTTS/test-clean test-clean && \
103
+ ln -s /path/to/your/LibriTTS/test-other test-other && \
104
+ cd ../..
105
+ ```
106
+
107
+ Train the BigVGAN model. Below is an example command for training BigVGAN-v2 on the LibriTTS dataset at 24 kHz with a full 100-band mel spectrogram as input:
108
+
109
+ ```shell
110
+ python train.py \
111
+ --config configs/bigvgan_v2_24khz_100band_256x.json \
112
+ --input_wavs_dir filelists/LibriTTS \
113
+ --input_training_file filelists/LibriTTS/train-full.txt \
114
+ --input_validation_file filelists/LibriTTS/val-full.txt \
115
+ --list_input_unseen_wavs_dir filelists/LibriTTS filelists/LibriTTS \
116
+ --list_input_unseen_validation_file filelists/LibriTTS/dev-clean.txt filelists/LibriTTS/dev-other.txt \
117
+ --checkpoint_path exp/bigvgan_v2_24khz_100band_256x
118
+ ```
119
+
120
+ ## Synthesis
121
+
122
+ Synthesize from a trained BigVGAN model. Below is an example command for generating audio from the model.
123
+ It computes mel spectrograms using wav files from `--input_wavs_dir` and saves the generated audio to `--output_dir`.
124
+
125
+ ```shell
126
+ python inference.py \
127
+ --checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
128
+ --input_wavs_dir /path/to/your/input_wav \
129
+ --output_dir /path/to/your/output_wav
130
+ ```
131
+
132
+ `inference_e2e.py` supports synthesis directly from the mel spectrogram saved in `.npy` format, with shapes `[1, channel, frame]` or `[channel, frame]`.
133
+ It loads mel spectrograms from `--input_mels_dir` and saves the generated audio to `--output_dir`.
134
+
135
+ Make sure that the STFT hyperparameters used to compute the mel spectrograms match those of the model, as defined in the `config.json` of the corresponding model.
136
+
137
+ ```shell
138
+ python inference_e2e.py \
139
+ --checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
140
+ --input_mels_dir /path/to/your/input_mel \
141
+ --output_dir /path/to/your/output_wav
142
+ ```
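+
+ If you are producing the `.npy` mel inputs yourself, a minimal sketch (reusing `mel` from the quickstart above; the output path is a placeholder) that writes a mel in the accepted `[channel, frame]` layout:
+
+ ```python
+ # illustrative: export a mel spectrogram to .npy with shape [C_mel, T_frame]
+ import numpy as np
+
+ np.save('/path/to/your/input_mel/example.npy', mel.squeeze(0).cpu().numpy())
+ ```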
143
+
144
+ ## Using Custom CUDA Kernel for Synthesis
145
+
146
+ You can apply the fast CUDA inference kernel by passing the `use_cuda_kernel` parameter when instantiating BigVGAN:
147
+
148
+ ```python
149
+ generator = BigVGAN(h, use_cuda_kernel=True)
150
+ ```
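+
+ Here `h` is the hyperparameter object loaded from the model's `config.json`. A minimal sketch, assuming you run from the repository root and have a downloaded `config.json` (the path is a placeholder):
+
+ ```python
+ # illustrative: build the hyperparameter AttrDict and instantiate the generator
+ import json
+ from env import AttrDict
+ from bigvgan import BigVGAN
+
+ with open('/path/to/your/config.json') as f:
+     h = AttrDict(json.load(f))
+
+ generator = BigVGAN(h, use_cuda_kernel=True)
+ ```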
151
+
152
+ You can also pass `--use_cuda_kernel` to `inference.py` and `inference_e2e.py` to enable this feature.
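+
+ For example, the synthesis command from above with the kernel enabled (assuming the flag is a simple switch, as described):
+
+ ```shell
+ python inference.py \
+ --checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
+ --input_wavs_dir /path/to/your/input_wav \
+ --output_dir /path/to/your/output_wav \
+ --use_cuda_kernel
+ ```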
153
+
154
+ The first time it is used, it builds the kernel with `nvcc` and `ninja`. If the build succeeds, the kernel is saved to `alias_free_activation/cuda/build` and the model loads it automatically. The codebase has been tested using CUDA `12.1`.
155
+
156
+ Please make sure that both are installed on your system and that the `nvcc` version matches the one your PyTorch build was compiled against.
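+
+ A quick way to compare the two versions (standard commands; output formats vary):
+
+ ```shell
+ # CUDA toolkit version seen by nvcc
+ nvcc --version
+ # CUDA version your PyTorch build was compiled against
+ python -c "import torch; print(torch.version.cuda)"
+ ```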
157
+
158
+ We recommend running `test_cuda_vs_torch_model.py` first to build the kernel and check its correctness. See the example command below and its output, which should end with `[Success] test CUDA fused vs. plain torch BigVGAN inference`:
159
+
160
+ ```shell
161
+ python tests/test_cuda_vs_torch_model.py \
162
+ --checkpoint_file /path/to/your/bigvgan_generator.pt
163
+ ```
164
+
165
+ ```shell
166
+ loading plain Pytorch BigVGAN
167
+ ...
168
+ loading CUDA kernel BigVGAN with auto-build
169
+ Detected CUDA files, patching ldflags
170
+ Emitting ninja build file /path/to/your/BigVGAN/alias_free_activation/cuda/build/build.ninja..
171
+ Building extension module anti_alias_activation_cuda...
172
+ ...
173
+ Loading extension module anti_alias_activation_cuda...
174
+ ...
175
+ Loading '/path/to/your/bigvgan_generator.pt'
176
+ ...
177
+ [Success] test CUDA fused vs. plain torch BigVGAN inference
178
+ > mean_difference=0.0007238413265440613
179
+ ...
180
+ ```
181
+
182
+ If you see `[Fail] test CUDA fused vs. plain torch BigVGAN inference`, it means the CUDA kernel inference is incorrect. Please check whether the `nvcc` installed on your system is compatible with your PyTorch version.
183
+
184
+ ## Pretrained Models
185
+
186
+ We provide the [pretrained models on Hugging Face Collections](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a).
187
+ You can download the generator weights (named `bigvgan_generator.pt`) and the discriminator/optimizer states (named `bigvgan_discriminator_optimizer.pt`) from the listed model repositories; a download sketch follows the table.
188
+
189
+ | Model Name | Sampling Rate | Mel band | fmax | Upsampling Ratio | Params | Dataset | Steps | Fine-Tuned |
190
+ |:--------------------------------------------------------------------------------------------------------:|:-------------:|:--------:|:-----:|:----------------:|:------:|:--------------------------:|:-----:|:----------:|
191
+ | [bigvgan_v2_44khz_128band_512x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_512x) | 44 kHz | 128 | 22050 | 512 | 122M | Large-scale Compilation | 5M | No |
192
+ | [bigvgan_v2_44khz_128band_256x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_256x) | 44 kHz | 128 | 22050 | 256 | 112M | Large-scale Compilation | 5M | No |
193
+ | [bigvgan_v2_24khz_100band_256x](https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x) | 24 kHz | 100 | 12000 | 256 | 112M | Large-scale Compilation | 5M | No |
194
+ | [bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x) | 22 kHz | 80 | 11025 | 256 | 112M | Large-scale Compilation | 5M | No |
195
+ | [bigvgan_v2_22khz_80band_fmax8k_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_fmax8k_256x) | 22 kHz | 80 | 8000 | 256 | 112M | Large-scale Compilation | 5M | No |
196
+ | [bigvgan_24khz_100band](https://huggingface.co/nvidia/bigvgan_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 112M | LibriTTS | 5M | No |
197
+ | [bigvgan_base_24khz_100band](https://huggingface.co/nvidia/bigvgan_base_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 14M | LibriTTS | 5M | No |
198
+ | [bigvgan_22khz_80band](https://huggingface.co/nvidia/bigvgan_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 112M | LibriTTS + VCTK + LJSpeech | 5M | No |
199
+ | [bigvgan_base_22khz_80band](https://huggingface.co/nvidia/bigvgan_base_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 14M | LibriTTS + VCTK + LJSpeech | 5M | No |
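+
+ For example, a minimal sketch for fetching just the generator weights of one of the models above with `huggingface_hub` (assumes the package is installed):
+
+ ```python
+ # illustrative: download bigvgan_generator.pt from the Hugging Face Hub
+ from huggingface_hub import hf_hub_download
+
+ ckpt_path = hf_hub_download(
+     repo_id="nvidia/bigvgan_v2_24khz_100band_256x",
+     filename="bigvgan_generator.pt",
+ )
+ print(ckpt_path)  # local path of the cached checkpoint
+ ```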
200
+
201
+ The paper results are based on the original 24 kHz BigVGAN models (`bigvgan_24khz_100band` and `bigvgan_base_24khz_100band`) trained on the LibriTTS dataset.
202
+ We also provide 22 kHz BigVGAN models with a band-limited setup (i.e., fmax=8000) for TTS applications.
203
+ Note that these checkpoints use the `snakebeta` activation with log-scale parameterization, which gives the best overall quality.
204
+
205
+ You can fine-tune the models by:
206
+
207
+ 1. downloading the checkpoints (both the generator weight and its discriminator/optimizer states)
208
+ 2. resuming training on your audio dataset by passing a `--checkpoint_path` that contains those checkpoints when launching `train.py` (a rough example is sketched below)
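+
+ As a rough illustration (not an official recipe), a fine-tuning launch could mirror the training command above, with `--checkpoint_path` pointing at the directory that holds the downloaded checkpoints; all paths and filelists below are placeholders:
+
+ ```shell
+ python train.py \
+ --config configs/bigvgan_v2_24khz_100band_256x.json \
+ --input_wavs_dir /path/to/your/dataset \
+ --input_training_file /path/to/your/train_filelist.txt \
+ --input_validation_file /path/to/your/val_filelist.txt \
+ --list_input_unseen_wavs_dir /path/to/your/dataset \
+ --list_input_unseen_validation_file /path/to/your/unseen_filelist.txt \
+ --checkpoint_path /path/to/downloaded/checkpoints
+ ```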
209
+
210
+ ## Training Details of BigVGAN-v2
211
+
212
+ Compared to the original BigVGAN, the pretrained BigVGAN-v2 checkpoints were trained with `batch_size=32`, a longer `segment_size=65536`, and 8 A100 GPUs.
213
+
214
+ Note that the BigVGAN-v2 `json` config files in `./configs` use `batch_size=4` by default so that training fits on a single A100 GPU. You can adjust `batch_size` depending on your GPUs when fine-tuning.
215
+
216
+ When training BigVGAN-v2 from scratch with a small batch size, it can run into the early divergence problem mentioned in the paper. In that case, we recommend lowering the `clip_grad_norm` value (e.g., `100`) for the early training iterations (e.g., the first 20000 steps) and then increasing it back to the default `500`.
217
+
218
+ ## Evaluation Results of BigVGAN-v2
219
+
220
+ Below are the objective results of the 24 kHz model (`bigvgan_v2_24khz_100band_256x`) obtained on the LibriTTS `dev` sets. BigVGAN-v2 shows noticeable improvements in the metrics. The model also exhibits reduced perceptual artifacts, especially for non-speech audio.
221
+
222
+ | Model | Dataset | Steps | PESQ(↑) | M-STFT(↓) | MCD(↓) | Periodicity(↓) | V/UV F1(↑) |
223
+ |:----------:|:-----------------------:|:-----:|:---------:|:----------:|:----------:|:--------------:|:----------:|
224
+ | BigVGAN | LibriTTS | 1M | 4.027 | 0.7997 | 0.3745 | 0.1018 | 0.9598 |
225
+ | BigVGAN | LibriTTS | 5M | 4.256 | 0.7409 | 0.2988 | 0.0809 | 0.9698 |
226
+ | BigVGAN-v2 | Large-scale Compilation | 3M | 4.359 | 0.7134 | 0.3060 | 0.0621 | 0.9777 |
227
+ | BigVGAN-v2 | Large-scale Compilation | 5M | **4.362** | **0.7026** | **0.2903** | **0.0593** | **0.9793** |
228
+
229
+ ## Speed Benchmark
230
+
231
+ Below are the speed and VRAM usage benchmark results of BigVGAN from `tests/test_cuda_vs_torch_model.py`, using `bigvgan_v2_24khz_100band_256x` as a reference model.
232
+
233
+ | GPU | num_mel_frame | use_cuda_kernel | Speed (kHz) | Real-time Factor | VRAM (GB) |
234
+ |:--------------------------:|:-------------:|:---------------:|:-----------:|:----------------:|:---------:|
235
+ | NVIDIA A100 | 256 | False | 1672.1 | 69.7x | 1.3 |
236
+ | | | True | 3916.5 | 163.2x | 1.3 |
237
+ | | 2048 | False | 1899.6 | 79.2x | 1.7 |
238
+ | | | True | 5330.1 | 222.1x | 1.7 |
239
+ | | 16384 | False | 1973.8 | 82.2x | 5.0 |
240
+ | | | True | 5761.7 | 240.1x | 4.4 |
241
+ | NVIDIA GeForce RTX 3080 | 256 | False | 841.1 | 35.0x | 1.3 |
242
+ | | | True | 1598.1 | 66.6x | 1.3 |
243
+ | | 2048 | False | 929.9 | 38.7x | 1.7 |
244
+ | | | True | 1971.3 | 82.1x | 1.6 |
245
+ | | 16384 | False | 943.4 | 39.3x | 5.0 |
246
+ | | | True | 2026.5 | 84.4x | 3.9 |
247
+ | NVIDIA GeForce RTX 2080 Ti | 256 | False | 515.6 | 21.5x | 1.3 |
248
+ | | | True | 811.3 | 33.8x | 1.3 |
249
+ | | 2048 | False | 576.5 | 24.0x | 1.7 |
250
+ | | | True | 1023.0 | 42.6x | 1.5 |
251
+ | | 16384 | False | 589.4 | 24.6x | 5.0 |
252
+ | | | True | 1068.1 | 44.5x | 3.2 |
253
+
254
+ ## Acknowledgements
255
+
256
+ We thank Vijay Anand Korthikanti and Kevin J. Shih for their generous support in implementing the CUDA kernel for inference.
257
+
258
+ ## References
259
+
260
+ - [HiFi-GAN](https://github.com/jik876/hifi-gan) (for generator and multi-period discriminator)
261
+ - [Snake](https://github.com/EdwardDixon/snake) (for periodic activation)
262
+ - [Alias-free-torch](https://github.com/junjun3518/alias-free-torch) (for anti-aliasing)
263
+ - [Julius](https://github.com/adefossez/julius) (for low-pass filter)
264
+ - [UnivNet](https://github.com/mindslab-ai/univnet) (for multi-resolution discriminator)
265
+ - [descript-audio-codec](https://github.com/descriptinc/descript-audio-codec) and [vocos](https://github.com/gemelo-ai/vocos) (for multi-band multi-scale STFT discriminator and multi-scale mel spectrogram loss)
266
+ - [Amphion](https://github.com/open-mmlab/Amphion) (for multi-scale sub-band CQT discriminator)
GPT_SoVITS/BigVGAN/activations.py ADDED
@@ -0,0 +1,122 @@
1
+ # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import torch
5
+ from torch import nn, sin, pow
6
+ from torch.nn import Parameter
7
+
8
+
9
+ class Snake(nn.Module):
10
+ """
11
+ Implementation of a sine-based periodic activation function
12
+ Shape:
13
+ - Input: (B, C, T)
14
+ - Output: (B, C, T), same shape as the input
15
+ Parameters:
16
+ - alpha - trainable parameter
17
+ References:
18
+ - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
19
+ https://arxiv.org/abs/2006.08195
20
+ Examples:
21
+ >>> a1 = Snake(256)
22
+ >>> x = torch.randn(256)
23
+ >>> x = a1(x)
24
+ """
25
+
26
+ def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
27
+ """
28
+ Initialization.
29
+ INPUT:
30
+ - in_features: shape of the input
31
+ - alpha: trainable parameter
32
+ alpha is initialized to 1 by default, higher values = higher-frequency.
33
+ alpha will be trained along with the rest of your model.
34
+ """
35
+ super(Snake, self).__init__()
36
+ self.in_features = in_features
37
+
38
+ # Initialize alpha
39
+ self.alpha_logscale = alpha_logscale
40
+ if self.alpha_logscale: # Log scale alphas initialized to zeros
41
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
42
+ else: # Linear scale alphas initialized to ones
43
+ self.alpha = Parameter(torch.ones(in_features) * alpha)
44
+
45
+ self.alpha.requires_grad = alpha_trainable
46
+
47
+ self.no_div_by_zero = 0.000000001
48
+
49
+ def forward(self, x):
50
+ """
51
+ Forward pass of the function.
52
+ Applies the function to the input elementwise.
53
+ Snake ∶= x + 1/a * sin^2 (xa)
54
+ """
55
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
56
+ if self.alpha_logscale:
57
+ alpha = torch.exp(alpha)
58
+ x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
59
+
60
+ return x
61
+
62
+
63
+ class SnakeBeta(nn.Module):
64
+ """
65
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
66
+ Shape:
67
+ - Input: (B, C, T)
68
+ - Output: (B, C, T), same shape as the input
69
+ Parameters:
70
+ - alpha - trainable parameter that controls frequency
71
+ - beta - trainable parameter that controls magnitude
72
+ References:
73
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
74
+ https://arxiv.org/abs/2006.08195
75
+ Examples:
76
+ >>> a1 = SnakeBeta(256)
77
+ >>> x = torch.randn(256)
78
+ >>> x = a1(x)
79
+ """
80
+
81
+ def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
82
+ """
83
+ Initialization.
84
+ INPUT:
85
+ - in_features: shape of the input
86
+ - alpha - trainable parameter that controls frequency
87
+ - beta - trainable parameter that controls magnitude
88
+ alpha is initialized to 1 by default, higher values = higher-frequency.
89
+ beta is initialized to 1 by default, higher values = higher-magnitude.
90
+ alpha will be trained along with the rest of your model.
91
+ """
92
+ super(SnakeBeta, self).__init__()
93
+ self.in_features = in_features
94
+
95
+ # Initialize alpha
96
+ self.alpha_logscale = alpha_logscale
97
+ if self.alpha_logscale: # Log scale alphas initialized to zeros
98
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
99
+ self.beta = Parameter(torch.zeros(in_features) * alpha)
100
+ else: # Linear scale alphas initialized to ones
101
+ self.alpha = Parameter(torch.ones(in_features) * alpha)
102
+ self.beta = Parameter(torch.ones(in_features) * alpha)
103
+
104
+ self.alpha.requires_grad = alpha_trainable
105
+ self.beta.requires_grad = alpha_trainable
106
+
107
+ self.no_div_by_zero = 0.000000001
108
+
109
+ def forward(self, x):
110
+ """
111
+ Forward pass of the function.
112
+ Applies the function to the input elementwise.
113
+ SnakeBeta ∶= x + 1/b * sin^2 (xa)
114
+ """
115
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T]
116
+ beta = self.beta.unsqueeze(0).unsqueeze(-1)
117
+ if self.alpha_logscale:
118
+ alpha = torch.exp(alpha)
119
+ beta = torch.exp(beta)
120
+ x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
121
+
122
+ return x
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py ADDED
File without changes
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from alias_free_activation.torch.resample import UpSample1d, DownSample1d
7
+
8
+ # load fused CUDA kernel: this enables importing anti_alias_activation_cuda
9
+ from alias_free_activation.cuda import load
10
+
11
+ anti_alias_activation_cuda = load.load()
12
+
13
+
14
+ class FusedAntiAliasActivation(torch.autograd.Function):
15
+ """
16
+ Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs.
17
+ The hyperparameters are hard-coded in the kernel to maximize speed.
18
+ NOTE: The fused kernel is incorrect for Activation1d with different hyperparameters.
19
+ """
20
+
21
+ @staticmethod
22
+ def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta):
23
+ activation_results = anti_alias_activation_cuda.forward(inputs, up_ftr, down_ftr, alpha, beta)
24
+
25
+ return activation_results
26
+
27
+ @staticmethod
28
+ def backward(ctx, output_grads):
29
+ raise NotImplementedError
30
+ return output_grads, None, None
31
+
32
+
33
+ class Activation1d(nn.Module):
34
+ def __init__(
35
+ self,
36
+ activation,
37
+ up_ratio: int = 2,
38
+ down_ratio: int = 2,
39
+ up_kernel_size: int = 12,
40
+ down_kernel_size: int = 12,
41
+ fused: bool = True,
42
+ ):
43
+ super().__init__()
44
+ self.up_ratio = up_ratio
45
+ self.down_ratio = down_ratio
46
+ self.act = activation
47
+ self.upsample = UpSample1d(up_ratio, up_kernel_size)
48
+ self.downsample = DownSample1d(down_ratio, down_kernel_size)
49
+
50
+ self.fused = fused # Whether to use fused CUDA kernel or not
51
+
52
+ def forward(self, x):
53
+ if not self.fused:
54
+ x = self.upsample(x)
55
+ x = self.act(x)
56
+ x = self.downsample(x)
57
+ return x
58
+ else:
59
+ if self.act.__class__.__name__ == "Snake":
60
+ beta = self.act.alpha.data # Snake uses same params for alpha and beta
61
+ else:
62
+ beta = self.act.beta.data # Snakebeta uses different params for alpha and beta
63
+ alpha = self.act.alpha.data
64
+ if not self.act.alpha_logscale: # Exp baked into cuda kernel, cancel it out with a log
65
+ alpha = torch.log(alpha)
66
+ beta = torch.log(beta)
67
+
68
+ x = FusedAntiAliasActivation.apply(x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta)
69
+ return x
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp ADDED
@@ -0,0 +1,23 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <torch/extension.h>
18
+
19
+ extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta);
20
+
21
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
22
+ m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)");
23
+ }
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu ADDED
@@ -0,0 +1,246 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <ATen/ATen.h>
18
+ #include <cuda.h>
19
+ #include <cuda_runtime.h>
20
+ #include <cuda_fp16.h>
21
+ #include <cuda_profiler_api.h>
22
+ #include <ATen/cuda/CUDAContext.h>
23
+ #include <torch/extension.h>
24
+ #include "type_shim.h"
25
+ #include <assert.h>
26
+ #include <cfloat>
27
+ #include <limits>
28
+ #include <stdint.h>
29
+ #include <c10/macros/Macros.h>
30
+
31
+ namespace
32
+ {
33
+ // Hard-coded hyperparameters
34
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
35
+ constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 1 : 4;
36
+ constexpr int BUFFER_SIZE = 32;
37
+ constexpr int FILTER_SIZE = 12;
38
+ constexpr int HALF_FILTER_SIZE = 6;
39
+ constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl
40
+ constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl
41
+ constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl
42
+
43
+ template <typename input_t, typename output_t, typename acc_t>
44
+ __global__ void anti_alias_activation_forward(
45
+ output_t *dst,
46
+ const input_t *src,
47
+ const input_t *up_ftr,
48
+ const input_t *down_ftr,
49
+ const input_t *alpha,
50
+ const input_t *beta,
51
+ int batch_size,
52
+ int channels,
53
+ int seq_len)
54
+ {
55
+ // Up and downsample filters
56
+ input_t up_filter[FILTER_SIZE];
57
+ input_t down_filter[FILTER_SIZE];
58
+
59
+ // Load data from global memory including extra indices reserved for replication paddings
60
+ input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0};
61
+ input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0};
62
+
63
+ // Output stores downsampled output before writing to dst
64
+ output_t output[BUFFER_SIZE];
65
+
66
+ // blockDim/threadIdx = (128, 1, 1)
67
+ // gridDim/blockIdx = (seq_blocks, channels, batches)
68
+ int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
69
+ int local_offset = threadIdx.x * BUFFER_SIZE;
70
+ int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset;
71
+
72
+ // intermediates have double the seq_len
73
+ int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2;
74
+ int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset;
75
+
76
+ // Get values needed for replication padding before moving pointer
77
+ const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z));
78
+ input_t seq_left_most_value = right_most_pntr[0];
79
+ input_t seq_right_most_value = right_most_pntr[seq_len - 1];
80
+
81
+ // Move src and dst pointers
82
+ src += block_offset + local_offset;
83
+ dst += block_offset + local_offset;
84
+
85
+ // Alpha and beta values for snake activations. Applies exp by default
86
+ alpha = alpha + blockIdx.y;
87
+ input_t alpha_val = expf(alpha[0]);
88
+ beta = beta + blockIdx.y;
89
+ input_t beta_val = expf(beta[0]);
90
+
91
+ #pragma unroll
92
+ for (int it = 0; it < FILTER_SIZE; it += 1)
93
+ {
94
+ up_filter[it] = up_ftr[it];
95
+ down_filter[it] = down_ftr[it];
96
+ }
97
+
98
+ // Apply replication padding for upsampling, matching torch impl
99
+ #pragma unroll
100
+ for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1)
101
+ {
102
+ int element_index = seq_offset + it; // index for element
103
+ if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD))
104
+ {
105
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value;
106
+ }
107
+ if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD))
108
+ {
109
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value;
110
+ }
111
+ if ((element_index >= 0) && (element_index < seq_len))
112
+ {
113
+ elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it];
114
+ }
115
+ }
116
+
117
+ // Apply upsampling strided convolution and write to intermediates. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampling conv later
118
+ #pragma unroll
119
+ for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1)
120
+ {
121
+ input_t acc = 0.0;
122
+ int element_index = intermediate_seq_offset + it; // index for intermediate
123
+ #pragma unroll
124
+ for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
125
+ {
126
+ if ((element_index + f_idx) >= 0)
127
+ {
128
+ acc += up_filter[f_idx] * elements[it + f_idx];
129
+ }
130
+ }
131
+ intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc;
132
+ }
133
+
134
+ // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampling conv later
135
+ double no_div_by_zero = 0.000000001;
136
+ #pragma unroll
137
+ for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1)
138
+ {
139
+ intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val);
140
+ }
141
+
142
+ // Apply replication padding before downsampling conv from intermediates
143
+ #pragma unroll
144
+ for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1)
145
+ {
146
+ intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT];
147
+ }
148
+ #pragma unroll
149
+ for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1)
150
+ {
151
+ intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1];
152
+ }
153
+
154
+ // Apply downsample strided convolution (assuming stride=2) from intermediates
155
+ #pragma unroll
156
+ for (int it = 0; it < BUFFER_SIZE; it += 1)
157
+ {
158
+ input_t acc = 0.0;
159
+ #pragma unroll
160
+ for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1)
161
+ {
162
+ // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation
163
+ acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT];
164
+ }
165
+ output[it] = acc;
166
+ }
167
+
168
+ // Write output to dst
169
+ #pragma unroll
170
+ for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG)
171
+ {
172
+ int element_index = seq_offset + it;
173
+ if (element_index < seq_len)
174
+ {
175
+ dst[it] = output[it];
176
+ }
177
+ }
178
+
179
+ }
180
+
181
+ template <typename input_t, typename output_t, typename acc_t>
182
+ void dispatch_anti_alias_activation_forward(
183
+ output_t *dst,
184
+ const input_t *src,
185
+ const input_t *up_ftr,
186
+ const input_t *down_ftr,
187
+ const input_t *alpha,
188
+ const input_t *beta,
189
+ int batch_size,
190
+ int channels,
191
+ int seq_len)
192
+ {
193
+ if (seq_len == 0)
194
+ {
195
+ return;
196
+ }
197
+ else
198
+ {
199
+ // Use 128 threads per block to maximize gpu utilization
200
+ constexpr int threads_per_block = 128;
201
+ constexpr int seq_len_per_block = 4096;
202
+ int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block;
203
+ dim3 blocks(blocks_per_seq_len, channels, batch_size);
204
+ dim3 threads(threads_per_block, 1, 1);
205
+
206
+ anti_alias_activation_forward<input_t, output_t, acc_t>
207
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len);
208
+ }
209
+ }
210
+ }
211
+
212
+ extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta)
213
+ {
214
+ // Input is a 3d tensor with dimensions [batches, channels, seq_len]
215
+ const int batches = input.size(0);
216
+ const int channels = input.size(1);
217
+ const int seq_len = input.size(2);
218
+
219
+ // Output
220
+ auto act_options = input.options().requires_grad(false);
221
+
222
+ torch::Tensor anti_alias_activation_results =
223
+ torch::empty({batches, channels, seq_len}, act_options);
224
+
225
+ void *input_ptr = static_cast<void *>(input.data_ptr());
226
+ void *up_filter_ptr = static_cast<void *>(up_filter.data_ptr());
227
+ void *down_filter_ptr = static_cast<void *>(down_filter.data_ptr());
228
+ void *alpha_ptr = static_cast<void *>(alpha.data_ptr());
229
+ void *beta_ptr = static_cast<void *>(beta.data_ptr());
230
+ void *anti_alias_activation_results_ptr = static_cast<void *>(anti_alias_activation_results.data_ptr());
231
+
232
+ DISPATCH_FLOAT_HALF_AND_BFLOAT(
233
+ input.scalar_type(),
234
+ "dispatch anti alias activation_forward",
235
+ dispatch_anti_alias_activation_forward<scalar_t, scalar_t, float>(
236
+ reinterpret_cast<scalar_t *>(anti_alias_activation_results_ptr),
237
+ reinterpret_cast<const scalar_t *>(input_ptr),
238
+ reinterpret_cast<const scalar_t *>(up_filter_ptr),
239
+ reinterpret_cast<const scalar_t *>(down_filter_ptr),
240
+ reinterpret_cast<const scalar_t *>(alpha_ptr),
241
+ reinterpret_cast<const scalar_t *>(beta_ptr),
242
+ batches,
243
+ channels,
244
+ seq_len););
245
+ return anti_alias_activation_results;
246
+ }
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h ADDED
@@ -0,0 +1,29 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ /* This code is copied from NVIDIA apex:
18
+ * https://github.com/NVIDIA/apex
19
+ * with minor changes. */
20
+
21
+ #ifndef TORCH_CHECK
22
+ #define TORCH_CHECK AT_CHECK
23
+ #endif
24
+
25
+ #ifdef VERSION_GE_1_3
26
+ #define DATA_PTR data_ptr
27
+ #else
28
+ #define DATA_PTR data
29
+ #endif
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py ADDED
@@ -0,0 +1,82 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import os
5
+ import pathlib
6
+ import subprocess
7
+
8
+ from torch.utils import cpp_extension
9
+
10
+ """
11
+ Setting this param to a list has a problem of generating different compilation commands (with a different order of architectures) and leading to recompilation of fused kernels.
12
+ Set it to an empty string to avoid recompilation and assign arch flags explicitly in extra_cuda_cflags below
13
+ """
14
+ os.environ["TORCH_CUDA_ARCH_LIST"] = ""
15
+
16
+
17
+ def load():
18
+ # Check if cuda 11 is installed for compute capability 8.0
19
+ cc_flag = []
20
+ _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
21
+ if int(bare_metal_major) >= 11:
22
+ cc_flag.append("-gencode")
23
+ cc_flag.append("arch=compute_80,code=sm_80")
24
+
25
+ # Build path
26
+ srcpath = pathlib.Path(__file__).parent.absolute()
27
+ buildpath = srcpath / "build"
28
+ _create_build_dir(buildpath)
29
+
30
+ # Helper function to build the kernels.
31
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
32
+ return cpp_extension.load(
33
+ name=name,
34
+ sources=sources,
35
+ build_directory=buildpath,
36
+ extra_cflags=[
37
+ "-O3",
38
+ ],
39
+ extra_cuda_cflags=[
40
+ "-O3",
41
+ "-gencode",
42
+ "arch=compute_70,code=sm_70",
43
+ "--use_fast_math",
44
+ ]
45
+ + extra_cuda_flags
46
+ + cc_flag,
47
+ verbose=True,
48
+ )
49
+
50
+ extra_cuda_flags = [
51
+ "-U__CUDA_NO_HALF_OPERATORS__",
52
+ "-U__CUDA_NO_HALF_CONVERSIONS__",
53
+ "--expt-relaxed-constexpr",
54
+ "--expt-extended-lambda",
55
+ ]
56
+
57
+ sources = [
58
+ srcpath / "anti_alias_activation.cpp",
59
+ srcpath / "anti_alias_activation_cuda.cu",
60
+ ]
61
+ anti_alias_activation_cuda = _cpp_extention_load_helper("anti_alias_activation_cuda", sources, extra_cuda_flags)
62
+
63
+ return anti_alias_activation_cuda
64
+
65
+
66
+ def _get_cuda_bare_metal_version(cuda_dir):
67
+ raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
68
+ output = raw_output.split()
69
+ release_idx = output.index("release") + 1
70
+ release = output[release_idx].split(".")
71
+ bare_metal_major = release[0]
72
+ bare_metal_minor = release[1][0]
73
+
74
+ return raw_output, bare_metal_major, bare_metal_minor
75
+
76
+
77
+ def _create_build_dir(buildpath):
78
+ try:
79
+ os.mkdir(buildpath)
80
+ except OSError:
81
+ if not os.path.isdir(buildpath):
82
+ print(f"Creation of the build directory {buildpath} failed")
GPT_SoVITS/BigVGAN/alias_free_activation/cuda/type_shim.h ADDED
@@ -0,0 +1,92 @@
1
+ /* coding=utf-8
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <ATen/ATen.h>
18
+ #include "compat.h"
19
+
20
+ #define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \
21
+ switch (TYPE) \
22
+ { \
23
+ case at::ScalarType::Float: \
24
+ { \
25
+ using scalar_t = float; \
26
+ __VA_ARGS__; \
27
+ break; \
28
+ } \
29
+ case at::ScalarType::Half: \
30
+ { \
31
+ using scalar_t = at::Half; \
32
+ __VA_ARGS__; \
33
+ break; \
34
+ } \
35
+ case at::ScalarType::BFloat16: \
36
+ { \
37
+ using scalar_t = at::BFloat16; \
38
+ __VA_ARGS__; \
39
+ break; \
40
+ } \
41
+ default: \
42
+ AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
43
+ }
44
+
45
+ #define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) \
46
+ switch (TYPEIN) \
47
+ { \
48
+ case at::ScalarType::Float: \
49
+ { \
50
+ using scalar_t_in = float; \
51
+ switch (TYPEOUT) \
52
+ { \
53
+ case at::ScalarType::Float: \
54
+ { \
55
+ using scalar_t_out = float; \
56
+ __VA_ARGS__; \
57
+ break; \
58
+ } \
59
+ case at::ScalarType::Half: \
60
+ { \
61
+ using scalar_t_out = at::Half; \
62
+ __VA_ARGS__; \
63
+ break; \
64
+ } \
65
+ case at::ScalarType::BFloat16: \
66
+ { \
67
+ using scalar_t_out = at::BFloat16; \
68
+ __VA_ARGS__; \
69
+ break; \
70
+ } \
71
+ default: \
72
+ AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \
73
+ } \
74
+ break; \
75
+ } \
76
+ case at::ScalarType::Half: \
77
+ { \
78
+ using scalar_t_in = at::Half; \
79
+ using scalar_t_out = at::Half; \
80
+ __VA_ARGS__; \
81
+ break; \
82
+ } \
83
+ case at::ScalarType::BFloat16: \
84
+ { \
85
+ using scalar_t_in = at::BFloat16; \
86
+ using scalar_t_out = at::BFloat16; \
87
+ __VA_ARGS__; \
88
+ break; \
89
+ } \
90
+ default: \
91
+ AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \
92
+ }
GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ from .filter import *
5
+ from .resample import *
6
+ from .act import *
GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py ADDED
@@ -0,0 +1,30 @@
1
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import torch.nn as nn
5
+ from .resample import UpSample1d, DownSample1d
6
+
7
+
8
+ class Activation1d(nn.Module):
9
+ def __init__(
10
+ self,
11
+ activation,
12
+ up_ratio: int = 2,
13
+ down_ratio: int = 2,
14
+ up_kernel_size: int = 12,
15
+ down_kernel_size: int = 12,
16
+ ):
17
+ super().__init__()
18
+ self.up_ratio = up_ratio
19
+ self.down_ratio = down_ratio
20
+ self.act = activation
21
+ self.upsample = UpSample1d(up_ratio, up_kernel_size)
22
+ self.downsample = DownSample1d(down_ratio, down_kernel_size)
23
+
24
+ # x: [B,C,T]
25
+ def forward(self, x):
26
+ x = self.upsample(x)
27
+ x = self.act(x)
28
+ x = self.downsample(x)
29
+
30
+ return x
GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py ADDED
@@ -0,0 +1,99 @@
1
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import math
8
+
9
+ if "sinc" in dir(torch):
10
+ sinc = torch.sinc
11
+ else:
12
+ # This code is adapted from adefossez's julius.core.sinc under the MIT License
13
+ # https://adefossez.github.io/julius/julius/core.html
14
+ # LICENSE is in incl_licenses directory.
15
+ def sinc(x: torch.Tensor):
16
+ """
17
+ Implementation of sinc, i.e. sin(pi * x) / (pi * x)
18
+ __Warning__: Different from julius.sinc, the input is multiplied by `pi`!
19
+ """
20
+ return torch.where(
21
+ x == 0,
22
+ torch.tensor(1.0, device=x.device, dtype=x.dtype),
23
+ torch.sin(math.pi * x) / math.pi / x,
24
+ )
25
+
26
+
27
+ # This code is adapted from adefossez's julius.lowpass.LowPassFilters under the MIT License
28
+ # https://adefossez.github.io/julius/julius/lowpass.html
29
+ # LICENSE is in incl_licenses directory.
30
+ def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size]
31
+ even = kernel_size % 2 == 0
32
+ half_size = kernel_size // 2
33
+
34
+ # For kaiser window
35
+ delta_f = 4 * half_width
36
+ A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
37
+ if A > 50.0:
38
+ beta = 0.1102 * (A - 8.7)
39
+ elif A >= 21.0:
40
+ beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
41
+ else:
42
+ beta = 0.0
43
+ window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
44
+
45
+ # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
46
+ if even:
47
+ time = torch.arange(-half_size, half_size) + 0.5
48
+ else:
49
+ time = torch.arange(kernel_size) - half_size
50
+ if cutoff == 0:
51
+ filter_ = torch.zeros_like(time)
52
+ else:
53
+ filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
54
+ """
55
+ Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal.
56
+ """
57
+ filter_ /= filter_.sum()
58
+ filter = filter_.view(1, 1, kernel_size)
59
+
60
+ return filter
61
+
62
+
63
+ class LowPassFilter1d(nn.Module):
64
+ def __init__(
65
+ self,
66
+ cutoff=0.5,
67
+ half_width=0.6,
68
+ stride: int = 1,
69
+ padding: bool = True,
70
+ padding_mode: str = "replicate",
71
+ kernel_size: int = 12,
72
+ ):
73
+ """
74
+ kernel_size should be an even number for the StyleGAN3 setup; in this implementation, an odd number is also possible.
75
+ """
76
+ super().__init__()
77
+ if cutoff < -0.0:
78
+ raise ValueError("Minimum cutoff must be larger than zero.")
79
+ if cutoff > 0.5:
80
+ raise ValueError("A cutoff above 0.5 does not make sense.")
81
+ self.kernel_size = kernel_size
82
+ self.even = kernel_size % 2 == 0
83
+ self.pad_left = kernel_size // 2 - int(self.even)
84
+ self.pad_right = kernel_size // 2
85
+ self.stride = stride
86
+ self.padding = padding
87
+ self.padding_mode = padding_mode
88
+ filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
89
+ self.register_buffer("filter", filter)
90
+
91
+ # Input [B, C, T]
92
+ def forward(self, x):
93
+ _, C, _ = x.shape
94
+
95
+ if self.padding:
96
+ x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode)
97
+ out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
98
+
99
+ return out
GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py ADDED
@@ -0,0 +1,48 @@
1
+ # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import torch.nn as nn
5
+ from torch.nn import functional as F
6
+ from .filter import LowPassFilter1d
7
+ from .filter import kaiser_sinc_filter1d
8
+
9
+
10
+ class UpSample1d(nn.Module):
11
+ def __init__(self, ratio=2, kernel_size=None):
12
+ super().__init__()
13
+ self.ratio = ratio
14
+ self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
15
+ self.stride = ratio
16
+ self.pad = self.kernel_size // ratio - 1
17
+ self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
18
+ self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
19
+ filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size)
20
+ self.register_buffer("filter", filter)
21
+
22
+ # x: [B, C, T]
23
+ def forward(self, x):
24
+ _, C, _ = x.shape
25
+
26
+ x = F.pad(x, (self.pad, self.pad), mode="replicate")
27
+ x = self.ratio * F.conv_transpose1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C)
28
+ x = x[..., self.pad_left : -self.pad_right]
29
+
30
+ return x
31
+
32
+
33
+ class DownSample1d(nn.Module):
34
+ def __init__(self, ratio=2, kernel_size=None):
35
+ super().__init__()
36
+ self.ratio = ratio
37
+ self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
38
+ self.lowpass = LowPassFilter1d(
39
+ cutoff=0.5 / ratio,
40
+ half_width=0.6 / ratio,
41
+ stride=ratio,
42
+ kernel_size=self.kernel_size,
43
+ )
44
+
45
+ def forward(self, x):
46
+ xx = self.lowpass(x)
47
+
48
+ return xx
GPT_SoVITS/BigVGAN/bigvgan.py ADDED
@@ -0,0 +1,461 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
+ # LICENSE is in incl_licenses directory.
6
+
7
+ import os
8
+ import json
9
+ from pathlib import Path
10
+ from typing import Optional, Union, Dict
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ from torch.nn import Conv1d, ConvTranspose1d
15
+ from torch.nn.utils import weight_norm, remove_weight_norm
16
+
17
+ from . import activations
18
+ from .utils0 import init_weights, get_padding
19
+ from .alias_free_activation.torch.act import Activation1d as TorchActivation1d
20
+ from .env import AttrDict
21
+
22
+ from huggingface_hub import PyTorchModelHubMixin, hf_hub_download
23
+
24
+
25
+ def load_hparams_from_json(path) -> AttrDict:
26
+ with open(path) as f:
27
+ data = f.read()
28
+ return AttrDict(json.loads(data))
29
+
30
+
31
+ class AMPBlock1(torch.nn.Module):
32
+ """
33
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
34
+ AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1
35
+
36
+ Args:
37
+ h (AttrDict): Hyperparameters.
38
+ channels (int): Number of convolution channels.
39
+ kernel_size (int): Size of the convolution kernel. Default is 3.
40
+ dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5).
41
+ activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ h: AttrDict,
47
+ channels: int,
48
+ kernel_size: int = 3,
49
+ dilation: tuple = (1, 3, 5),
50
+ activation: str = None,
51
+ ):
52
+ super().__init__()
53
+
54
+ self.h = h
55
+
56
+ self.convs1 = nn.ModuleList(
57
+ [
58
+ weight_norm(
59
+ Conv1d(
60
+ channels,
61
+ channels,
62
+ kernel_size,
63
+ stride=1,
64
+ dilation=d,
65
+ padding=get_padding(kernel_size, d),
66
+ )
67
+ )
68
+ for d in dilation
69
+ ]
70
+ )
71
+ self.convs1.apply(init_weights)
72
+
73
+ self.convs2 = nn.ModuleList(
74
+ [
75
+ weight_norm(
76
+ Conv1d(
77
+ channels,
78
+ channels,
79
+ kernel_size,
80
+ stride=1,
81
+ dilation=1,
82
+ padding=get_padding(kernel_size, 1),
83
+ )
84
+ )
85
+ for _ in range(len(dilation))
86
+ ]
87
+ )
88
+ self.convs2.apply(init_weights)
89
+
90
+ self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers
91
+
92
+ # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
93
+ if self.h.get("use_cuda_kernel", False):
94
+ from .alias_free_activation.cuda.activation1d import (
95
+ Activation1d as CudaActivation1d,
96
+ )
97
+
98
+ Activation1d = CudaActivation1d
99
+ else:
100
+ Activation1d = TorchActivation1d
101
+
102
+ # Activation functions
103
+ if activation == "snake":
104
+ self.activations = nn.ModuleList(
105
+ [
106
+ Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
107
+ for _ in range(self.num_layers)
108
+ ]
109
+ )
110
+ elif activation == "snakebeta":
111
+ self.activations = nn.ModuleList(
112
+ [
113
+ Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
114
+ for _ in range(self.num_layers)
115
+ ]
116
+ )
117
+ else:
118
+ raise NotImplementedError(
119
+ "activation incorrectly specified. check the config file and look for 'activation'."
120
+ )
121
+
122
+ def forward(self, x):
123
+ acts1, acts2 = self.activations[::2], self.activations[1::2]
124
+ for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
125
+ xt = a1(x)
126
+ xt = c1(xt)
127
+ xt = a2(xt)
128
+ xt = c2(xt)
129
+ x = xt + x
130
+
131
+ return x
132
+
133
+ def remove_weight_norm(self):
134
+ for l in self.convs1:
135
+ remove_weight_norm(l)
136
+ for l in self.convs2:
137
+ remove_weight_norm(l)
138
+
139
+
140
+ class AMPBlock2(torch.nn.Module):
141
+ """
142
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
143
+ Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1
144
+
145
+ Args:
146
+ h (AttrDict): Hyperparameters.
147
+ channels (int): Number of convolution channels.
148
+ kernel_size (int): Size of the convolution kernel. Default is 3.
149
+ dilation (tuple): Dilation rates for the convolutions. Each dilation has a single convolution. Default is (1, 3, 5).
150
+ activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None.
151
+ """
152
+
153
+ def __init__(
154
+ self,
155
+ h: AttrDict,
156
+ channels: int,
157
+ kernel_size: int = 3,
158
+ dilation: tuple = (1, 3, 5),
159
+ activation: str = None,
160
+ ):
161
+ super().__init__()
162
+
163
+ self.h = h
164
+
165
+ self.convs = nn.ModuleList(
166
+ [
167
+ weight_norm(
168
+ Conv1d(
169
+ channels,
170
+ channels,
171
+ kernel_size,
172
+ stride=1,
173
+ dilation=d,
174
+ padding=get_padding(kernel_size, d),
175
+ )
176
+ )
177
+ for d in dilation
178
+ ]
179
+ )
180
+ self.convs.apply(init_weights)
181
+
182
+ self.num_layers = len(self.convs) # Total number of conv layers
183
+
184
+ # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
185
+ if self.h.get("use_cuda_kernel", False):
186
+ from .alias_free_activation.cuda.activation1d import (
187
+ Activation1d as CudaActivation1d,
188
+ )
189
+
190
+ Activation1d = CudaActivation1d
191
+ else:
192
+ Activation1d = TorchActivation1d
193
+
194
+ # Activation functions
195
+ if activation == "snake":
196
+ self.activations = nn.ModuleList(
197
+ [
198
+ Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale))
199
+ for _ in range(self.num_layers)
200
+ ]
201
+ )
202
+ elif activation == "snakebeta":
203
+ self.activations = nn.ModuleList(
204
+ [
205
+ Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale))
206
+ for _ in range(self.num_layers)
207
+ ]
208
+ )
209
+ else:
210
+ raise NotImplementedError(
211
+ "activation incorrectly specified. check the config file and look for 'activation'."
212
+ )
213
+
214
+ def forward(self, x):
215
+ for c, a in zip(self.convs, self.activations):
216
+ xt = a(x)
217
+ xt = c(xt)
218
+ x = xt + x
219
+ return x
220
+
221
+ def remove_weight_norm(self):
222
+ for l in self.convs:
223
+ remove_weight_norm(l)
224
+
225
+
226
+ class BigVGAN(
227
+ torch.nn.Module,
228
+ PyTorchModelHubMixin,
229
+ # library_name="bigvgan",
230
+ # repo_url="https://github.com/NVIDIA/BigVGAN",
231
+ # docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
232
+ # pipeline_tag="audio-to-audio",
233
+ # license="mit",
234
+ # tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
235
+ ):
236
+ """
237
+ BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
238
+ New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.
239
+
240
+ Args:
241
+ h (AttrDict): Hyperparameters.
242
+ use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.
243
+
244
+ Note:
245
+ - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
246
+ - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
247
+ """
248
+
249
+ def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
250
+ super().__init__()
251
+ self.h = h
252
+ self.h["use_cuda_kernel"] = use_cuda_kernel
253
+
254
+ # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
255
+ if self.h.get("use_cuda_kernel", False):
256
+ from .alias_free_activation.cuda.activation1d import (
257
+ Activation1d as CudaActivation1d,
258
+ )
259
+
260
+ Activation1d = CudaActivation1d
261
+ else:
262
+ Activation1d = TorchActivation1d
263
+
264
+ self.num_kernels = len(h.resblock_kernel_sizes)
265
+ self.num_upsamples = len(h.upsample_rates)
266
+
267
+ # Pre-conv
268
+ self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
269
+
270
+ # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
271
+ if h.resblock == "1":
272
+ resblock_class = AMPBlock1
273
+ elif h.resblock == "2":
274
+ resblock_class = AMPBlock2
275
+ else:
276
+ raise ValueError(f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}")
277
+
278
+ # Transposed conv-based upsamplers (anti-aliasing is not applied here)
279
+ self.ups = nn.ModuleList()
280
+ for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
281
+ self.ups.append(
282
+ nn.ModuleList(
283
+ [
284
+ weight_norm(
285
+ ConvTranspose1d(
286
+ h.upsample_initial_channel // (2**i),
287
+ h.upsample_initial_channel // (2 ** (i + 1)),
288
+ k,
289
+ u,
290
+ padding=(k - u) // 2,
291
+ )
292
+ )
293
+ ]
294
+ )
295
+ )
296
+
297
+ # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
298
+ self.resblocks = nn.ModuleList()
299
+ for i in range(len(self.ups)):
300
+ ch = h.upsample_initial_channel // (2 ** (i + 1))
301
+ for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
302
+ self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation))
303
+
304
+ # Post-conv
305
+ activation_post = (
306
+ activations.Snake(ch, alpha_logscale=h.snake_logscale)
307
+ if h.activation == "snake"
308
+ else (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) if h.activation == "snakebeta" else None)
309
+ )
310
+ if activation_post is None:
311
+ raise NotImplementedError(
312
+ "activation incorrectly specified. check the config file and look for 'activation'."
313
+ )
314
+
315
+ self.activation_post = Activation1d(activation=activation_post)
316
+
317
+ # Whether to use bias for the final conv_post. Default to True for backward compatibility
318
+ self.use_bias_at_final = h.get("use_bias_at_final", True)
319
+ self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final))
320
+
321
+ # Weight initialization
322
+ for i in range(len(self.ups)):
323
+ self.ups[i].apply(init_weights)
324
+ self.conv_post.apply(init_weights)
325
+
326
+ # Final tanh activation. Defaults to True for backward compatibility
327
+ self.use_tanh_at_final = h.get("use_tanh_at_final", True)
328
+
329
+ def forward(self, x):
330
+ # Pre-conv
331
+ x = self.conv_pre(x)
332
+
333
+ for i in range(self.num_upsamples):
334
+ # Upsampling
335
+ for i_up in range(len(self.ups[i])):
336
+ x = self.ups[i][i_up](x)
337
+ # AMP blocks
338
+ xs = None
339
+ for j in range(self.num_kernels):
340
+ if xs is None:
341
+ xs = self.resblocks[i * self.num_kernels + j](x)
342
+ else:
343
+ xs += self.resblocks[i * self.num_kernels + j](x)
344
+ x = xs / self.num_kernels
345
+
346
+ # Post-conv
347
+ x = self.activation_post(x)
348
+ x = self.conv_post(x)
349
+ # Final tanh activation
350
+ if self.use_tanh_at_final:
351
+ x = torch.tanh(x)
352
+ else:
353
+ x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1]
354
+
355
+ return x
356
+
357
+ def remove_weight_norm(self):
358
+ try:
359
+ # print("Removing weight norm...")
360
+ for l in self.ups:
361
+ for l_i in l:
362
+ remove_weight_norm(l_i)
363
+ for l in self.resblocks:
364
+ l.remove_weight_norm()
365
+ remove_weight_norm(self.conv_pre)
366
+ remove_weight_norm(self.conv_post)
367
+ except ValueError:
368
+ print("[INFO] Model already removed weight norm. Skipping!")
369
+ pass
370
+
371
+ # Additional methods for huggingface_hub support
372
+ def _save_pretrained(self, save_directory: Path) -> None:
373
+ """Save weights and config.json from a Pytorch model to a local directory."""
374
+
375
+ model_path = save_directory / "bigvgan_generator.pt"
376
+ torch.save({"generator": self.state_dict()}, model_path)
377
+
378
+ config_path = save_directory / "config.json"
379
+ with open(config_path, "w") as config_file:
380
+ json.dump(self.h, config_file, indent=4)
381
+
382
+ @classmethod
383
+ def _from_pretrained(
384
+ cls,
385
+ *,
386
+ model_id: str,
387
+ revision: str,
388
+ cache_dir: str,
389
+ force_download: bool,
390
+ proxies: Optional[Dict],
391
+ resume_download: bool,
392
+ local_files_only: bool,
393
+ token: Union[str, bool, None],
394
+ map_location: str = "cpu", # Additional argument
395
+ strict: bool = False, # Additional argument
396
+ use_cuda_kernel: bool = False,
397
+ **model_kwargs,
398
+ ):
399
+ """Load Pytorch pretrained weights and return the loaded model."""
400
+
401
+ # Download and load hyperparameters (h) used by BigVGAN
402
+ if os.path.isdir(model_id):
403
+ # print("Loading config.json from local directory")
404
+ config_file = os.path.join(model_id, "config.json")
405
+ else:
406
+ config_file = hf_hub_download(
407
+ repo_id=model_id,
408
+ filename="config.json",
409
+ revision=revision,
410
+ cache_dir=cache_dir,
411
+ force_download=force_download,
412
+ proxies=proxies,
413
+ resume_download=resume_download,
414
+ token=token,
415
+ local_files_only=local_files_only,
416
+ )
417
+ h = load_hparams_from_json(config_file)
418
+
419
+ # instantiate BigVGAN using h
420
+ if use_cuda_kernel:
421
+ print(
422
+ "[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
423
+ )
424
+ print(
425
+ "[WARNING] You need nvcc and ninja installed on your system, matching the CUDA version your PyTorch build uses, in order to build the kernel. If not, the model will fail to initialize or will generate incorrect waveforms!"
426
+ )
427
+ print(
428
+ "[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
429
+ )
430
+ model = cls(h, use_cuda_kernel=use_cuda_kernel)
431
+
432
+ # Download and load pretrained generator weight
433
+ if os.path.isdir(model_id):
434
+ # print("Loading weights from local directory")
435
+ model_file = os.path.join(model_id, "bigvgan_generator.pt")
436
+ else:
437
+ # print(f"Loading weights from {model_id}")
438
+ model_file = hf_hub_download(
439
+ repo_id=model_id,
440
+ filename="bigvgan_generator.pt",
441
+ revision=revision,
442
+ cache_dir=cache_dir,
443
+ force_download=force_download,
444
+ proxies=proxies,
445
+ resume_download=resume_download,
446
+ token=token,
447
+ local_files_only=local_files_only,
448
+ )
449
+
450
+ checkpoint_dict = torch.load(model_file, map_location=map_location)
451
+
452
+ try:
453
+ model.load_state_dict(checkpoint_dict["generator"])
454
+ except RuntimeError:
455
+ print(
456
+ "[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
457
+ )
458
+ model.remove_weight_norm()
459
+ model.load_state_dict(checkpoint_dict["generator"])
460
+
461
+ return model
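Taken together, _from_pretrained and forward give the usual inference path: download config.json and bigvgan_generator.pt, build the generator, fold weight norm, and map a mel spectrogram [B, num_mels, frames] to a waveform [B, 1, frames * hop_size]. A hedged sketch, not part of the commit; the checkpoint id is only an example and the import path is assumed from this repository layout:

import torch
from GPT_SoVITS.BigVGAN.bigvgan import BigVGAN  # assumed import path

# Example checkpoint id; substitute whichever BigVGAN variant you actually use.
model = BigVGAN.from_pretrained("nvidia/bigvgan_v2_24khz_100band_256x", use_cuda_kernel=False)
model.remove_weight_norm()                       # fold weight norm for inference
model.eval()

mel = torch.randn(1, model.h.num_mels, 128)      # dummy mel spectrogram, [B, num_mels, frames]
with torch.no_grad():
    wav = model(mel)                             # [B, 1, frames * prod(upsample_rates)]
print(wav.shape)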
GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json ADDED
@@ -0,0 +1,45 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 32,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "activation": "snakebeta",
18
+ "snake_logscale": true,
19
+
20
+ "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]],
21
+ "mpd_reshapes": [2, 3, 5, 7, 11],
22
+ "use_spectral_norm": false,
23
+ "discriminator_channel_mult": 1,
24
+
25
+ "segment_size": 8192,
26
+ "num_mels": 80,
27
+ "num_freq": 1025,
28
+ "n_fft": 1024,
29
+ "hop_size": 256,
30
+ "win_size": 1024,
31
+
32
+ "sampling_rate": 22050,
33
+
34
+ "fmin": 0,
35
+ "fmax": 8000,
36
+ "fmax_for_loss": null,
37
+
38
+ "num_workers": 4,
39
+
40
+ "dist_config": {
41
+ "dist_backend": "nccl",
42
+ "dist_url": "tcp://localhost:54321",
43
+ "world_size": 1
44
+ }
45
+ }
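In every config of this family the product of upsample_rates has to equal hop_size, since the generator converts one mel frame into hop_size waveform samples (here 4*4*2*2*2*2 = 256). An illustrative check, assuming the config path below:

import json
from functools import reduce

with open("GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json") as f:
    h = json.load(f)

total_upsample = reduce(lambda a, b: a * b, h["upsample_rates"])
assert total_upsample == h["hop_size"] == 256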
GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json ADDED
@@ -0,0 +1,45 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 32,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "activation": "snakebeta",
18
+ "snake_logscale": true,
19
+
20
+ "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]],
21
+ "mpd_reshapes": [2, 3, 5, 7, 11],
22
+ "use_spectral_norm": false,
23
+ "discriminator_channel_mult": 1,
24
+
25
+ "segment_size": 8192,
26
+ "num_mels": 100,
27
+ "num_freq": 1025,
28
+ "n_fft": 1024,
29
+ "hop_size": 256,
30
+ "win_size": 1024,
31
+
32
+ "sampling_rate": 24000,
33
+
34
+ "fmin": 0,
35
+ "fmax": 12000,
36
+ "fmax_for_loss": null,
37
+
38
+ "num_workers": 4,
39
+
40
+ "dist_config": {
41
+ "dist_backend": "nccl",
42
+ "dist_url": "tcp://localhost:54321",
43
+ "world_size": 1
44
+ }
45
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json ADDED
@@ -0,0 +1,45 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 32,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [8,8,2,2],
12
+ "upsample_kernel_sizes": [16,16,4,4],
13
+ "upsample_initial_channel": 512,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "activation": "snakebeta",
18
+ "snake_logscale": true,
19
+
20
+ "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]],
21
+ "mpd_reshapes": [2, 3, 5, 7, 11],
22
+ "use_spectral_norm": false,
23
+ "discriminator_channel_mult": 1,
24
+
25
+ "segment_size": 8192,
26
+ "num_mels": 80,
27
+ "num_freq": 1025,
28
+ "n_fft": 1024,
29
+ "hop_size": 256,
30
+ "win_size": 1024,
31
+
32
+ "sampling_rate": 22050,
33
+
34
+ "fmin": 0,
35
+ "fmax": 8000,
36
+ "fmax_for_loss": null,
37
+
38
+ "num_workers": 4,
39
+
40
+ "dist_config": {
41
+ "dist_backend": "nccl",
42
+ "dist_url": "tcp://localhost:54321",
43
+ "world_size": 1
44
+ }
45
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json ADDED
@@ -0,0 +1,45 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 32,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [8,8,2,2],
12
+ "upsample_kernel_sizes": [16,16,4,4],
13
+ "upsample_initial_channel": 512,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "activation": "snakebeta",
18
+ "snake_logscale": true,
19
+
20
+ "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]],
21
+ "mpd_reshapes": [2, 3, 5, 7, 11],
22
+ "use_spectral_norm": false,
23
+ "discriminator_channel_mult": 1,
24
+
25
+ "segment_size": 8192,
26
+ "num_mels": 100,
27
+ "num_freq": 1025,
28
+ "n_fft": 1024,
29
+ "hop_size": 256,
30
+ "win_size": 1024,
31
+
32
+ "sampling_rate": 24000,
33
+
34
+ "fmin": 0,
35
+ "fmax": 12000,
36
+ "fmax_for_loss": null,
37
+
38
+ "num_workers": 4,
39
+
40
+ "dist_config": {
41
+ "dist_backend": "nccl",
42
+ "dist_url": "tcp://localhost:54321",
43
+ "world_size": 1
44
+ }
45
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 4,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "use_tanh_at_final": false,
18
+ "use_bias_at_final": false,
19
+
20
+ "activation": "snakebeta",
21
+ "snake_logscale": true,
22
+
23
+ "use_cqtd_instead_of_mrd": true,
24
+ "cqtd_filters": 128,
25
+ "cqtd_max_filters": 1024,
26
+ "cqtd_filters_scale": 1,
27
+ "cqtd_dilations": [1, 2, 4],
28
+ "cqtd_hop_lengths": [512, 256, 256],
29
+ "cqtd_n_octaves": [9, 9, 9],
30
+ "cqtd_bins_per_octaves": [24, 36, 48],
31
+
32
+ "mpd_reshapes": [2, 3, 5, 7, 11],
33
+ "use_spectral_norm": false,
34
+ "discriminator_channel_mult": 1,
35
+
36
+ "use_multiscale_melloss": true,
37
+ "lambda_melloss": 15,
38
+
39
+ "clip_grad_norm": 500,
40
+
41
+ "segment_size": 65536,
42
+ "num_mels": 80,
43
+ "num_freq": 1025,
44
+ "n_fft": 1024,
45
+ "hop_size": 256,
46
+ "win_size": 1024,
47
+
48
+ "sampling_rate": 22050,
49
+
50
+ "fmin": 0,
51
+ "fmax": null,
52
+ "fmax_for_loss": null,
53
+
54
+ "num_workers": 4,
55
+
56
+ "dist_config": {
57
+ "dist_backend": "nccl",
58
+ "dist_url": "tcp://localhost:54321",
59
+ "world_size": 1
60
+ }
61
+ }
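Relative to the v1 configs above, the v2 configs switch the multi-resolution discriminator to CQTD (use_cqtd_instead_of_mrd), add the multi-scale mel loss, and disable the final tanh and bias. A minimal sketch of building an untrained generator straight from such a config; the import and file paths are assumed from this repository layout:

from GPT_SoVITS.BigVGAN.bigvgan import BigVGAN, load_hparams_from_json  # assumed import path

h = load_hparams_from_json("GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json")
model = BigVGAN(h, use_cuda_kernel=False)  # v2: no tanh and no bias at the final conv
print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")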
GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 4,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "use_tanh_at_final": false,
18
+ "use_bias_at_final": false,
19
+
20
+ "activation": "snakebeta",
21
+ "snake_logscale": true,
22
+
23
+ "use_cqtd_instead_of_mrd": true,
24
+ "cqtd_filters": 128,
25
+ "cqtd_max_filters": 1024,
26
+ "cqtd_filters_scale": 1,
27
+ "cqtd_dilations": [1, 2, 4],
28
+ "cqtd_hop_lengths": [512, 256, 256],
29
+ "cqtd_n_octaves": [9, 9, 9],
30
+ "cqtd_bins_per_octaves": [24, 36, 48],
31
+
32
+ "mpd_reshapes": [2, 3, 5, 7, 11],
33
+ "use_spectral_norm": false,
34
+ "discriminator_channel_mult": 1,
35
+
36
+ "use_multiscale_melloss": true,
37
+ "lambda_melloss": 15,
38
+
39
+ "clip_grad_norm": 500,
40
+
41
+ "segment_size": 65536,
42
+ "num_mels": 80,
43
+ "num_freq": 1025,
44
+ "n_fft": 1024,
45
+ "hop_size": 256,
46
+ "win_size": 1024,
47
+
48
+ "sampling_rate": 22050,
49
+
50
+ "fmin": 0,
51
+ "fmax": 8000,
52
+ "fmax_for_loss": null,
53
+
54
+ "num_workers": 4,
55
+
56
+ "dist_config": {
57
+ "dist_backend": "nccl",
58
+ "dist_url": "tcp://localhost:54321",
59
+ "world_size": 1
60
+ }
61
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_v2_24khz_100band_256x.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 4,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "use_tanh_at_final": false,
18
+ "use_bias_at_final": false,
19
+
20
+ "activation": "snakebeta",
21
+ "snake_logscale": true,
22
+
23
+ "use_cqtd_instead_of_mrd": true,
24
+ "cqtd_filters": 128,
25
+ "cqtd_max_filters": 1024,
26
+ "cqtd_filters_scale": 1,
27
+ "cqtd_dilations": [1, 2, 4],
28
+ "cqtd_hop_lengths": [512, 256, 256],
29
+ "cqtd_n_octaves": [9, 9, 9],
30
+ "cqtd_bins_per_octaves": [24, 36, 48],
31
+
32
+ "mpd_reshapes": [2, 3, 5, 7, 11],
33
+ "use_spectral_norm": false,
34
+ "discriminator_channel_mult": 1,
35
+
36
+ "use_multiscale_melloss": true,
37
+ "lambda_melloss": 15,
38
+
39
+ "clip_grad_norm": 500,
40
+
41
+ "segment_size": 65536,
42
+ "num_mels": 100,
43
+ "num_freq": 1025,
44
+ "n_fft": 1024,
45
+ "hop_size": 256,
46
+ "win_size": 1024,
47
+
48
+ "sampling_rate": 24000,
49
+
50
+ "fmin": 0,
51
+ "fmax": null,
52
+ "fmax_for_loss": null,
53
+
54
+ "num_workers": 4,
55
+
56
+ "dist_config": {
57
+ "dist_backend": "nccl",
58
+ "dist_url": "tcp://localhost:54321",
59
+ "world_size": 1
60
+ }
61
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 4,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [4,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [8,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "use_tanh_at_final": false,
18
+ "use_bias_at_final": false,
19
+
20
+ "activation": "snakebeta",
21
+ "snake_logscale": true,
22
+
23
+ "use_cqtd_instead_of_mrd": true,
24
+ "cqtd_filters": 128,
25
+ "cqtd_max_filters": 1024,
26
+ "cqtd_filters_scale": 1,
27
+ "cqtd_dilations": [1, 2, 4],
28
+ "cqtd_hop_lengths": [512, 256, 256],
29
+ "cqtd_n_octaves": [9, 9, 9],
30
+ "cqtd_bins_per_octaves": [24, 36, 48],
31
+
32
+ "mpd_reshapes": [2, 3, 5, 7, 11],
33
+ "use_spectral_norm": false,
34
+ "discriminator_channel_mult": 1,
35
+
36
+ "use_multiscale_melloss": true,
37
+ "lambda_melloss": 15,
38
+
39
+ "clip_grad_norm": 500,
40
+
41
+ "segment_size": 65536,
42
+ "num_mels": 128,
43
+ "num_freq": 1025,
44
+ "n_fft": 1024,
45
+ "hop_size": 256,
46
+ "win_size": 1024,
47
+
48
+ "sampling_rate": 44100,
49
+
50
+ "fmin": 0,
51
+ "fmax": null,
52
+ "fmax_for_loss": null,
53
+
54
+ "num_workers": 4,
55
+
56
+ "dist_config": {
57
+ "dist_backend": "nccl",
58
+ "dist_url": "tcp://localhost:54321",
59
+ "world_size": 1
60
+ }
61
+ }
GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "resblock": "1",
3
+ "num_gpus": 0,
4
+ "batch_size": 4,
5
+ "learning_rate": 0.0001,
6
+ "adam_b1": 0.8,
7
+ "adam_b2": 0.99,
8
+ "lr_decay": 0.9999996,
9
+ "seed": 1234,
10
+
11
+ "upsample_rates": [8,4,2,2,2,2],
12
+ "upsample_kernel_sizes": [16,8,4,4,4,4],
13
+ "upsample_initial_channel": 1536,
14
+ "resblock_kernel_sizes": [3,7,11],
15
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
16
+
17
+ "use_tanh_at_final": false,
18
+ "use_bias_at_final": false,
19
+
20
+ "activation": "snakebeta",
21
+ "snake_logscale": true,
22
+
23
+ "use_cqtd_instead_of_mrd": true,
24
+ "cqtd_filters": 128,
25
+ "cqtd_max_filters": 1024,
26
+ "cqtd_filters_scale": 1,
27
+ "cqtd_dilations": [1, 2, 4],
28
+ "cqtd_hop_lengths": [512, 256, 256],
29
+ "cqtd_n_octaves": [9, 9, 9],
30
+ "cqtd_bins_per_octaves": [24, 36, 48],
31
+
32
+ "mpd_reshapes": [2, 3, 5, 7, 11],
33
+ "use_spectral_norm": false,
34
+ "discriminator_channel_mult": 1,
35
+
36
+ "use_multiscale_melloss": true,
37
+ "lambda_melloss": 15,
38
+
39
+ "clip_grad_norm": 500,
40
+
41
+ "segment_size": 65536,
42
+ "num_mels": 128,
43
+ "num_freq": 2049,
44
+ "n_fft": 2048,
45
+ "hop_size": 512,
46
+ "win_size": 2048,
47
+
48
+ "sampling_rate": 44100,
49
+
50
+ "fmin": 0,
51
+ "fmax": null,
52
+ "fmax_for_loss": null,
53
+
54
+ "num_workers": 4,
55
+
56
+ "dist_config": {
57
+ "dist_backend": "nccl",
58
+ "dist_url": "tcp://localhost:54321",
59
+ "world_size": 1
60
+ }
61
+ }
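Compared with the 256x variants, this 512x config halves the mel frame rate: upsample_rates now multiply to 512 (8*4*2*2*2*2), matching hop_size 512, and the STFT settings scale with it (n_fft 2048, so num_freq = 2048 // 2 + 1 = 2049). As a small illustrative check:

upsample_rates = [8, 4, 2, 2, 2, 2]
total = 1
for r in upsample_rates:
    total *= r
assert total == 512              # matches "hop_size"
assert 2048 // 2 + 1 == 2049     # matches "num_freq" for n_fft = 2048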
GPT_SoVITS/BigVGAN/discriminators.py ADDED
@@ -0,0 +1,625 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
+ # LICENSE is in incl_licenses directory.
6
+
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import torch.nn as nn
11
+ from torch.nn import Conv2d
12
+ from torch.nn.utils import weight_norm, spectral_norm
13
+ from torchaudio.transforms import Spectrogram, Resample
14
+
15
+ from env import AttrDict
16
+ from utils import get_padding
17
+ import typing
18
+ from typing import List, Tuple
19
+
20
+
21
+ class DiscriminatorP(torch.nn.Module):
22
+ def __init__(
23
+ self,
24
+ h: AttrDict,
25
+ period: List[int],
26
+ kernel_size: int = 5,
27
+ stride: int = 3,
28
+ use_spectral_norm: bool = False,
29
+ ):
30
+ super().__init__()
31
+ self.period = period
32
+ self.d_mult = h.discriminator_channel_mult
33
+ norm_f = weight_norm if not use_spectral_norm else spectral_norm
34
+
35
+ self.convs = nn.ModuleList(
36
+ [
37
+ norm_f(
38
+ Conv2d(
39
+ 1,
40
+ int(32 * self.d_mult),
41
+ (kernel_size, 1),
42
+ (stride, 1),
43
+ padding=(get_padding(5, 1), 0),
44
+ )
45
+ ),
46
+ norm_f(
47
+ Conv2d(
48
+ int(32 * self.d_mult),
49
+ int(128 * self.d_mult),
50
+ (kernel_size, 1),
51
+ (stride, 1),
52
+ padding=(get_padding(5, 1), 0),
53
+ )
54
+ ),
55
+ norm_f(
56
+ Conv2d(
57
+ int(128 * self.d_mult),
58
+ int(512 * self.d_mult),
59
+ (kernel_size, 1),
60
+ (stride, 1),
61
+ padding=(get_padding(5, 1), 0),
62
+ )
63
+ ),
64
+ norm_f(
65
+ Conv2d(
66
+ int(512 * self.d_mult),
67
+ int(1024 * self.d_mult),
68
+ (kernel_size, 1),
69
+ (stride, 1),
70
+ padding=(get_padding(5, 1), 0),
71
+ )
72
+ ),
73
+ norm_f(
74
+ Conv2d(
75
+ int(1024 * self.d_mult),
76
+ int(1024 * self.d_mult),
77
+ (kernel_size, 1),
78
+ 1,
79
+ padding=(2, 0),
80
+ )
81
+ ),
82
+ ]
83
+ )
84
+ self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0)))
85
+
86
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
87
+ fmap = []
88
+
89
+ # 1d to 2d
90
+ b, c, t = x.shape
91
+ if t % self.period != 0: # pad first
92
+ n_pad = self.period - (t % self.period)
93
+ x = F.pad(x, (0, n_pad), "reflect")
94
+ t = t + n_pad
95
+ x = x.view(b, c, t // self.period, self.period)
96
+
97
+ for l in self.convs:
98
+ x = l(x)
99
+ x = F.leaky_relu(x, 0.1)
100
+ fmap.append(x)
101
+ x = self.conv_post(x)
102
+ fmap.append(x)
103
+ x = torch.flatten(x, 1, -1)
104
+
105
+ return x, fmap
106
+
107
+
108
+ class MultiPeriodDiscriminator(torch.nn.Module):
109
+ def __init__(self, h: AttrDict):
110
+ super().__init__()
111
+ self.mpd_reshapes = h.mpd_reshapes
112
+ print(f"mpd_reshapes: {self.mpd_reshapes}")
113
+ self.discriminators = nn.ModuleList(
114
+ [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
115
+ )
116
+
117
+ def forward(
118
+ self, y: torch.Tensor, y_hat: torch.Tensor
119
+ ) -> Tuple[
120
+ List[torch.Tensor],
121
+ List[torch.Tensor],
122
+ List[List[torch.Tensor]],
123
+ List[List[torch.Tensor]],
124
+ ]:
125
+ y_d_rs = []
126
+ y_d_gs = []
127
+ fmap_rs = []
128
+ fmap_gs = []
129
+ for i, d in enumerate(self.discriminators):
130
+ y_d_r, fmap_r = d(y)
131
+ y_d_g, fmap_g = d(y_hat)
132
+ y_d_rs.append(y_d_r)
133
+ fmap_rs.append(fmap_r)
134
+ y_d_gs.append(y_d_g)
135
+ fmap_gs.append(fmap_g)
136
+
137
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
138
+
139
+
140
+ class DiscriminatorR(nn.Module):
141
+ def __init__(self, cfg: AttrDict, resolution: List[List[int]]):
142
+ super().__init__()
143
+
144
+ self.resolution = resolution
145
+ assert len(self.resolution) == 3, f"MRD layer requires list with len=3, got {self.resolution}"
146
+ self.lrelu_slope = 0.1
147
+
148
+ norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
149
+ if hasattr(cfg, "mrd_use_spectral_norm"):
150
+ print(f"[INFO] overriding MRD use_spectral_norm as {cfg.mrd_use_spectral_norm}")
151
+ norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
152
+ self.d_mult = cfg.discriminator_channel_mult
153
+ if hasattr(cfg, "mrd_channel_mult"):
154
+ print(f"[INFO] overriding mrd channel multiplier as {cfg.mrd_channel_mult}")
155
+ self.d_mult = cfg.mrd_channel_mult
156
+
157
+ self.convs = nn.ModuleList(
158
+ [
159
+ norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))),
160
+ norm_f(
161
+ nn.Conv2d(
162
+ int(32 * self.d_mult),
163
+ int(32 * self.d_mult),
164
+ (3, 9),
165
+ stride=(1, 2),
166
+ padding=(1, 4),
167
+ )
168
+ ),
169
+ norm_f(
170
+ nn.Conv2d(
171
+ int(32 * self.d_mult),
172
+ int(32 * self.d_mult),
173
+ (3, 9),
174
+ stride=(1, 2),
175
+ padding=(1, 4),
176
+ )
177
+ ),
178
+ norm_f(
179
+ nn.Conv2d(
180
+ int(32 * self.d_mult),
181
+ int(32 * self.d_mult),
182
+ (3, 9),
183
+ stride=(1, 2),
184
+ padding=(1, 4),
185
+ )
186
+ ),
187
+ norm_f(
188
+ nn.Conv2d(
189
+ int(32 * self.d_mult),
190
+ int(32 * self.d_mult),
191
+ (3, 3),
192
+ padding=(1, 1),
193
+ )
194
+ ),
195
+ ]
196
+ )
197
+ self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))
198
+
199
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
200
+ fmap = []
201
+
202
+ x = self.spectrogram(x)
203
+ x = x.unsqueeze(1)
204
+ for l in self.convs:
205
+ x = l(x)
206
+ x = F.leaky_relu(x, self.lrelu_slope)
207
+ fmap.append(x)
208
+ x = self.conv_post(x)
209
+ fmap.append(x)
210
+ x = torch.flatten(x, 1, -1)
211
+
212
+ return x, fmap
213
+
214
+ def spectrogram(self, x: torch.Tensor) -> torch.Tensor:
215
+ n_fft, hop_length, win_length = self.resolution
216
+ x = F.pad(
217
+ x,
218
+ (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)),
219
+ mode="reflect",
220
+ )
221
+ x = x.squeeze(1)
222
+ x = torch.stft(
223
+ x,
224
+ n_fft=n_fft,
225
+ hop_length=hop_length,
226
+ win_length=win_length,
227
+ center=False,
228
+ return_complex=True,
229
+ )
230
+ x = torch.view_as_real(x) # [B, F, TT, 2]
231
+ mag = torch.norm(x, p=2, dim=-1) # [B, F, TT]
232
+
233
+ return mag
234
+
235
+
236
+ class MultiResolutionDiscriminator(nn.Module):
237
+ def __init__(self, cfg, debug=False):
238
+ super().__init__()
239
+ self.resolutions = cfg.resolutions
240
+ assert len(self.resolutions) == 3, (
241
+ f"MRD requires list of list with len=3, each element having a list with len=3. Got {self.resolutions}"
242
+ )
243
+ self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions])
244
+
245
+ def forward(
246
+ self, y: torch.Tensor, y_hat: torch.Tensor
247
+ ) -> Tuple[
248
+ List[torch.Tensor],
249
+ List[torch.Tensor],
250
+ List[List[torch.Tensor]],
251
+ List[List[torch.Tensor]],
252
+ ]:
253
+ y_d_rs = []
254
+ y_d_gs = []
255
+ fmap_rs = []
256
+ fmap_gs = []
257
+
258
+ for i, d in enumerate(self.discriminators):
259
+ y_d_r, fmap_r = d(x=y)
260
+ y_d_g, fmap_g = d(x=y_hat)
261
+ y_d_rs.append(y_d_r)
262
+ fmap_rs.append(fmap_r)
263
+ y_d_gs.append(y_d_g)
264
+ fmap_gs.append(fmap_g)
265
+
266
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
267
+
268
+
269
+ # Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
270
+ # Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
271
+ # LICENSE is in incl_licenses directory.
272
+ class DiscriminatorB(nn.Module):
273
+ def __init__(
274
+ self,
275
+ window_length: int,
276
+ channels: int = 32,
277
+ hop_factor: float = 0.25,
278
+ bands: Tuple[Tuple[float, float], ...] = (
279
+ (0.0, 0.1),
280
+ (0.1, 0.25),
281
+ (0.25, 0.5),
282
+ (0.5, 0.75),
283
+ (0.75, 1.0),
284
+ ),
285
+ ):
286
+ super().__init__()
287
+ self.window_length = window_length
288
+ self.hop_factor = hop_factor
289
+ self.spec_fn = Spectrogram(
290
+ n_fft=window_length,
291
+ hop_length=int(window_length * hop_factor),
292
+ win_length=window_length,
293
+ power=None,
294
+ )
295
+ n_fft = window_length // 2 + 1
296
+ bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands]
297
+ self.bands = bands
298
+ convs = lambda: nn.ModuleList(
299
+ [
300
+ weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))),
301
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
302
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
303
+ weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))),
304
+ weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))),
305
+ ]
306
+ )
307
+ self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))])
308
+
309
+ self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1)))
310
+
311
+ def spectrogram(self, x: torch.Tensor) -> List[torch.Tensor]:
312
+ # Remove DC offset
313
+ x = x - x.mean(dim=-1, keepdims=True)
314
+ # Peak normalize the volume of input audio
315
+ x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
316
+ x = self.spec_fn(x)
317
+ x = torch.view_as_real(x)
318
+ x = x.permute(0, 3, 2, 1) # [B, F, T, C] -> [B, C, T, F]
319
+ # Split into bands
320
+ x_bands = [x[..., b[0] : b[1]] for b in self.bands]
321
+ return x_bands
322
+
323
+ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
324
+ x_bands = self.spectrogram(x.squeeze(1))
325
+ fmap = []
326
+ x = []
327
+
328
+ for band, stack in zip(x_bands, self.band_convs):
329
+ for i, layer in enumerate(stack):
330
+ band = layer(band)
331
+ band = torch.nn.functional.leaky_relu(band, 0.1)
332
+ if i > 0:
333
+ fmap.append(band)
334
+ x.append(band)
335
+
336
+ x = torch.cat(x, dim=-1)
337
+ x = self.conv_post(x)
338
+ fmap.append(x)
339
+
340
+ return x, fmap
341
+
342
+
343
+ # Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec
344
+ # Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license.
345
+ # LICENSE is in incl_licenses directory.
346
+ class MultiBandDiscriminator(nn.Module):
347
+ def __init__(
348
+ self,
349
+ h,
350
+ ):
351
+ """
352
+ Multi-band multi-scale STFT discriminator, with the architecture based on https://github.com/descriptinc/descript-audio-codec
353
+ and the modified code adapted from https://github.com/gemelo-ai/vocos.
354
+ """
355
+ super().__init__()
356
+ # fft_sizes (list[int]): Tuple of window lengths for FFT. Defaults to [2048, 1024, 512] if not set in h.
357
+ self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512])
358
+ self.discriminators = nn.ModuleList([DiscriminatorB(window_length=w) for w in self.fft_sizes])
359
+
360
+ def forward(
361
+ self, y: torch.Tensor, y_hat: torch.Tensor
362
+ ) -> Tuple[
363
+ List[torch.Tensor],
364
+ List[torch.Tensor],
365
+ List[List[torch.Tensor]],
366
+ List[List[torch.Tensor]],
367
+ ]:
368
+ y_d_rs = []
369
+ y_d_gs = []
370
+ fmap_rs = []
371
+ fmap_gs = []
372
+
373
+ for d in self.discriminators:
374
+ y_d_r, fmap_r = d(x=y)
375
+ y_d_g, fmap_g = d(x=y_hat)
376
+ y_d_rs.append(y_d_r)
377
+ fmap_rs.append(fmap_r)
378
+ y_d_gs.append(y_d_g)
379
+ fmap_gs.append(fmap_g)
380
+
381
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
382
+
383
+
384
+ # Adapted from https://github.com/open-mmlab/Amphion/blob/main/models/vocoders/gan/discriminator/mssbcqtd.py under the MIT license.
385
+ # LICENSE is in incl_licenses directory.
386
+ class DiscriminatorCQT(nn.Module):
387
+ def __init__(self, cfg: AttrDict, hop_length: int, n_octaves: int, bins_per_octave: int):
388
+ super().__init__()
389
+ self.cfg = cfg
390
+
391
+ self.filters = cfg["cqtd_filters"]
392
+ self.max_filters = cfg["cqtd_max_filters"]
393
+ self.filters_scale = cfg["cqtd_filters_scale"]
394
+ self.kernel_size = (3, 9)
395
+ self.dilations = cfg["cqtd_dilations"]
396
+ self.stride = (1, 2)
397
+
398
+ self.in_channels = cfg["cqtd_in_channels"]
399
+ self.out_channels = cfg["cqtd_out_channels"]
400
+ self.fs = cfg["sampling_rate"]
401
+ self.hop_length = hop_length
402
+ self.n_octaves = n_octaves
403
+ self.bins_per_octave = bins_per_octave
404
+
405
+ # Lazy-load
406
+ from nnAudio import features
407
+
408
+ self.cqt_transform = features.cqt.CQT2010v2(
409
+ sr=self.fs * 2,
410
+ hop_length=self.hop_length,
411
+ n_bins=self.bins_per_octave * self.n_octaves,
412
+ bins_per_octave=self.bins_per_octave,
413
+ output_format="Complex",
414
+ pad_mode="constant",
415
+ )
416
+
417
+ self.conv_pres = nn.ModuleList()
418
+ for _ in range(self.n_octaves):
419
+ self.conv_pres.append(
420
+ nn.Conv2d(
421
+ self.in_channels * 2,
422
+ self.in_channels * 2,
423
+ kernel_size=self.kernel_size,
424
+ padding=self.get_2d_padding(self.kernel_size),
425
+ )
426
+ )
427
+
428
+ self.convs = nn.ModuleList()
429
+
430
+ self.convs.append(
431
+ nn.Conv2d(
432
+ self.in_channels * 2,
433
+ self.filters,
434
+ kernel_size=self.kernel_size,
435
+ padding=self.get_2d_padding(self.kernel_size),
436
+ )
437
+ )
438
+
439
+ in_chs = min(self.filters_scale * self.filters, self.max_filters)
440
+ for i, dilation in enumerate(self.dilations):
441
+ out_chs = min((self.filters_scale ** (i + 1)) * self.filters, self.max_filters)
442
+ self.convs.append(
443
+ weight_norm(
444
+ nn.Conv2d(
445
+ in_chs,
446
+ out_chs,
447
+ kernel_size=self.kernel_size,
448
+ stride=self.stride,
449
+ dilation=(dilation, 1),
450
+ padding=self.get_2d_padding(self.kernel_size, (dilation, 1)),
451
+ )
452
+ )
453
+ )
454
+ in_chs = out_chs
455
+ out_chs = min(
456
+ (self.filters_scale ** (len(self.dilations) + 1)) * self.filters,
457
+ self.max_filters,
458
+ )
459
+ self.convs.append(
460
+ weight_norm(
461
+ nn.Conv2d(
462
+ in_chs,
463
+ out_chs,
464
+ kernel_size=(self.kernel_size[0], self.kernel_size[0]),
465
+ padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
466
+ )
467
+ )
468
+ )
469
+
470
+ self.conv_post = weight_norm(
471
+ nn.Conv2d(
472
+ out_chs,
473
+ self.out_channels,
474
+ kernel_size=(self.kernel_size[0], self.kernel_size[0]),
475
+ padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])),
476
+ )
477
+ )
478
+
479
+ self.activation = torch.nn.LeakyReLU(negative_slope=0.1)
480
+ self.resample = Resample(orig_freq=self.fs, new_freq=self.fs * 2)
481
+
482
+ self.cqtd_normalize_volume = self.cfg.get("cqtd_normalize_volume", False)
483
+ if self.cqtd_normalize_volume:
484
+ print(
485
+ "[INFO] cqtd_normalize_volume set to True. Will apply DC offset removal & peak volume normalization in CQTD!"
486
+ )
487
+
488
+ def get_2d_padding(
489
+ self,
490
+ kernel_size: typing.Tuple[int, int],
491
+ dilation: typing.Tuple[int, int] = (1, 1),
492
+ ):
493
+ return (
494
+ ((kernel_size[0] - 1) * dilation[0]) // 2,
495
+ ((kernel_size[1] - 1) * dilation[1]) // 2,
496
+ )
497
+
498
+ def forward(self, x: torch.tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
499
+ fmap = []
500
+
501
+ if self.cqtd_normalize_volume:
502
+ # Remove DC offset
503
+ x = x - x.mean(dim=-1, keepdims=True)
504
+ # Peak normalize the volume of input audio
505
+ x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9)
506
+
507
+ x = self.resample(x)
508
+
509
+ z = self.cqt_transform(x)
510
+
511
+ z_amplitude = z[:, :, :, 0].unsqueeze(1)
512
+ z_phase = z[:, :, :, 1].unsqueeze(1)
513
+
514
+ z = torch.cat([z_amplitude, z_phase], dim=1)
515
+ z = torch.permute(z, (0, 1, 3, 2)) # [B, C, W, T] -> [B, C, T, W]
516
+
517
+ latent_z = []
518
+ for i in range(self.n_octaves):
519
+ latent_z.append(
520
+ self.conv_pres[i](
521
+ z[
522
+ :,
523
+ :,
524
+ :,
525
+ i * self.bins_per_octave : (i + 1) * self.bins_per_octave,
526
+ ]
527
+ )
528
+ )
529
+ latent_z = torch.cat(latent_z, dim=-1)
530
+
531
+ for i, l in enumerate(self.convs):
532
+ latent_z = l(latent_z)
533
+
534
+ latent_z = self.activation(latent_z)
535
+ fmap.append(latent_z)
536
+
537
+ latent_z = self.conv_post(latent_z)
538
+
539
+ return latent_z, fmap
540
+
541
+
542
+ class MultiScaleSubbandCQTDiscriminator(nn.Module):
543
+ def __init__(self, cfg: AttrDict):
544
+ super().__init__()
545
+
546
+ self.cfg = cfg
547
+ # Using get with defaults
548
+ self.cfg["cqtd_filters"] = self.cfg.get("cqtd_filters", 32)
549
+ self.cfg["cqtd_max_filters"] = self.cfg.get("cqtd_max_filters", 1024)
550
+ self.cfg["cqtd_filters_scale"] = self.cfg.get("cqtd_filters_scale", 1)
551
+ self.cfg["cqtd_dilations"] = self.cfg.get("cqtd_dilations", [1, 2, 4])
552
+ self.cfg["cqtd_in_channels"] = self.cfg.get("cqtd_in_channels", 1)
553
+ self.cfg["cqtd_out_channels"] = self.cfg.get("cqtd_out_channels", 1)
554
+ # Multi-scale params to loop over
555
+ self.cfg["cqtd_hop_lengths"] = self.cfg.get("cqtd_hop_lengths", [512, 256, 256])
556
+ self.cfg["cqtd_n_octaves"] = self.cfg.get("cqtd_n_octaves", [9, 9, 9])
557
+ self.cfg["cqtd_bins_per_octaves"] = self.cfg.get("cqtd_bins_per_octaves", [24, 36, 48])
558
+
559
+ self.discriminators = nn.ModuleList(
560
+ [
561
+ DiscriminatorCQT(
562
+ self.cfg,
563
+ hop_length=self.cfg["cqtd_hop_lengths"][i],
564
+ n_octaves=self.cfg["cqtd_n_octaves"][i],
565
+ bins_per_octave=self.cfg["cqtd_bins_per_octaves"][i],
566
+ )
567
+ for i in range(len(self.cfg["cqtd_hop_lengths"]))
568
+ ]
569
+ )
570
+
571
+ def forward(
572
+ self, y: torch.Tensor, y_hat: torch.Tensor
573
+ ) -> Tuple[
574
+ List[torch.Tensor],
575
+ List[torch.Tensor],
576
+ List[List[torch.Tensor]],
577
+ List[List[torch.Tensor]],
578
+ ]:
579
+ y_d_rs = []
580
+ y_d_gs = []
581
+ fmap_rs = []
582
+ fmap_gs = []
583
+
584
+ for disc in self.discriminators:
585
+ y_d_r, fmap_r = disc(y)
586
+ y_d_g, fmap_g = disc(y_hat)
587
+ y_d_rs.append(y_d_r)
588
+ fmap_rs.append(fmap_r)
589
+ y_d_gs.append(y_d_g)
590
+ fmap_gs.append(fmap_g)
591
+
592
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
593
+
594
+
595
+ class CombinedDiscriminator(nn.Module):
596
+ """
597
+ Wrapper for chaining multiple discriminator architectures.
598
+ Example: combine mbd and cqtd as a single class
599
+ """
600
+
601
+ def __init__(self, list_discriminator: List[nn.Module]):
602
+ super().__init__()
603
+ self.discrimiantor = nn.ModuleList(list_discriminator)
604
+
605
+ def forward(
606
+ self, y: torch.Tensor, y_hat: torch.Tensor
607
+ ) -> Tuple[
608
+ List[torch.Tensor],
609
+ List[torch.Tensor],
610
+ List[List[torch.Tensor]],
611
+ List[List[torch.Tensor]],
612
+ ]:
613
+ y_d_rs = []
614
+ y_d_gs = []
615
+ fmap_rs = []
616
+ fmap_gs = []
617
+
618
+ for disc in self.discrimiantor:
619
+ y_d_r, y_d_g, fmap_r, fmap_g = disc(y, y_hat)
620
+ y_d_rs.extend(y_d_r)
621
+ fmap_rs.extend(fmap_r)
622
+ y_d_gs.extend(y_d_g)
623
+ fmap_gs.extend(fmap_g)
624
+
625
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
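CombinedDiscriminator simply chains several of the discriminators defined above and concatenates their score and feature-map lists, which is how a BigVGAN-v2 style setup pairs, for example, the multi-band and the sub-band CQT discriminators. A hedged sketch, assuming the BigVGAN directory is on sys.path (matching this file's absolute imports) and that nnAudio is installed for the CQT branch:

import torch
from env import AttrDict
from discriminators import (
    MultiBandDiscriminator,
    MultiScaleSubbandCQTDiscriminator,
    CombinedDiscriminator,
)

h = AttrDict({"sampling_rate": 24000})  # CQTD needs the sampling rate; other keys fall back to defaults
disc = CombinedDiscriminator(
    [MultiBandDiscriminator(h), MultiScaleSubbandCQTDiscriminator(h)]
)

y = torch.randn(2, 1, 8192)             # real waveforms, [B, 1, T]
y_hat = torch.randn(2, 1, 8192)         # generated waveforms
y_d_rs, y_d_gs, fmap_rs, fmap_gs = disc(y, y_hat)
print(len(y_d_rs), len(fmap_rs))        # one entry per sub-discriminator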
GPT_SoVITS/BigVGAN/env.py ADDED
@@ -0,0 +1,18 @@
1
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import os
5
+ import shutil
6
+
7
+
8
+ class AttrDict(dict):
9
+ def __init__(self, *args, **kwargs):
10
+ super(AttrDict, self).__init__(*args, **kwargs)
11
+ self.__dict__ = self
12
+
13
+
14
+ def build_env(config, config_name, path):
15
+ t_path = os.path.join(path, config_name)
16
+ if config != t_path:
17
+ os.makedirs(path, exist_ok=True)
18
+ shutil.copyfile(config, os.path.join(path, config_name))
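AttrDict is the small helper every config in this package is loaded into: because the instance's __dict__ is the dict itself, hyperparameters can be read either as keys or as attributes. For illustration:

h = AttrDict({"num_mels": 80, "hop_size": 256})
assert h.num_mels == h["num_mels"] == 80
h.fmax = 8000                  # attribute writes also land in the dict
assert h["fmax"] == 8000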
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_1 ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Jungil Kong
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_2 ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Edward Dixon
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_3 ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_4 ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BSD 3-Clause License
2
+
3
+ Copyright (c) 2019, Seungwon Park 박승원
4
+ All rights reserved.
5
+
6
+ Redistribution and use in source and binary forms, with or without
7
+ modification, are permitted provided that the following conditions are met:
8
+
9
+ 1. Redistributions of source code must retain the above copyright notice, this
10
+ list of conditions and the following disclaimer.
11
+
12
+ 2. Redistributions in binary form must reproduce the above copyright notice,
13
+ this list of conditions and the following disclaimer in the documentation
14
+ and/or other materials provided with the distribution.
15
+
16
+ 3. Neither the name of the copyright holder nor the names of its
17
+ contributors may be used to endorse or promote products derived from
18
+ this software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_5 ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2020 Alexandre Défossez
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
4
+ associated documentation files (the "Software"), to deal in the Software without restriction,
5
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
6
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
7
+ furnished to do so, subject to the following conditions:
8
+
9
+ The above copyright notice and this permission notice shall be included in all copies or
10
+ substantial portions of the Software.
11
+
12
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
13
+ NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
14
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
15
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
16
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_6 ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023-present, Descript
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_7 ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Charactr Inc.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_8 ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Amphion
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
GPT_SoVITS/BigVGAN/inference.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ from __future__ import absolute_import, division, print_function, unicode_literals
5
+
6
+ import os
7
+ import argparse
8
+ import json
9
+ import torch
10
+ import librosa
11
+ from utils import load_checkpoint
12
+ from meldataset import get_mel_spectrogram
13
+ from scipy.io.wavfile import write
14
+ from env import AttrDict
15
+ from meldataset import MAX_WAV_VALUE
16
+ from bigvgan import BigVGAN as Generator
17
+
18
+ h = None
19
+ device = None
20
+ torch.backends.cudnn.benchmark = False
21
+
22
+
23
+ def inference(a, h):
24
+ generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device)
25
+
26
+ state_dict_g = load_checkpoint(a.checkpoint_file, device)
27
+ generator.load_state_dict(state_dict_g["generator"])
28
+
29
+ filelist = os.listdir(a.input_wavs_dir)
30
+
31
+ os.makedirs(a.output_dir, exist_ok=True)
32
+
33
+ generator.eval()
34
+ generator.remove_weight_norm()
35
+ with torch.no_grad():
36
+ for i, filname in enumerate(filelist):
37
+ # Load the ground truth audio and resample if necessary
38
+ wav, sr = librosa.load(os.path.join(a.input_wavs_dir, filename), sr=h.sampling_rate, mono=True)
39
+ wav = torch.FloatTensor(wav).to(device)
40
+ # Compute mel spectrogram from the ground truth audio
41
+ x = get_mel_spectrogram(wav.unsqueeze(0), generator.h)
42
+
43
+ y_g_hat = generator(x)
44
+
45
+ audio = y_g_hat.squeeze()
46
+ audio = audio * MAX_WAV_VALUE
47
+ audio = audio.cpu().numpy().astype("int16")
48
+
49
+ output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + "_generated.wav")
50
+ write(output_file, h.sampling_rate, audio)
51
+ print(output_file)
52
+
53
+
54
+ def main():
55
+ print("Initializing Inference Process..")
56
+
57
+ parser = argparse.ArgumentParser()
58
+ parser.add_argument("--input_wavs_dir", default="test_files")
59
+ parser.add_argument("--output_dir", default="generated_files")
60
+ parser.add_argument("--checkpoint_file", required=True)
61
+ parser.add_argument("--use_cuda_kernel", action="store_true", default=False)
62
+
63
+ a = parser.parse_args()
64
+
65
+ config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json")
66
+ with open(config_file) as f:
67
+ data = f.read()
68
+
69
+ global h
70
+ json_config = json.loads(data)
71
+ h = AttrDict(json_config)
72
+
73
+ torch.manual_seed(h.seed)
74
+ global device
75
+ if torch.cuda.is_available():
76
+ torch.cuda.manual_seed(h.seed)
77
+ device = torch.device("cuda")
78
+ else:
79
+ device = torch.device("cpu")
80
+
81
+ inference(a, h)
82
+
83
+
84
+ if __name__ == "__main__":
85
+ main()
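A minimal usage sketch for the script above (not part of the commit): the flags mirror the argparse definitions in inference.py, and the checkpoint path is a placeholder that assumes a matching config.json sits next to the checkpoint.

import subprocess

# Hypothetical invocation of inference.py; paths are placeholders.
subprocess.run(
    [
        "python", "inference.py",
        "--checkpoint_file", "exp/bigvgan/g_05000000",  # placeholder checkpoint path
        "--input_wavs_dir", "test_files",
        "--output_dir", "generated_files",
    ],
    check=True,
)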
GPT_SoVITS/BigVGAN/inference_e2e.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ from __future__ import absolute_import, division, print_function, unicode_literals
5
+
6
+ import glob
7
+ import os
8
+ import numpy as np
9
+ import argparse
10
+ import json
11
+ import torch
12
+ from scipy.io.wavfile import write
13
+ from env import AttrDict
14
+ from meldataset import MAX_WAV_VALUE
15
+ from bigvgan import BigVGAN as Generator
16
+
17
+ h = None
18
+ device = None
19
+ torch.backends.cudnn.benchmark = False
20
+
21
+
22
+ def load_checkpoint(filepath, device):
23
+ assert os.path.isfile(filepath)
24
+ print(f"Loading '{filepath}'")
25
+ checkpoint_dict = torch.load(filepath, map_location=device)
26
+ print("Complete.")
27
+ return checkpoint_dict
28
+
29
+
30
+ def scan_checkpoint(cp_dir, prefix):
31
+ pattern = os.path.join(cp_dir, prefix + "*")
32
+ cp_list = glob.glob(pattern)
33
+ if len(cp_list) == 0:
34
+ return ""
35
+ return sorted(cp_list)[-1]
36
+
37
+
38
+ def inference(a, h):
39
+ generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device)
40
+
41
+ state_dict_g = load_checkpoint(a.checkpoint_file, device)
42
+ generator.load_state_dict(state_dict_g["generator"])
43
+
44
+ filelist = os.listdir(a.input_mels_dir)
45
+
46
+ os.makedirs(a.output_dir, exist_ok=True)
47
+
48
+ generator.eval()
49
+ generator.remove_weight_norm()
50
+ with torch.no_grad():
51
+ for i, filename in enumerate(filelist):
52
+ # Load the mel spectrogram in .npy format
53
+ x = np.load(os.path.join(a.input_mels_dir, filename))
54
+ x = torch.FloatTensor(x).to(device)
55
+ if len(x.shape) == 2:
56
+ x = x.unsqueeze(0)
57
+
58
+ y_g_hat = generator(x)
59
+
60
+ audio = y_g_hat.squeeze()
61
+ audio = audio * MAX_WAV_VALUE
62
+ audio = audio.cpu().numpy().astype("int16")
63
+
64
+ output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + "_generated_e2e.wav")
65
+ write(output_file, h.sampling_rate, audio)
66
+ print(output_file)
67
+
68
+
69
+ def main():
70
+ print("Initializing Inference Process..")
71
+
72
+ parser = argparse.ArgumentParser()
73
+ parser.add_argument("--input_mels_dir", default="test_mel_files")
74
+ parser.add_argument("--output_dir", default="generated_files_from_mel")
75
+ parser.add_argument("--checkpoint_file", required=True)
76
+ parser.add_argument("--use_cuda_kernel", action="store_true", default=False)
77
+
78
+ a = parser.parse_args()
79
+
80
+ config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json")
81
+ with open(config_file) as f:
82
+ data = f.read()
83
+
84
+ global h
85
+ json_config = json.loads(data)
86
+ h = AttrDict(json_config)
87
+
88
+ torch.manual_seed(h.seed)
89
+ global device
90
+ if torch.cuda.is_available():
91
+ torch.cuda.manual_seed(h.seed)
92
+ device = torch.device("cuda")
93
+ else:
94
+ device = torch.device("cpu")
95
+
96
+ inference(a, h)
97
+
98
+
99
+ if __name__ == "__main__":
100
+ main()
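A minimal preprocessing sketch (not part of the commit) showing how a mel .npy consumable by the script above could be produced with get_mel_spectrogram from meldataset.py; the config and wav paths are placeholders, and a config.json matching the checkpoint is assumed.

import json
import os

import librosa
import numpy as np
import torch

from env import AttrDict
from meldataset import get_mel_spectrogram

with open("exp/bigvgan/config.json") as f:  # placeholder config path
    h = AttrDict(json.load(f))

# Load a mono waveform at the model sampling rate and compute its mel spectrogram
wav, _ = librosa.load("example.wav", sr=h.sampling_rate, mono=True)  # placeholder input wav
mel = get_mel_spectrogram(torch.FloatTensor(wav).unsqueeze(0), h)  # [1, num_mels, frames]

os.makedirs("test_mel_files", exist_ok=True)
np.save(os.path.join("test_mel_files", "example.npy"), mel.squeeze(0).cpu().numpy())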
GPT_SoVITS/BigVGAN/loss.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
+ # LICENSE is in incl_licenses directory.
6
+
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from librosa.filters import mel as librosa_mel_fn
11
+ from scipy import signal
12
+
13
+ import typing
14
+ from typing import List, Tuple
15
+ from collections import namedtuple
16
+ import math
17
+ import functools
18
+
19
+
20
+ # Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license.
21
+ # LICENSE is in incl_licenses directory.
22
+ class MultiScaleMelSpectrogramLoss(nn.Module):
23
+ """Compute distance between mel spectrograms. Can be used
24
+ in a multi-scale way.
25
+
26
+ Parameters
27
+ ----------
28
+ n_mels : List[int]
29
+ Number of mels per STFT, by default [5, 10, 20, 40, 80, 160, 320],
30
+ window_lengths : List[int], optional
31
+ Length of each window of each STFT, by default [32, 64, 128, 256, 512, 1024, 2048]
32
+ loss_fn : typing.Callable, optional
33
+ How to compare each loss, by default nn.L1Loss()
34
+ clamp_eps : float, optional
35
+ Clamp on the log magnitude, below, by default 1e-5
36
+ mag_weight : float, optional
37
+ Weight of raw magnitude portion of loss, by default 0.0 (no amplification of the magnitude part)
38
+ log_weight : float, optional
39
+ Weight of log magnitude portion of loss, by default 1.0
40
+ pow : float, optional
41
+ Power to raise magnitude to before taking log, by default 1.0
42
+ weight : float, optional
43
+ Weight of this loss, by default 1.0
44
+ match_stride : bool, optional
45
+ Whether to match the stride of convolutional layers, by default False
46
+
47
+ Implementation copied from: https://github.com/descriptinc/lyrebird-audiotools/blob/961786aa1a9d628cca0c0486e5885a457fe70c1a/audiotools/metrics/spectral.py
48
+ Additional code copied and modified from https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
49
+ """
50
+
51
+ def __init__(
52
+ self,
53
+ sampling_rate: int,
54
+ n_mels: List[int] = [5, 10, 20, 40, 80, 160, 320],
55
+ window_lengths: List[int] = [32, 64, 128, 256, 512, 1024, 2048],
56
+ loss_fn: typing.Callable = nn.L1Loss(),
57
+ clamp_eps: float = 1e-5,
58
+ mag_weight: float = 0.0,
59
+ log_weight: float = 1.0,
60
+ pow: float = 1.0,
61
+ weight: float = 1.0,
62
+ match_stride: bool = False,
63
+ mel_fmin: List[float] = [0, 0, 0, 0, 0, 0, 0],
64
+ mel_fmax: List[float] = [None, None, None, None, None, None, None],
65
+ window_type: str = "hann",
66
+ ):
67
+ super().__init__()
68
+ self.sampling_rate = sampling_rate
69
+
70
+ STFTParams = namedtuple(
71
+ "STFTParams",
72
+ ["window_length", "hop_length", "window_type", "match_stride"],
73
+ )
74
+
75
+ self.stft_params = [
76
+ STFTParams(
77
+ window_length=w,
78
+ hop_length=w // 4,
79
+ match_stride=match_stride,
80
+ window_type=window_type,
81
+ )
82
+ for w in window_lengths
83
+ ]
84
+ self.n_mels = n_mels
85
+ self.loss_fn = loss_fn
86
+ self.clamp_eps = clamp_eps
87
+ self.log_weight = log_weight
88
+ self.mag_weight = mag_weight
89
+ self.weight = weight
90
+ self.mel_fmin = mel_fmin
91
+ self.mel_fmax = mel_fmax
92
+ self.pow = pow
93
+
94
+ @staticmethod
95
+ @functools.lru_cache(None)
96
+ def get_window(
97
+ window_type,
98
+ window_length,
99
+ ):
100
+ return signal.get_window(window_type, window_length)
101
+
102
+ @staticmethod
103
+ @functools.lru_cache(None)
104
+ def get_mel_filters(sr, n_fft, n_mels, fmin, fmax):
105
+ return librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
106
+
107
+ def mel_spectrogram(
108
+ self,
109
+ wav,
110
+ n_mels,
111
+ fmin,
112
+ fmax,
113
+ window_length,
114
+ hop_length,
115
+ match_stride,
116
+ window_type,
117
+ ):
118
+ """
119
+ Mirrors AudioSignal.mel_spectrogram used by BigVGAN-v2 training from:
120
+ https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py
121
+ """
122
+ B, C, T = wav.shape
123
+
124
+ if match_stride:
125
+ assert hop_length == window_length // 4, "For match_stride, hop must equal n_fft // 4"
126
+ right_pad = math.ceil(T / hop_length) * hop_length - T
127
+ pad = (window_length - hop_length) // 2
128
+ else:
129
+ right_pad = 0
130
+ pad = 0
131
+
132
+ wav = torch.nn.functional.pad(wav, (pad, pad + right_pad), mode="reflect")
133
+
134
+ window = self.get_window(window_type, window_length)
135
+ window = torch.from_numpy(window).to(wav.device).float()
136
+
137
+ stft = torch.stft(
138
+ wav.reshape(-1, T),
139
+ n_fft=window_length,
140
+ hop_length=hop_length,
141
+ window=window,
142
+ return_complex=True,
143
+ center=True,
144
+ )
145
+ _, nf, nt = stft.shape
146
+ stft = stft.reshape(B, C, nf, nt)
147
+ if match_stride:
148
+ """
149
+ Drop first two and last two frames, which are added, because of padding. Now num_frames * hop_length = num_samples.
150
+ """
151
+ stft = stft[..., 2:-2]
152
+ magnitude = torch.abs(stft)
153
+
154
+ nf = magnitude.shape[2]
155
+ mel_basis = self.get_mel_filters(self.sampling_rate, 2 * (nf - 1), n_mels, fmin, fmax)
156
+ mel_basis = torch.from_numpy(mel_basis).to(wav.device)
157
+ mel_spectrogram = magnitude.transpose(2, -1) @ mel_basis.T
158
+ mel_spectrogram = mel_spectrogram.transpose(-1, 2)
159
+
160
+ return mel_spectrogram
161
+
162
+ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
163
+ """Computes mel loss between an estimate and a reference
164
+ signal.
165
+
166
+ Parameters
167
+ ----------
168
+ x : torch.Tensor
169
+ Estimate signal
170
+ y : torch.Tensor
171
+ Reference signal
172
+
173
+ Returns
174
+ -------
175
+ torch.Tensor
176
+ Mel loss.
177
+ """
178
+
179
+ loss = 0.0
180
+ for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params):
181
+ kwargs = {
182
+ "n_mels": n_mels,
183
+ "fmin": fmin,
184
+ "fmax": fmax,
185
+ "window_length": s.window_length,
186
+ "hop_length": s.hop_length,
187
+ "match_stride": s.match_stride,
188
+ "window_type": s.window_type,
189
+ }
190
+
191
+ x_mels = self.mel_spectrogram(x, **kwargs)
192
+ y_mels = self.mel_spectrogram(y, **kwargs)
193
+ x_logmels = torch.log(x_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))
194
+ y_logmels = torch.log(y_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0))
195
+
196
+ loss += self.log_weight * self.loss_fn(x_logmels, y_logmels)
197
+ loss += self.mag_weight * self.loss_fn(x_logmels, y_logmels)
198
+
199
+ return loss
200
+
201
+
202
+ # Loss functions
203
+ def feature_loss(fmap_r: List[List[torch.Tensor]], fmap_g: List[List[torch.Tensor]]) -> torch.Tensor:
204
+ loss = 0
205
+ for dr, dg in zip(fmap_r, fmap_g):
206
+ for rl, gl in zip(dr, dg):
207
+ loss += torch.mean(torch.abs(rl - gl))
208
+
209
+ return loss * 2 # This equates to lambda=2.0 for the feature matching loss
210
+
211
+
212
+ def discriminator_loss(
213
+ disc_real_outputs: List[torch.Tensor], disc_generated_outputs: List[torch.Tensor]
214
+ ) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]:
215
+ loss = 0
216
+ r_losses = []
217
+ g_losses = []
218
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
219
+ r_loss = torch.mean((1 - dr) ** 2)
220
+ g_loss = torch.mean(dg**2)
221
+ loss += r_loss + g_loss
222
+ r_losses.append(r_loss.item())
223
+ g_losses.append(g_loss.item())
224
+
225
+ return loss, r_losses, g_losses
226
+
227
+
228
+ def generator_loss(
229
+ disc_outputs: List[torch.Tensor],
230
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
231
+ loss = 0
232
+ gen_losses = []
233
+ for dg in disc_outputs:
234
+ l = torch.mean((1 - dg) ** 2)
235
+ gen_losses.append(l)
236
+ loss += l
237
+
238
+ return loss, gen_losses
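A minimal sketch (not part of the commit) of how the multi-scale mel loss above is typically applied to batches of reference and generated waveforms shaped [B, C, T]; the sampling rate and tensor sizes below are illustrative only.

import torch

from loss import MultiScaleMelSpectrogramLoss

fn_mel_loss = MultiScaleMelSpectrogramLoss(sampling_rate=24000)  # illustrative sampling rate

y = torch.randn(2, 1, 24000)        # reference waveforms [B, C, T]
y_g_hat = torch.randn(2, 1, 24000)  # generated waveforms [B, C, T]

loss_mel = fn_mel_loss(y_g_hat, y)  # scalar tensor; estimate first, reference second
print(loss_mel.item())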
GPT_SoVITS/BigVGAN/meldataset.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
+ # LICENSE is in incl_licenses directory.
6
+
7
+ import math
8
+ import os
9
+ import random
10
+ import torch
11
+ import torch.utils.data
12
+ import numpy as np
13
+ import librosa
14
+ from librosa.filters import mel as librosa_mel_fn
15
+ import pathlib
16
+ from tqdm import tqdm
17
+ from typing import List, Tuple, Optional
18
+ from .env import AttrDict
19
+
20
+ MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 - 1 to prevent int16 overflow (which results in popping sounds in corner cases)
21
+
22
+
23
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
24
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
25
+
26
+
27
+ def dynamic_range_decompression(x, C=1):
28
+ return np.exp(x) / C
29
+
30
+
31
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
32
+ return torch.log(torch.clamp(x, min=clip_val) * C)
33
+
34
+
35
+ def dynamic_range_decompression_torch(x, C=1):
36
+ return torch.exp(x) / C
37
+
38
+
39
+ def spectral_normalize_torch(magnitudes):
40
+ return dynamic_range_compression_torch(magnitudes)
41
+
42
+
43
+ def spectral_de_normalize_torch(magnitudes):
44
+ return dynamic_range_decompression_torch(magnitudes)
45
+
46
+
47
+ mel_basis_cache = {}
48
+ hann_window_cache = {}
49
+
50
+
51
+ def mel_spectrogram(
52
+ y: torch.Tensor,
53
+ n_fft: int,
54
+ num_mels: int,
55
+ sampling_rate: int,
56
+ hop_size: int,
57
+ win_size: int,
58
+ fmin: int,
59
+ fmax: int = None,
60
+ center: bool = False,
61
+ ) -> torch.Tensor:
62
+ """
63
+ Calculate the mel spectrogram of an input signal.
64
+ This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft).
65
+
66
+ Args:
67
+ y (torch.Tensor): Input signal.
68
+ n_fft (int): FFT size.
69
+ num_mels (int): Number of mel bins.
70
+ sampling_rate (int): Sampling rate of the input signal.
71
+ hop_size (int): Hop size for STFT.
72
+ win_size (int): Window size for STFT.
73
+ fmin (int): Minimum frequency for mel filterbank.
74
+ fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn
75
+ center (bool): Whether to pad the input to center the frames. Default is False.
76
+
77
+ Returns:
78
+ torch.Tensor: Mel spectrogram.
79
+ """
80
+ if torch.min(y) < -1.0:
81
+ print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}")
82
+ if torch.max(y) > 1.0:
83
+ print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}")
84
+
85
+ device = y.device
86
+ key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}"
87
+
88
+ if key not in mel_basis_cache:
89
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
90
+ mel_basis_cache[key] = torch.from_numpy(mel).float().to(device)
91
+ hann_window_cache[key] = torch.hann_window(win_size).to(device)
92
+
93
+ mel_basis = mel_basis_cache[key]
94
+ hann_window = hann_window_cache[key]
95
+
96
+ padding = (n_fft - hop_size) // 2
97
+ y = torch.nn.functional.pad(y.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1)
98
+
99
+ spec = torch.stft(
100
+ y,
101
+ n_fft,
102
+ hop_length=hop_size,
103
+ win_length=win_size,
104
+ window=hann_window,
105
+ center=center,
106
+ pad_mode="reflect",
107
+ normalized=False,
108
+ onesided=True,
109
+ return_complex=True,
110
+ )
111
+ spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9)
112
+
113
+ mel_spec = torch.matmul(mel_basis, spec)
114
+ mel_spec = spectral_normalize_torch(mel_spec)
115
+
116
+ return mel_spec
117
+
118
+
119
+ def get_mel_spectrogram(wav, h):
120
+ """
121
+ Generate mel spectrogram from a waveform using given hyperparameters.
122
+
123
+ Args:
124
+ wav (torch.Tensor): Input waveform.
125
+ h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax.
126
+
127
+ Returns:
128
+ torch.Tensor: Mel spectrogram.
129
+ """
130
+ return mel_spectrogram(
131
+ wav,
132
+ h.n_fft,
133
+ h.num_mels,
134
+ h.sampling_rate,
135
+ h.hop_size,
136
+ h.win_size,
137
+ h.fmin,
138
+ h.fmax,
139
+ )
140
+
141
+
142
+ def get_dataset_filelist(a):
143
+ training_files = []
144
+ validation_files = []
145
+ list_unseen_validation_files = []
146
+
147
+ with open(a.input_training_file, "r", encoding="utf-8") as fi:
148
+ training_files = [
149
+ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
150
+ ]
151
+ print(f"first training file: {training_files[0]}")
152
+
153
+ with open(a.input_validation_file, "r", encoding="utf-8") as fi:
154
+ validation_files = [
155
+ os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0
156
+ ]
157
+ print(f"first validation file: {validation_files[0]}")
158
+
159
+ for i in range(len(a.list_input_unseen_validation_file)):
160
+ with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi:
161
+ unseen_validation_files = [
162
+ os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav")
163
+ for x in fi.read().split("\n")
164
+ if len(x) > 0
165
+ ]
166
+ print(f"first unseen {i}th validation fileset: {unseen_validation_files[0]}")
167
+ list_unseen_validation_files.append(unseen_validation_files)
168
+
169
+ return training_files, validation_files, list_unseen_validation_files
170
+
171
+
172
+ class MelDataset(torch.utils.data.Dataset):
173
+ def __init__(
174
+ self,
175
+ training_files: List[str],
176
+ hparams: AttrDict,
177
+ segment_size: int,
178
+ n_fft: int,
179
+ num_mels: int,
180
+ hop_size: int,
181
+ win_size: int,
182
+ sampling_rate: int,
183
+ fmin: int,
184
+ fmax: Optional[int],
185
+ split: bool = True,
186
+ shuffle: bool = True,
187
+ device: str = None,
188
+ fmax_loss: Optional[int] = None,
189
+ fine_tuning: bool = False,
190
+ base_mels_path: str = None,
191
+ is_seen: bool = True,
192
+ ):
193
+ self.audio_files = training_files
194
+ random.seed(1234)
195
+ if shuffle:
196
+ random.shuffle(self.audio_files)
197
+ self.hparams = hparams
198
+ self.is_seen = is_seen
199
+ if self.is_seen:
200
+ self.name = pathlib.Path(self.audio_files[0]).parts[0]
201
+ else:
202
+ self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/")
203
+
204
+ self.segment_size = segment_size
205
+ self.sampling_rate = sampling_rate
206
+ self.split = split
207
+ self.n_fft = n_fft
208
+ self.num_mels = num_mels
209
+ self.hop_size = hop_size
210
+ self.win_size = win_size
211
+ self.fmin = fmin
212
+ self.fmax = fmax
213
+ self.fmax_loss = fmax_loss
214
+ self.device = device
215
+ self.fine_tuning = fine_tuning
216
+ self.base_mels_path = base_mels_path
217
+
218
+ print("[INFO] checking dataset integrity...")
219
+ for i in tqdm(range(len(self.audio_files))):
220
+ assert os.path.exists(self.audio_files[i]), f"{self.audio_files[i]} not found"
221
+
222
+ def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, str, torch.Tensor]:
223
+ try:
224
+ filename = self.audio_files[index]
225
+
226
+ # Use librosa.load, which loads the waveform as mono float values in [-1, 1]
227
+ # Audio is ndarray with shape [T_time]. Disable auto-resampling here to minimize overhead
228
+ # The on-the-fly resampling during training will be done only for the obtained random chunk
229
+ audio, source_sampling_rate = librosa.load(filename, sr=None, mono=True)
230
+
231
+ # Main logic that uses <mel, audio> pair for training BigVGAN
232
+ if not self.fine_tuning:
233
+ if self.split: # Training step
234
+ # Obtain randomized audio chunk
235
+ if source_sampling_rate != self.sampling_rate:
236
+ # Adjust segment size to crop if the source sr is different
237
+ target_segment_size = math.ceil(self.segment_size * (source_sampling_rate / self.sampling_rate))
238
+ else:
239
+ target_segment_size = self.segment_size
240
+
241
+ # Compute upper bound index for the random chunk
242
+ random_chunk_upper_bound = max(0, audio.shape[0] - target_segment_size)
243
+
244
+ # Crop or pad audio to obtain random chunk with target_segment_size
245
+ if audio.shape[0] >= target_segment_size:
246
+ audio_start = random.randint(0, random_chunk_upper_bound)
247
+ audio = audio[audio_start : audio_start + target_segment_size]
248
+ else:
249
+ audio = np.pad(
250
+ audio,
251
+ (0, target_segment_size - audio.shape[0]),
252
+ mode="constant",
253
+ )
254
+
255
+ # Resample audio chunk to self.sampling_rate
256
+ if source_sampling_rate != self.sampling_rate:
257
+ audio = librosa.resample(
258
+ audio,
259
+ orig_sr=source_sampling_rate,
260
+ target_sr=self.sampling_rate,
261
+ )
262
+ if audio.shape[0] > self.segment_size:
263
+ # trim last elements to match self.segment_size (e.g., 16385 for 44khz downsampled to 24khz -> 16384)
264
+ audio = audio[: self.segment_size]
265
+
266
+ else: # Validation step
267
+ # Resample full audio clip to target sampling rate
268
+ if source_sampling_rate != self.sampling_rate:
269
+ audio = librosa.resample(
270
+ audio,
271
+ orig_sr=source_sampling_rate,
272
+ target_sr=self.sampling_rate,
273
+ )
274
+ # Trim last elements to match audio length to self.hop_size * n for evaluation
275
+ if (audio.shape[0] % self.hop_size) != 0:
276
+ audio = audio[: -(audio.shape[0] % self.hop_size)]
277
+
278
+ # BigVGAN is trained using volume-normalized waveform
279
+ audio = librosa.util.normalize(audio) * 0.95
280
+
281
+ # Cast ndarray to torch tensor
282
+ audio = torch.FloatTensor(audio)
283
+ audio = audio.unsqueeze(0) # [B(1), self.segment_size]
284
+
285
+ # Compute mel spectrogram corresponding to audio
286
+ mel = mel_spectrogram(
287
+ audio,
288
+ self.n_fft,
289
+ self.num_mels,
290
+ self.sampling_rate,
291
+ self.hop_size,
292
+ self.win_size,
293
+ self.fmin,
294
+ self.fmax,
295
+ center=False,
296
+ ) # [B(1), self.num_mels, self.segment_size // self.hop_size]
297
+
298
+ # Fine-tuning logic that uses pre-computed mel. Example: Using TTS model-generated mel as input
299
+ else:
300
+ # For fine-tuning, assert that the waveform is in the defined sampling_rate
301
+ # Fine-tuning won't support on-the-fly resampling to be fool-proof (the dataset should have been prepared properly)
302
+ assert source_sampling_rate == self.sampling_rate, (
303
+ f"For fine_tuning, waveform must be in the spcified sampling rate {self.sampling_rate}, got {source_sampling_rate}"
304
+ )
305
+
306
+ # Cast ndarray to torch tensor
307
+ audio = torch.FloatTensor(audio)
308
+ audio = audio.unsqueeze(0) # [B(1), T_time]
309
+
310
+ # Load pre-computed mel from disk
311
+ mel = np.load(
312
+ os.path.join(
313
+ self.base_mels_path,
314
+ os.path.splitext(os.path.split(filename)[-1])[0] + ".npy",
315
+ )
316
+ )
317
+ mel = torch.from_numpy(mel)
318
+
319
+ if len(mel.shape) < 3:
320
+ mel = mel.unsqueeze(0) # ensure [B, C, T]
321
+
322
+ if self.split:
323
+ frames_per_seg = math.ceil(self.segment_size / self.hop_size)
324
+
325
+ if audio.size(1) >= self.segment_size:
326
+ mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
327
+ mel = mel[:, :, mel_start : mel_start + frames_per_seg]
328
+ audio = audio[
329
+ :,
330
+ mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size,
331
+ ]
332
+
333
+ # Pad pre-computed mel and audio to matching lengths so that fine-tuning runs without error.
334
+ # NOTE: this may introduce a single-frame misalignment of the <pre-computed mel, audio>
335
+ # To remove possible misalignment, it is recommended to prepare the <pre-computed mel, audio> pair where the audio length is an integer multiple of self.hop_size
336
+ mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant")
337
+ audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant")
338
+
339
+ # Compute mel_loss used by spectral regression objective. Uses self.fmax_loss instead (usually None)
340
+ mel_loss = mel_spectrogram(
341
+ audio,
342
+ self.n_fft,
343
+ self.num_mels,
344
+ self.sampling_rate,
345
+ self.hop_size,
346
+ self.win_size,
347
+ self.fmin,
348
+ self.fmax_loss,
349
+ center=False,
350
+ ) # [B(1), self.num_mels, self.segment_size // self.hop_size]
351
+
352
+ # Shape sanity checks
353
+ assert (
354
+ audio.shape[1] == mel.shape[2] * self.hop_size and audio.shape[1] == mel_loss.shape[2] * self.hop_size
355
+ ), (
356
+ f"Audio length must be mel frame length * hop_size. Got audio shape {audio.shape} mel shape {mel.shape} mel_loss shape {mel_loss.shape}"
357
+ )
358
+
359
+ return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
360
+
361
+ # If an error occurs while loading the data, skip this sample and load another random sample into the batch
362
+ except Exception as e:
363
+ if self.fine_tuning:
364
+ raise e # Terminate training if it is fine-tuning. The dataset should have been prepared properly.
365
+ else:
366
+ print(f"[WARNING] Failed to load waveform, skipping! filename: {filename} Error: {e}")
367
+ return self[random.randrange(len(self))]
368
+
369
+ def __len__(self):
370
+ return len(self.audio_files)
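A minimal shape-check sketch (not part of the commit) for mel_spectrogram above: with center=False and the (n_fft - hop_size) // 2 reflection padding, a waveform whose length is a multiple of hop_size yields exactly T // hop_size frames. The parameter values below are illustrative only.

import torch

from meldataset import mel_spectrogram

wav = torch.randn(1, 8192)  # [B, T], with T divisible by hop_size
mel = mel_spectrogram(
    wav,
    n_fft=1024,
    num_mels=100,
    sampling_rate=24000,
    hop_size=256,
    win_size=1024,
    fmin=0,
    fmax=None,
)
print(mel.shape)  # torch.Size([1, 100, 32]) since 8192 // 256 == 32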
GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep ADDED
@@ -0,0 +1 @@
 
 
1
+
GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ | Field | Response |
2
+ | :--------------------------------------------------------------------------------------------------------- | :--------------------------------------------------- |
3
+ | Participation considerations from adversely impacted groups or protected classes in model design and testing: | None |
4
+ | Measures taken to mitigate against unwanted bias: | No measures taken to mitigate against unwanted bias. |
GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ | Field | Response |
2
+ | :---------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
3
+ | Intended Application & Domain: | Generating waveform from mel spectrogram. |
4
+ | Model Type: | Convolutional Neural Network (CNN) |
5
+ | Intended Users: | This model is intended for developers to synthesize and generate waveforms from the AI-generated mel spectrograms. |
6
+ | Output: | Audio Waveform |
7
+ | Describe how the model works: | Model generates audio waveform corresponding to the input mel spectrogram. |
8
+ | Name the adversely impacted groups this has been tested to deliver comparable outcomes regardless of: | Not Applicable |
9
+ | Technical Limitations: | This may not perform well on synthetically-generated mel spectrograms that deviate significantly from the profile of mel spectrograms on which this was trained. |
10
+ | Verified to have met prescribed NVIDIA quality standards: | Yes |
11
+ | Performance Metrics: | Perceptual Evaluation of Speech Quality (PESQ), Virtual Speech Quality Objective Listener (VISQOL), Multi-resolution STFT (MRSTFT), Mel cepstral distortion (MCD), Periodicity RMSE, Voice/Unvoiced F1 Score (V/UV F1) |
12
+ | Potential Known Risks: | This model may generate low-quality or distorted soundwaves. |
13
+ | Licensing: | https://github.com/NVIDIA/BigVGAN/blob/main/LICENSE |
GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Model Overview
2
+
3
+ ## Description:
4
+
5
+ BigVGAN is a generative AI model specialized in synthesizing audio waveforms using Mel spectrogram as inputs.
6
+
7
+ <center><img src="https://user-images.githubusercontent.com/15963413/218609148-881e39df-33af-4af9-ab95-1427c4ebf062.png" width="800"></center>
8
+
9
+ BigVGAN is a fully convolutional architecture with several upsampling blocks using transposed convolution followed by multiple residual dilated convolution layers.
10
+
11
+ BigVGAN introduces a novel module, called anti-aliased multi-periodicity composition (AMP), which is specifically designed for generating waveforms. AMP specializes in synthesizing high-frequency and periodic soundwaves, drawing inspiration from audio signal processing principles.
12
+
13
+ It applies a periodic activation function, called Snake, which provides an inductive bias to the architecture in generating periodic soundwaves. It also applies anti-aliasing filters to reduce undesired artifacts in the generated waveforms. <br>
14
+
15
+ This model is ready for commercial use.<br>
16
+
17
+ ## References(s):
18
+
19
+ - [BigVGAN: A Universal Neural Vocoder with Large-Scale Training](https://arxiv.org/abs/2206.04658) <br>
20
+ - [Project Page](https://research.nvidia.com/labs/adlr/projects/bigvgan/) <br>
21
+ - [Audio Demo](https://bigvgan-demo.github.io/) <br>
22
+
23
+ ## Model Architecture:
24
+
25
+ **Architecture Type:** Convolutional Neural Network (CNN) <br>
26
+ **Network Architecture:** You can see the details of this model on this link: https://github.com/NVIDIA/BigVGAN and the related paper can be found here: https://arxiv.org/abs/2206.04658<br>
27
+ **Model Version:** 2.0 <br>
28
+
29
+ ## Input:
30
+
31
+ **Input Type:** Audio <br>
32
+ **Input Format:** Mel Spectrogram <br>
33
+ **Input Parameters:** None <br>
34
+ **Other Properties Related to Input:** The input mel spectrogram has shape `[batch, channels, frames]`, where `channels` refers to the number of mel bands defined by the model and `frames` refers to the temporal length. The model supports arbitrarily long `frames` that fit into GPU memory.
35
+
36
+ ## Output:
37
+
38
+ **Output Type:** Audio <br>
39
+ **Output Format:** Audio Waveform <br>
40
+ **Output Parameters:** None <br>
41
+ **Other Properties Related to Output:** The output audio waveform has shape `[batch, 1, time]`, where `1` refers to the mono audio channel and `time` refers to the temporal length. `time` is a fixed integer multiple of the input `frames`, given by the upsampling ratio of the model (`time = upsampling ratio * frames`). The output audio waveform consists of float values in the range `[-1, 1]`.
42
+
43
+ ## Software Integration:
44
+
45
+ **Runtime Engine(s):** PyTorch
46
+
47
+ **Supported Hardware Microarchitecture Compatibility:** NVIDIA Ampere, NVIDIA Hopper, NVIDIA Lovelace, NVIDIA Turing, NVIDIA Volta <br>
48
+
49
+ ## Preferred/Supported Operating System(s):
50
+
51
+ Linux
52
+
53
+ ## Model Version(s):
54
+
55
+ v2.0
56
+
57
+ ## Training, Testing, and Evaluation Datasets:
58
+
59
+ ### Training Dataset:
60
+
61
+ The dataset contains diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
62
+
63
+ **Links:**
64
+
65
+ - [AAM: Artificial Audio Multitracks Dataset](https://zenodo.org/records/5794629)
66
+ - [AudioCaps](https://audiocaps.github.io/)
67
+ - [AudioSet](https://research.google.com/audioset/index.html)
68
+ - [common-accent](https://huggingface.co/datasets/DTU54DL/common-accent)
69
+ - [Crowd Sourced Emotional Multimodal Actors Dataset (CREMA-D)](https://ieeexplore.ieee.org/document/6849440)
70
+ - [DCASE2017 Challenge, Task 4: Large-scale weakly supervised sound event detection for smart cars](https://dcase.community/challenge2017/task-large-scale-sound-event-detection)
71
+ - [FSDnoisy18k](https://zenodo.org/records/2529934)
72
+ - [Free Universal Sound Separation Dataset](https://zenodo.org/records/3694384)
73
+ - [Greatest Hits dataset](https://andrewowens.com/vis/)
74
+ - [GTZAN](https://ieeexplore.ieee.org/document/1021072)
75
+ - [JL corpus](https://www.kaggle.com/datasets/tli725/jl-corpus)
76
+ - [Medley-solos-DB: a cross-collection dataset for musical instrument recognition](https://zenodo.org/records/3464194)
77
+ - [MUSAN: A Music, Speech, and Noise Corpus](https://www.openslr.org/17/)
78
+ - [MusicBench](https://huggingface.co/datasets/amaai-lab/MusicBench)
79
+ - [MusicCaps](https://www.kaggle.com/datasets/googleai/musiccaps)
80
+ - [MusicNet](https://www.kaggle.com/datasets/imsparsh/musicnet-dataset)
81
+ - [NSynth](https://magenta.tensorflow.org/datasets/nsynth)
82
+ - [OnAir-Music-Dataset](https://github.com/sevagh/OnAir-Music-Dataset)
83
+ - [Audio Piano Triads Dataset](https://zenodo.org/records/4740877)
84
+ - [Pitch Audio Dataset (Surge synthesizer)](https://zenodo.org/records/4677097)
85
+ - [SONYC Urban Sound Tagging (SONYC-UST): a multilabel dataset from an urban acoustic sensor network](https://zenodo.org/records/3966543)
86
+ - [VocalSound: A Dataset for Improving Human Vocal Sounds Recognition](https://arxiv.org/abs/2205.03433)
87
+ - [WavText5K](https://github.com/microsoft/WavText5K)
88
+ - [CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages](https://github.com/Kyubyong/css10)
89
+ - [Hi-Fi Multi-Speaker English TTS Dataset (Hi-Fi TTS)](https://www.openslr.org/109/)
90
+ - [IIIT-H Indic Speech Databases](http://festvox.org/databases/iiit_voices/)
91
+ - [Libri-Light: A Benchmark for ASR with Limited or No Supervision](https://arxiv.org/abs/1912.07875)
92
+ - [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://www.openslr.org/60)
93
+ - [LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus](https://www.openslr.org/141/)
94
+ - [The SIWIS French Speech Synthesis Database](https://datashare.ed.ac.uk/handle/10283/2353)
95
+ - [Crowdsourced high-quality Colombian Spanish speech data set](https://openslr.org/72/)
96
+ - [TTS-Portuguese Corpus](https://github.com/Edresson/TTS-Portuguese-Corpus)
97
+ - [CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit](https://datashare.ed.ac.uk/handle/10283/3443)
98
+
99
+ \*\* Data Collection Method by dataset <br>
100
+
101
+ - Human <br>
102
+
103
+ \*\* Labeling Method by dataset (for those with labels) <br>
104
+
105
+ - Hybrid: Automated, Human, Unknown <br>
106
+
107
+ ### Evaluating Dataset:
108
+
109
+ Properties: The audio generation quality of BigVGAN is evaluated using `dev` splits of the [LibriTTS dataset](https://www.openslr.org/60/) and [Hi-Fi TTS dataset](https://www.openslr.org/109/). The datasets include speech in English language with equal balance of genders.
110
+
111
+ \*\* Data Collection Method by dataset <br>
112
+
113
+ - Human <br>
114
+
115
+ \*\* Labeling Method by dataset <br>
116
+
117
+ - Automated <br>
118
+
119
+ ## Inference:
120
+
121
+ **Engine:** PyTorch <br>
122
+ **Test Hardware:** NVIDIA A100 GPU <br>
123
+
124
+ ## Ethical Considerations:
125
+
126
+ NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the Model Card++ Explainability, Bias, Safety & Security, and Privacy Subcards. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
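A minimal sketch (not part of the model card) of the input/output shape relation described above, assuming one of the configs bundled in this folder (here bigvgan_v2_24khz_100band_256x.json, i.e. 100 mel bands and a 256x upsampling ratio); the checkpoint-loading step is omitted, so the weights are untrained.

import json

import torch

from env import AttrDict
from bigvgan import BigVGAN

with open("configs/bigvgan_v2_24khz_100band_256x.json") as f:  # assumed bundled config
    h = AttrDict(json.load(f))

model = BigVGAN(h).eval()
mel = torch.randn(1, h.num_mels, 40)  # [batch, channels, frames]
with torch.no_grad():
    audio = model(mel)  # [batch, 1, time] with time = 256 * 40 = 10240
print(audio.shape)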
GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ | Field | Response |
2
+ | :------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------- |
3
+ | Generatable or reverse engineerable personal information? | None |
4
+ | Protected class data used to create this model? | None |
5
+ | Was consent obtained for any personal data used? | Not Applicable (No Personal Data) |
6
+ | How often is dataset reviewed? | Before Release |
7
+ | Is a mechanism in place to honor data subject right of access or deletion of personal data? | Not Applicable |
8
+ | If personal data was collected for the development of the model, was it collected directly by NVIDIA? | Not Applicable |
9
+ | If personal data was collected for the development of the model by NVIDIA, do you maintain or have access to disclosures made to data subjects? | Not Applicable |
10
+ | If personal data was collected for the development of this AI model, was it minimized to only what was required? | Not Applicable |
11
+ | Is data in dataset traceable? | Yes |
12
+ | Is there provenance for all datasets used in training? | Yes |
13
+ | Does data labeling (annotation, metadata) comply with privacy laws? | Yes |
14
+ | Is data compliant with data subject requests for data correction or removal, if such a request was made? | No, not possible with externally-sourced data. |
GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ | Field | Response |
2
+ | :---------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
3
+ | Model Application(s): | Synthetic Audio Generation |
4
+ | Describe the life critical impact (if present). | Not Applicable |
5
+ | Use Case Restrictions: | None |
6
+ | Model and dataset restrictions: | The principle of least privilege (PoLP) is applied, limiting access for dataset generation and model development. Dataset access is restricted during training, and dataset license constraints are adhered to. |
GPT_SoVITS/BigVGAN/requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ numpy
3
+ librosa>=0.8.1
4
+ scipy
5
+ tensorboard
6
+ soundfile
7
+ matplotlib
8
+ pesq
9
+ auraloss
10
+ tqdm
11
+ nnAudio
12
+ ninja
13
+ huggingface_hub>=0.23.4
GPT_SoVITS/BigVGAN/tests/test_activation.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import os
5
+ import sys
6
+
7
+ # to import modules from parent_dir
8
+ parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
9
+ sys.path.append(parent_dir)
10
+
11
+ import torch
12
+ from alias_free_activation.cuda import activation1d
13
+ from activations import Snake
14
+
15
+
16
+ def test_load_fused_kernels():
17
+ try:
18
+ print("[Success] load_fused_kernels")
19
+ except ImportError as e:
20
+ print("[Fail] load_fused_kernels")
21
+ raise e
22
+
23
+
24
+ def test_anti_alias_activation():
25
+ data = torch.rand((10, 10, 200), device="cuda")
26
+
27
+ # Check activations.Snake cuda vs. torch
28
+ fused_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=True).cuda()
29
+ fused_activation_output = fused_anti_alias_activation(data)
30
+
31
+ torch_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=False).cuda()
32
+ torch_activation_output = torch_anti_alias_activation(data)
33
+
34
+ test_result = (fused_activation_output - torch_activation_output).abs()
35
+
36
+ while test_result.dim() != 1:
37
+ test_result = test_result.mean(dim=-1)
38
+
39
+ diff = test_result.mean(dim=-1)
40
+
41
+ if diff <= 1e-3:
42
+ print(
43
+ f"\n[Success] test_fused_anti_alias_activation"
44
+ f"\n > mean_difference={diff}"
45
+ f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}"
46
+ f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
47
+ )
48
+ else:
49
+ print(
50
+ f"\n[Fail] test_fused_anti_alias_activation"
51
+ f"\n > mean_difference={diff}, "
52
+ f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, "
53
+ f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
54
+ )
55
+
56
+
57
+ if __name__ == "__main__":
58
+ from alias_free_activation.cuda import load
59
+
60
+ load.load()
61
+ test_load_fused_kernels()
62
+ test_anti_alias_activation()
GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import os
5
+ import sys
6
+
7
+ # to import modules from parent_dir
8
+ parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
9
+ sys.path.append(parent_dir)
10
+
11
+ import torch
12
+ from alias_free_activation.cuda import activation1d
13
+ from activations import SnakeBeta
14
+
15
+
16
+ def test_load_fused_kernels():
17
+ try:
18
+ print("[Success] load_fused_kernels")
19
+ except ImportError as e:
20
+ print("[Fail] load_fused_kernels")
21
+ raise e
22
+
23
+
24
+ def test_anti_alias_activation():
25
+ data = torch.rand((10, 10, 200), device="cuda")
26
+
27
+ # Check activations.SnakeBeta CUDA vs. Torch
28
+ fused_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=True).cuda()
29
+ fused_activation_output = fused_anti_alias_activation(data)
30
+
31
+ torch_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=False).cuda()
32
+ torch_activation_output = torch_anti_alias_activation(data)
33
+
34
+ test_result = (fused_activation_output - torch_activation_output).abs()
35
+
36
+ while test_result.dim() != 1:
37
+ test_result = test_result.mean(dim=-1)
38
+
39
+ diff = test_result.mean(dim=-1)
40
+
41
+ if diff <= 1e-3:
42
+ print(
43
+ f"\n[Success] test_fused_anti_alias_activation"
44
+ f"\n > mean_difference={diff}"
45
+ f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}"
46
+ f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
47
+ )
48
+ else:
49
+ print(
50
+ f"\n[Fail] test_fused_anti_alias_activation"
51
+ f"\n > mean_difference={diff}, "
52
+ f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, "
53
+ f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}"
54
+ )
55
+
56
+
57
+ if __name__ == "__main__":
58
+ from alias_free_activation.cuda import load
59
+
60
+ load.load()
61
+ test_load_fused_kernels()
62
+ test_anti_alias_activation()
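
The two tests above exercise the same `Activation1d` wrapper with the `Snake` and `SnakeBeta` activations. A minimal standalone sketch of that usage follows, assuming it is run from the BigVGAN root (so the local `activations` and `alias_free_activation` packages are importable) on a CUDA-capable machine:

```python
# Minimal sketch: applying the alias-free Snake activation to a [B, C, T] tensor.
import torch

from activations import Snake, SnakeBeta
from alias_free_activation.cuda import load, activation1d

load.load()  # build the fused anti-alias activation kernel once, as the tests do

channels = 10
x = torch.rand((1, channels, 200), device="cuda")  # [batch, channels, time]

act_fused = activation1d.Activation1d(activation=Snake(channels), fused=True).cuda()
act_plain = activation1d.Activation1d(activation=SnakeBeta(channels), fused=False).cuda()

y_fused = act_fused(x)  # upsample -> fused CUDA activation -> downsample
y_plain = act_plain(x)  # same anti-aliased pipeline, plain PyTorch ops
```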
GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py ADDED
@@ -0,0 +1,215 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ import os
5
+ import sys
6
+
7
+ # to import modules from parent_dir
8
+ parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
9
+ sys.path.append(parent_dir)
10
+
11
+ import torch
12
+ import json
13
+ from env import AttrDict
14
+ from bigvgan import BigVGAN
15
+ from time import time
16
+ from tqdm import tqdm
17
+ from meldataset import mel_spectrogram, MAX_WAV_VALUE
18
+ from scipy.io.wavfile import write
19
+ import numpy as np
20
+
21
+ import argparse
22
+
23
+ torch.backends.cudnn.benchmark = True
24
+
25
+ # For easier debugging
26
+ torch.set_printoptions(linewidth=200, threshold=10_000)
27
+
28
+
29
+ def generate_soundwave(duration=5.0, sr=24000):
30
+ t = np.linspace(0, duration, int(sr * duration), False, dtype=np.float32)
31
+
32
+ modulation = np.sin(2 * np.pi * t / duration)
33
+
34
+ min_freq = 220
35
+ max_freq = 1760
36
+ frequencies = min_freq + (max_freq - min_freq) * (modulation + 1) / 2
37
+ soundwave = np.sin(2 * np.pi * frequencies * t)
38
+
39
+ soundwave = soundwave / np.max(np.abs(soundwave)) * 0.95
40
+
41
+ return soundwave, sr
42
+
43
+
44
+ def get_mel(x, h):
45
+ return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
46
+
47
+
48
+ def load_checkpoint(filepath, device):
49
+ assert os.path.isfile(filepath)
50
+ print(f"Loading '{filepath}'")
51
+ checkpoint_dict = torch.load(filepath, map_location=device)
52
+ print("Complete.")
53
+ return checkpoint_dict
54
+
55
+
56
+ if __name__ == "__main__":
57
+ parser = argparse.ArgumentParser(description="Test script to check CUDA kernel correctness.")
58
+ parser.add_argument(
59
+ "--checkpoint_file",
60
+ type=str,
61
+ required=True,
62
+ help="Path to the checkpoint file. Assumes config.json exists in the directory.",
63
+ )
64
+
65
+ args = parser.parse_args()
66
+
67
+ config_file = os.path.join(os.path.split(args.checkpoint_file)[0], "config.json")
68
+ with open(config_file) as f:
69
+ config = f.read()
70
+ json_config = json.loads(config)
71
+ h = AttrDict({**json_config})
72
+
73
+ print("loading plain Pytorch BigVGAN")
74
+ generator_original = BigVGAN(h).to("cuda")
75
+ print("loading CUDA kernel BigVGAN with auto-build")
76
+ generator_cuda_kernel = BigVGAN(h, use_cuda_kernel=True).to("cuda")
77
+
78
+ state_dict_g = load_checkpoint(args.checkpoint_file, "cuda")
79
+ generator_original.load_state_dict(state_dict_g["generator"])
80
+ generator_cuda_kernel.load_state_dict(state_dict_g["generator"])
81
+
82
+ generator_original.remove_weight_norm()
83
+ generator_original.eval()
84
+ generator_cuda_kernel.remove_weight_norm()
85
+ generator_cuda_kernel.eval()
86
+
87
+ # define number of samples and length of mel frame to benchmark
88
+ num_sample = 10
89
+ num_mel_frame = 16384
90
+
91
+ # CUDA kernel correctness check
92
+ diff = 0.0
93
+ for i in tqdm(range(num_sample)):
94
+ # Random mel
95
+ data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")
96
+
97
+ with torch.inference_mode():
98
+ audio_original = generator_original(data)
99
+
100
+ with torch.inference_mode():
101
+ audio_cuda_kernel = generator_cuda_kernel(data)
102
+
103
+ # Both outputs should be (almost) the same
104
+ test_result = (audio_original - audio_cuda_kernel).abs()
105
+ diff += test_result.mean(dim=-1).item()
106
+
107
+ diff /= num_sample
108
+ if diff <= 2e-3: # We can expect a small difference (~1e-3) which does not affect perceptual quality
109
+ print(
110
+ f"\n[Success] test CUDA fused vs. plain torch BigVGAN inference"
111
+ f"\n > mean_difference={diff}"
112
+ f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}"
113
+ f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}"
114
+ )
115
+ else:
116
+ print(
117
+ f"\n[Fail] test CUDA fused vs. plain torch BigVGAN inference"
118
+ f"\n > mean_difference={diff}"
119
+ f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}, "
120
+ f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}"
121
+ )
122
+
123
+ del data, audio_original, audio_cuda_kernel
124
+
125
+ # Variables for tracking total time and VRAM usage
126
+ toc_total_original = 0
127
+ toc_total_cuda_kernel = 0
128
+ vram_used_original_total = 0
129
+ vram_used_cuda_kernel_total = 0
130
+ audio_length_total = 0
131
+
132
+ # Measure Original inference in isolation
133
+ for i in tqdm(range(num_sample)):
134
+ torch.cuda.reset_peak_memory_stats(device="cuda")
135
+ data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")
136
+ torch.cuda.synchronize()
137
+ tic = time()
138
+ with torch.inference_mode():
139
+ audio_original = generator_original(data)
140
+ torch.cuda.synchronize()
141
+ toc = time() - tic
142
+ toc_total_original += toc
143
+
144
+ vram_used_original_total += torch.cuda.max_memory_allocated(device="cuda")
145
+
146
+ del data, audio_original
147
+ torch.cuda.empty_cache()
148
+
149
+ # Measure CUDA kernel inference in isolation
150
+ for i in tqdm(range(num_sample)):
151
+ torch.cuda.reset_peak_memory_stats(device="cuda")
152
+ data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda")
153
+ torch.cuda.synchronize()
154
+ tic = time()
155
+ with torch.inference_mode():
156
+ audio_cuda_kernel = generator_cuda_kernel(data)
157
+ torch.cuda.synchronize()
158
+ toc = time() - tic
159
+ toc_total_cuda_kernel += toc
160
+
161
+ audio_length_total += audio_cuda_kernel.shape[-1]
162
+
163
+ vram_used_cuda_kernel_total += torch.cuda.max_memory_allocated(device="cuda")
164
+
165
+ del data, audio_cuda_kernel
166
+ torch.cuda.empty_cache()
167
+
168
+ # Calculate metrics
169
+ audio_second = audio_length_total / h.sampling_rate
170
+ khz_original = audio_length_total / toc_total_original / 1000
171
+ khz_cuda_kernel = audio_length_total / toc_total_cuda_kernel / 1000
172
+ vram_used_original_gb = vram_used_original_total / num_sample / (1024**3)
173
+ vram_used_cuda_kernel_gb = vram_used_cuda_kernel_total / num_sample / (1024**3)
174
+
175
+ # Print results
176
+ print(
177
+ f"Original BigVGAN: took {toc_total_original:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_original:.1f}kHz, {audio_second / toc_total_original:.1f} faster than realtime, VRAM used {vram_used_original_gb:.1f} GB"
178
+ )
179
+ print(
180
+ f"CUDA kernel BigVGAN: took {toc_total_cuda_kernel:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_cuda_kernel:.1f}kHz, {audio_second / toc_total_cuda_kernel:.1f} faster than realtime, VRAM used {vram_used_cuda_kernel_gb:.1f} GB"
181
+ )
182
+ print(f"speedup of CUDA kernel: {khz_cuda_kernel / khz_original}")
183
+ print(f"VRAM saving of CUDA kernel: {vram_used_original_gb / vram_used_cuda_kernel_gb}")
184
+
185
+ # Use artificial sine waves for inference test
186
+ audio_real, sr = generate_soundwave(duration=5.0, sr=h.sampling_rate)
187
+ audio_real = torch.tensor(audio_real).to("cuda")
188
+ # Compute mel spectrogram from the ground truth audio
189
+ x = get_mel(audio_real.unsqueeze(0), h)
190
+
191
+ with torch.inference_mode():
192
+ y_g_hat_original = generator_original(x)
193
+ y_g_hat_cuda_kernel = generator_cuda_kernel(x)
194
+
195
+ audio_real = audio_real.squeeze()
196
+ audio_real = audio_real * MAX_WAV_VALUE
197
+ audio_real = audio_real.cpu().numpy().astype("int16")
198
+
199
+ audio_original = y_g_hat_original.squeeze()
200
+ audio_original = audio_original * MAX_WAV_VALUE
201
+ audio_original = audio_original.cpu().numpy().astype("int16")
202
+
203
+ audio_cuda_kernel = y_g_hat_cuda_kernel.squeeze()
204
+ audio_cuda_kernel = audio_cuda_kernel * MAX_WAV_VALUE
205
+ audio_cuda_kernel = audio_cuda_kernel.cpu().numpy().astype("int16")
206
+
207
+ os.makedirs("tmp", exist_ok=True)
208
+ output_file_real = os.path.join("tmp", "audio_real.wav")
209
+ output_file_original = os.path.join("tmp", "audio_generated_original.wav")
210
+ output_file_cuda_kernel = os.path.join("tmp", "audio_generated_cuda_kernel.wav")
211
+ write(output_file_real, h.sampling_rate, audio_real)
212
+ write(output_file_original, h.sampling_rate, audio_original)
213
+ write(output_file_cuda_kernel, h.sampling_rate, audio_cuda_kernel)
214
+ print("Example generated audios of original vs. fused CUDA kernel written to tmp!")
215
+ print("Done")
GPT_SoVITS/BigVGAN/train.py ADDED
@@ -0,0 +1,716 @@
1
+ # Copyright (c) 2024 NVIDIA CORPORATION.
2
+ # Licensed under the MIT license.
3
+
4
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
+ # LICENSE is in incl_licenses directory.
6
+
7
+
8
+ import warnings
9
+
10
+ warnings.simplefilter(action="ignore", category=FutureWarning)
11
+ import itertools
12
+ import os
13
+ import time
14
+ import argparse
15
+ import json
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch.utils.tensorboard import SummaryWriter
19
+ from torch.utils.data import DistributedSampler, DataLoader
20
+ import torch.multiprocessing as mp
21
+ from torch.distributed import init_process_group
22
+ from torch.nn.parallel import DistributedDataParallel
23
+ from env import AttrDict, build_env
24
+ from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist, MAX_WAV_VALUE
25
+
26
+ from bigvgan import BigVGAN
27
+ from discriminators import (
28
+ MultiPeriodDiscriminator,
29
+ MultiResolutionDiscriminator,
30
+ MultiBandDiscriminator,
31
+ MultiScaleSubbandCQTDiscriminator,
32
+ )
33
+ from loss import (
34
+ feature_loss,
35
+ generator_loss,
36
+ discriminator_loss,
37
+ MultiScaleMelSpectrogramLoss,
38
+ )
39
+
40
+ from utils import (
41
+ plot_spectrogram,
42
+ plot_spectrogram_clipped,
43
+ scan_checkpoint,
44
+ load_checkpoint,
45
+ save_checkpoint,
46
+ save_audio,
47
+ )
48
+ import torchaudio as ta
49
+ from pesq import pesq
50
+ from tqdm import tqdm
51
+ import auraloss
52
+
53
+ torch.backends.cudnn.benchmark = False
54
+
55
+
56
+ def train(rank, a, h):
57
+ if h.num_gpus > 1:
58
+ # initialize distributed
59
+ init_process_group(
60
+ backend=h.dist_config["dist_backend"],
61
+ init_method=h.dist_config["dist_url"],
62
+ world_size=h.dist_config["world_size"] * h.num_gpus,
63
+ rank=rank,
64
+ )
65
+
66
+ # Set seed and device
67
+ torch.cuda.manual_seed(h.seed)
68
+ torch.cuda.set_device(rank)
69
+ device = torch.device(f"cuda:{rank:d}")
70
+
71
+ # Define BigVGAN generator
72
+ generator = BigVGAN(h).to(device)
73
+
74
+ # Define discriminators. MPD is used by default
75
+ mpd = MultiPeriodDiscriminator(h).to(device)
76
+
77
+ # Define additional discriminators. BigVGAN-v1 uses UnivNet's MRD as default
78
+ # New in BigVGAN-v2: option to switch to new discriminators: MultiBandDiscriminator / MultiScaleSubbandCQTDiscriminator
79
+ if h.get("use_mbd_instead_of_mrd", False): # Switch to MBD
80
+ print("[INFO] using MultiBandDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator")
81
+ # Variable name is kept as "mrd" for backward compatibility & minimal code change
82
+ mrd = MultiBandDiscriminator(h).to(device)
83
+ elif h.get("use_cqtd_instead_of_mrd", False): # Switch to CQTD
84
+ print("[INFO] using MultiScaleSubbandCQTDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator")
85
+ mrd = MultiScaleSubbandCQTDiscriminator(h).to(device)
86
+ else: # Fallback to original MRD in BigVGAN-v1
87
+ mrd = MultiResolutionDiscriminator(h).to(device)
88
+
89
+ # New in BigVGAN-v2: option to switch to multi-scale L1 mel loss
90
+ if h.get("use_multiscale_melloss", False):
91
+ print("[INFO] using multi-scale Mel l1 loss of BigVGAN-v2 instead of the original single-scale loss")
92
+ fn_mel_loss_multiscale = MultiScaleMelSpectrogramLoss(
93
+ sampling_rate=h.sampling_rate
94
+ ) # NOTE: accepts waveform as input
95
+ else:
96
+ fn_mel_loss_singlescale = F.l1_loss
97
+
98
+ # Print the model & number of parameters, and create or scan the latest checkpoint from checkpoints directory
99
+ if rank == 0:
100
+ print(generator)
101
+ print(mpd)
102
+ print(mrd)
103
+ print(f"Generator params: {sum(p.numel() for p in generator.parameters())}")
104
+ print(f"Discriminator mpd params: {sum(p.numel() for p in mpd.parameters())}")
105
+ print(f"Discriminator mrd params: {sum(p.numel() for p in mrd.parameters())}")
106
+ os.makedirs(a.checkpoint_path, exist_ok=True)
107
+ print(f"Checkpoints directory: {a.checkpoint_path}")
108
+
109
+ if os.path.isdir(a.checkpoint_path):
110
+ # New in v2.1: If the step prefix pattern-based checkpoints are not found, also check for renamed files in Hugging Face Hub to resume training
111
+ cp_g = scan_checkpoint(a.checkpoint_path, prefix="g_", renamed_file="bigvgan_generator.pt")
112
+ cp_do = scan_checkpoint(
113
+ a.checkpoint_path,
114
+ prefix="do_",
115
+ renamed_file="bigvgan_discriminator_optimizer.pt",
116
+ )
117
+
118
+ # Load the latest checkpoint if exists
119
+ steps = 0
120
+ if cp_g is None or cp_do is None:
121
+ state_dict_do = None
122
+ last_epoch = -1
123
+ else:
124
+ state_dict_g = load_checkpoint(cp_g, device)
125
+ state_dict_do = load_checkpoint(cp_do, device)
126
+ generator.load_state_dict(state_dict_g["generator"])
127
+ mpd.load_state_dict(state_dict_do["mpd"])
128
+ mrd.load_state_dict(state_dict_do["mrd"])
129
+ steps = state_dict_do["steps"] + 1
130
+ last_epoch = state_dict_do["epoch"]
131
+
132
+ # Initialize DDP, optimizers, and schedulers
133
+ if h.num_gpus > 1:
134
+ generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
135
+ mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device)
136
+ mrd = DistributedDataParallel(mrd, device_ids=[rank]).to(device)
137
+
138
+ optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
139
+ optim_d = torch.optim.AdamW(
140
+ itertools.chain(mrd.parameters(), mpd.parameters()),
141
+ h.learning_rate,
142
+ betas=[h.adam_b1, h.adam_b2],
143
+ )
144
+
145
+ if state_dict_do is not None:
146
+ optim_g.load_state_dict(state_dict_do["optim_g"])
147
+ optim_d.load_state_dict(state_dict_do["optim_d"])
148
+
149
+ scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
150
+ scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
151
+
152
+ # Define training and validation datasets
153
+
154
+ """
155
+ unseen_validation_filelist will contain sample filepaths outside the seen training & validation dataset
156
+ Example: trained on LibriTTS, validate on VCTK
157
+ """
158
+ training_filelist, validation_filelist, list_unseen_validation_filelist = get_dataset_filelist(a)
159
+
160
+ trainset = MelDataset(
161
+ training_filelist,
162
+ h,
163
+ h.segment_size,
164
+ h.n_fft,
165
+ h.num_mels,
166
+ h.hop_size,
167
+ h.win_size,
168
+ h.sampling_rate,
169
+ h.fmin,
170
+ h.fmax,
171
+ shuffle=False if h.num_gpus > 1 else True,
172
+ fmax_loss=h.fmax_for_loss,
173
+ device=device,
174
+ fine_tuning=a.fine_tuning,
175
+ base_mels_path=a.input_mels_dir,
176
+ is_seen=True,
177
+ )
178
+
179
+ train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
180
+
181
+ train_loader = DataLoader(
182
+ trainset,
183
+ num_workers=h.num_workers,
184
+ shuffle=False,
185
+ sampler=train_sampler,
186
+ batch_size=h.batch_size,
187
+ pin_memory=True,
188
+ drop_last=True,
189
+ )
190
+
191
+ if rank == 0:
192
+ validset = MelDataset(
193
+ validation_filelist,
194
+ h,
195
+ h.segment_size,
196
+ h.n_fft,
197
+ h.num_mels,
198
+ h.hop_size,
199
+ h.win_size,
200
+ h.sampling_rate,
201
+ h.fmin,
202
+ h.fmax,
203
+ False,
204
+ False,
205
+ fmax_loss=h.fmax_for_loss,
206
+ device=device,
207
+ fine_tuning=a.fine_tuning,
208
+ base_mels_path=a.input_mels_dir,
209
+ is_seen=True,
210
+ )
211
+ validation_loader = DataLoader(
212
+ validset,
213
+ num_workers=1,
214
+ shuffle=False,
215
+ sampler=None,
216
+ batch_size=1,
217
+ pin_memory=True,
218
+ drop_last=True,
219
+ )
220
+
221
+ list_unseen_validset = []
222
+ list_unseen_validation_loader = []
223
+ for i in range(len(list_unseen_validation_filelist)):
224
+ unseen_validset = MelDataset(
225
+ list_unseen_validation_filelist[i],
226
+ h,
227
+ h.segment_size,
228
+ h.n_fft,
229
+ h.num_mels,
230
+ h.hop_size,
231
+ h.win_size,
232
+ h.sampling_rate,
233
+ h.fmin,
234
+ h.fmax,
235
+ False,
236
+ False,
237
+ fmax_loss=h.fmax_for_loss,
238
+ device=device,
239
+ fine_tuning=a.fine_tuning,
240
+ base_mels_path=a.input_mels_dir,
241
+ is_seen=False,
242
+ )
243
+ unseen_validation_loader = DataLoader(
244
+ unseen_validset,
245
+ num_workers=1,
246
+ shuffle=False,
247
+ sampler=None,
248
+ batch_size=1,
249
+ pin_memory=True,
250
+ drop_last=True,
251
+ )
252
+ list_unseen_validset.append(unseen_validset)
253
+ list_unseen_validation_loader.append(unseen_validation_loader)
254
+
255
+ # Tensorboard logger
256
+ sw = SummaryWriter(os.path.join(a.checkpoint_path, "logs"))
257
+ if a.save_audio: # Also save audio to disk if --save_audio is set to True
258
+ os.makedirs(os.path.join(a.checkpoint_path, "samples"), exist_ok=True)
259
+
260
+ """
261
+ Validation loop, "mode" parameter is automatically defined as (seen or unseen)_(name of the dataset).
262
+ If the name of the dataset contains "nonspeech", it skips PESQ calculation to prevent errors
263
+ """
264
+
265
+ def validate(rank, a, h, loader, mode="seen"):
266
+ assert rank == 0, "validate should only run on rank=0"
267
+ generator.eval()
268
+ torch.cuda.empty_cache()
269
+
270
+ val_err_tot = 0
271
+ val_pesq_tot = 0
272
+ val_mrstft_tot = 0
273
+
274
+ # Modules for evaluation metrics
275
+ pesq_resampler = ta.transforms.Resample(h.sampling_rate, 16000).cuda()
276
+ loss_mrstft = auraloss.freq.MultiResolutionSTFTLoss(device="cuda")
277
+
278
+ if a.save_audio: # Also save audio to disk if --save_audio is set to True
279
+ os.makedirs(
280
+ os.path.join(a.checkpoint_path, "samples", f"gt_{mode}"),
281
+ exist_ok=True,
282
+ )
283
+ os.makedirs(
284
+ os.path.join(a.checkpoint_path, "samples", f"{mode}_{steps:08d}"),
285
+ exist_ok=True,
286
+ )
287
+
288
+ with torch.no_grad():
289
+ print(f"step {steps} {mode} speaker validation...")
290
+
291
+ # Loop over validation set and compute metrics
292
+ for j, batch in enumerate(tqdm(loader)):
293
+ x, y, _, y_mel = batch
294
+ y = y.to(device)
295
+ if hasattr(generator, "module"):
296
+ y_g_hat = generator.module(x.to(device))
297
+ else:
298
+ y_g_hat = generator(x.to(device))
299
+ y_mel = y_mel.to(device, non_blocking=True)
300
+ y_g_hat_mel = mel_spectrogram(
301
+ y_g_hat.squeeze(1),
302
+ h.n_fft,
303
+ h.num_mels,
304
+ h.sampling_rate,
305
+ h.hop_size,
306
+ h.win_size,
307
+ h.fmin,
308
+ h.fmax_for_loss,
309
+ )
310
+ min_t = min(y_mel.size(-1), y_g_hat_mel.size(-1))
311
+ val_err_tot += F.l1_loss(y_mel[..., :min_t], y_g_hat_mel[..., :min_t]).item()
312
+
313
+ # PESQ calculation. only evaluate PESQ if it's speech signal (nonspeech PESQ will error out)
314
+ if "nonspeech" not in mode: # Skips if the name of dataset (in mode string) contains "nonspeech"
315
+ # Resample to 16000 for pesq
316
+ y_16k = pesq_resampler(y)
317
+ y_g_hat_16k = pesq_resampler(y_g_hat.squeeze(1))
318
+ y_int_16k = (y_16k[0] * MAX_WAV_VALUE).short().cpu().numpy()
319
+ y_g_hat_int_16k = (y_g_hat_16k[0] * MAX_WAV_VALUE).short().cpu().numpy()
320
+ val_pesq_tot += pesq(16000, y_int_16k, y_g_hat_int_16k, "wb")
321
+
322
+ # MRSTFT calculation
323
+ min_t = min(y.size(-1), y_g_hat.size(-1))
324
+ val_mrstft_tot += loss_mrstft(y_g_hat[..., :min_t], y[..., :min_t]).item()
325
+
326
+ # Log audio and figures to Tensorboard
327
+ if j % a.eval_subsample == 0: # Subsample every nth from validation set
328
+ if steps >= 0:
329
+ sw.add_audio(f"gt_{mode}/y_{j}", y[0], steps, h.sampling_rate)
330
+ if a.save_audio: # Also save audio to disk if --save_audio is set to True
331
+ save_audio(
332
+ y[0],
333
+ os.path.join(
334
+ a.checkpoint_path,
335
+ "samples",
336
+ f"gt_{mode}",
337
+ f"{j:04d}.wav",
338
+ ),
339
+ h.sampling_rate,
340
+ )
341
+ sw.add_figure(
342
+ f"gt_{mode}/y_spec_{j}",
343
+ plot_spectrogram(x[0]),
344
+ steps,
345
+ )
346
+
347
+ sw.add_audio(
348
+ f"generated_{mode}/y_hat_{j}",
349
+ y_g_hat[0],
350
+ steps,
351
+ h.sampling_rate,
352
+ )
353
+ if a.save_audio: # Also save audio to disk if --save_audio is set to True
354
+ save_audio(
355
+ y_g_hat[0, 0],
356
+ os.path.join(
357
+ a.checkpoint_path,
358
+ "samples",
359
+ f"{mode}_{steps:08d}",
360
+ f"{j:04d}.wav",
361
+ ),
362
+ h.sampling_rate,
363
+ )
364
+ # Spectrogram of synthesized audio
365
+ y_hat_spec = mel_spectrogram(
366
+ y_g_hat.squeeze(1),
367
+ h.n_fft,
368
+ h.num_mels,
369
+ h.sampling_rate,
370
+ h.hop_size,
371
+ h.win_size,
372
+ h.fmin,
373
+ h.fmax,
374
+ )
375
+ sw.add_figure(
376
+ f"generated_{mode}/y_hat_spec_{j}",
377
+ plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()),
378
+ steps,
379
+ )
380
+
381
+ """
382
+ Visualization of spectrogram difference between GT and synthesized audio, difference higher than 1 is clipped for better visualization.
383
+ """
384
+ spec_delta = torch.clamp(
385
+ torch.abs(x[0] - y_hat_spec.squeeze(0).cpu()),
386
+ min=1e-6,
387
+ max=1.0,
388
+ )
389
+ sw.add_figure(
390
+ f"delta_dclip1_{mode}/spec_{j}",
391
+ plot_spectrogram_clipped(spec_delta.numpy(), clip_max=1.0),
392
+ steps,
393
+ )
394
+
395
+ val_err = val_err_tot / (j + 1)
396
+ val_pesq = val_pesq_tot / (j + 1)
397
+ val_mrstft = val_mrstft_tot / (j + 1)
398
+ # Log evaluation metrics to Tensorboard
399
+ sw.add_scalar(f"validation_{mode}/mel_spec_error", val_err, steps)
400
+ sw.add_scalar(f"validation_{mode}/pesq", val_pesq, steps)
401
+ sw.add_scalar(f"validation_{mode}/mrstft", val_mrstft, steps)
402
+
403
+ generator.train()
404
+
405
+ # If the checkpoint is loaded, start with validation loop
406
+ if steps != 0 and rank == 0 and not a.debug:
407
+ if not a.skip_seen:
408
+ validate(
409
+ rank,
410
+ a,
411
+ h,
412
+ validation_loader,
413
+ mode=f"seen_{train_loader.dataset.name}",
414
+ )
415
+ for i in range(len(list_unseen_validation_loader)):
416
+ validate(
417
+ rank,
418
+ a,
419
+ h,
420
+ list_unseen_validation_loader[i],
421
+ mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}",
422
+ )
423
+ # Exit the script if --evaluate is set to True
424
+ if a.evaluate:
425
+ exit()
426
+
427
+ # Main training loop
428
+ generator.train()
429
+ mpd.train()
430
+ mrd.train()
431
+ for epoch in range(max(0, last_epoch), a.training_epochs):
432
+ if rank == 0:
433
+ start = time.time()
434
+ print(f"Epoch: {epoch + 1}")
435
+
436
+ if h.num_gpus > 1:
437
+ train_sampler.set_epoch(epoch)
438
+
439
+ for i, batch in enumerate(train_loader):
440
+ if rank == 0:
441
+ start_b = time.time()
442
+ x, y, _, y_mel = batch
443
+
444
+ x = x.to(device, non_blocking=True)
445
+ y = y.to(device, non_blocking=True)
446
+ y_mel = y_mel.to(device, non_blocking=True)
447
+ y = y.unsqueeze(1)
448
+
449
+ y_g_hat = generator(x)
450
+ y_g_hat_mel = mel_spectrogram(
451
+ y_g_hat.squeeze(1),
452
+ h.n_fft,
453
+ h.num_mels,
454
+ h.sampling_rate,
455
+ h.hop_size,
456
+ h.win_size,
457
+ h.fmin,
458
+ h.fmax_for_loss,
459
+ )
460
+
461
+ optim_d.zero_grad()
462
+
463
+ # MPD
464
+ y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
465
+ loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
466
+
467
+ # MRD
468
+ y_ds_hat_r, y_ds_hat_g, _, _ = mrd(y, y_g_hat.detach())
469
+ loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
470
+
471
+ loss_disc_all = loss_disc_s + loss_disc_f
472
+
473
+ # Set clip_grad_norm value
474
+ clip_grad_norm = h.get("clip_grad_norm", 1000.0) # Default to 1000
475
+
476
+ # Whether to freeze D for initial training steps
477
+ if steps >= a.freeze_step:
478
+ loss_disc_all.backward()
479
+ grad_norm_mpd = torch.nn.utils.clip_grad_norm_(mpd.parameters(), clip_grad_norm)
480
+ grad_norm_mrd = torch.nn.utils.clip_grad_norm_(mrd.parameters(), clip_grad_norm)
481
+ optim_d.step()
482
+ else:
483
+ print(f"[WARNING] skipping D training for the first {a.freeze_step} steps")
484
+ grad_norm_mpd = 0.0
485
+ grad_norm_mrd = 0.0
486
+
487
+ # Generator
488
+ optim_g.zero_grad()
489
+
490
+ # L1 Mel-Spectrogram Loss
491
+ lambda_melloss = h.get("lambda_melloss", 45.0) # Defaults to 45 in BigVGAN-v1 if not set
492
+ if h.get("use_multiscale_melloss", False): # uses wav <y, y_g_hat> for loss
493
+ loss_mel = fn_mel_loss_multiscale(y, y_g_hat) * lambda_melloss
494
+ else: # Uses mel <y_mel, y_g_hat_mel> for loss
495
+ loss_mel = fn_mel_loss_singlescale(y_mel, y_g_hat_mel) * lambda_melloss
496
+
497
+ # MPD loss
498
+ y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
499
+ loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
500
+ loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
501
+
502
+ # MRD loss
503
+ y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = mrd(y, y_g_hat)
504
+ loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
505
+ loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
506
+
507
+ if steps >= a.freeze_step:
508
+ loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
509
+ else:
510
+ print(f"[WARNING] using regression loss only for G for the first {a.freeze_step} steps")
511
+ loss_gen_all = loss_mel
512
+
513
+ loss_gen_all.backward()
514
+ grad_norm_g = torch.nn.utils.clip_grad_norm_(generator.parameters(), clip_grad_norm)
515
+ optim_g.step()
516
+
517
+ if rank == 0:
518
+ # STDOUT logging
519
+ if steps % a.stdout_interval == 0:
520
+ mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to stdout
521
+ print(
522
+ f"Steps: {steps:d}, "
523
+ f"Gen Loss Total: {loss_gen_all:4.3f}, "
524
+ f"Mel Error: {mel_error:4.3f}, "
525
+ f"s/b: {time.time() - start_b:4.3f} "
526
+ f"lr: {optim_g.param_groups[0]['lr']:4.7f} "
527
+ f"grad_norm_g: {grad_norm_g:4.3f}"
528
+ )
529
+
530
+ # Checkpointing
531
+ if steps % a.checkpoint_interval == 0 and steps != 0:
532
+ checkpoint_path = f"{a.checkpoint_path}/g_{steps:08d}"
533
+ save_checkpoint(
534
+ checkpoint_path,
535
+ {"generator": (generator.module if h.num_gpus > 1 else generator).state_dict()},
536
+ )
537
+ checkpoint_path = f"{a.checkpoint_path}/do_{steps:08d}"
538
+ save_checkpoint(
539
+ checkpoint_path,
540
+ {
541
+ "mpd": (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
542
+ "mrd": (mrd.module if h.num_gpus > 1 else mrd).state_dict(),
543
+ "optim_g": optim_g.state_dict(),
544
+ "optim_d": optim_d.state_dict(),
545
+ "steps": steps,
546
+ "epoch": epoch,
547
+ },
548
+ )
549
+
550
+ # Tensorboard summary logging
551
+ if steps % a.summary_interval == 0:
552
+ mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to tensorboard
553
+ sw.add_scalar("training/gen_loss_total", loss_gen_all.item(), steps)
554
+ sw.add_scalar("training/mel_spec_error", mel_error, steps)
555
+ sw.add_scalar("training/fm_loss_mpd", loss_fm_f.item(), steps)
556
+ sw.add_scalar("training/gen_loss_mpd", loss_gen_f.item(), steps)
557
+ sw.add_scalar("training/disc_loss_mpd", loss_disc_f.item(), steps)
558
+ sw.add_scalar("training/grad_norm_mpd", grad_norm_mpd, steps)
559
+ sw.add_scalar("training/fm_loss_mrd", loss_fm_s.item(), steps)
560
+ sw.add_scalar("training/gen_loss_mrd", loss_gen_s.item(), steps)
561
+ sw.add_scalar("training/disc_loss_mrd", loss_disc_s.item(), steps)
562
+ sw.add_scalar("training/grad_norm_mrd", grad_norm_mrd, steps)
563
+ sw.add_scalar("training/grad_norm_g", grad_norm_g, steps)
564
+ sw.add_scalar("training/learning_rate_d", scheduler_d.get_last_lr()[0], steps)
565
+ sw.add_scalar("training/learning_rate_g", scheduler_g.get_last_lr()[0], steps)
566
+ sw.add_scalar("training/epoch", epoch + 1, steps)
567
+
568
+ # Validation
569
+ if steps % a.validation_interval == 0:
570
+ # Plot training input x so far used
571
+ for i_x in range(x.shape[0]):
572
+ sw.add_figure(
573
+ f"training_input/x_{i_x}",
574
+ plot_spectrogram(x[i_x].cpu()),
575
+ steps,
576
+ )
577
+ sw.add_audio(
578
+ f"training_input/y_{i_x}",
579
+ y[i_x][0],
580
+ steps,
581
+ h.sampling_rate,
582
+ )
583
+
584
+ # Seen and unseen speakers validation loops
585
+ if not a.debug and steps != 0:
586
+ validate(
587
+ rank,
588
+ a,
589
+ h,
590
+ validation_loader,
591
+ mode=f"seen_{train_loader.dataset.name}",
592
+ )
593
+ for i in range(len(list_unseen_validation_loader)):
594
+ validate(
595
+ rank,
596
+ a,
597
+ h,
598
+ list_unseen_validation_loader[i],
599
+ mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}",
600
+ )
601
+ steps += 1
602
+
603
+ # BigVGAN-v2 learning rate scheduler is changed from epoch-level to step-level
604
+ scheduler_g.step()
605
+ scheduler_d.step()
606
+
607
+ if rank == 0:
608
+ print(f"Time taken for epoch {epoch + 1} is {int(time.time() - start)} sec\n")
609
+
610
+
611
+ def main():
612
+ print("Initializing Training Process..")
613
+
614
+ parser = argparse.ArgumentParser()
615
+
616
+ parser.add_argument("--group_name", default=None)
617
+
618
+ parser.add_argument("--input_wavs_dir", default="LibriTTS")
619
+ parser.add_argument("--input_mels_dir", default="ft_dataset")
620
+ parser.add_argument("--input_training_file", default="tests/LibriTTS/train-full.txt")
621
+ parser.add_argument("--input_validation_file", default="tests/LibriTTS/val-full.txt")
622
+
623
+ parser.add_argument(
624
+ "--list_input_unseen_wavs_dir",
625
+ nargs="+",
626
+ default=["tests/LibriTTS", "tests/LibriTTS"],
627
+ )
628
+ parser.add_argument(
629
+ "--list_input_unseen_validation_file",
630
+ nargs="+",
631
+ default=["tests/LibriTTS/dev-clean.txt", "tests/LibriTTS/dev-other.txt"],
632
+ )
633
+
634
+ parser.add_argument("--checkpoint_path", default="exp/bigvgan")
635
+ parser.add_argument("--config", default="")
636
+
637
+ parser.add_argument("--training_epochs", default=100000, type=int)
638
+ parser.add_argument("--stdout_interval", default=5, type=int)
639
+ parser.add_argument("--checkpoint_interval", default=50000, type=int)
640
+ parser.add_argument("--summary_interval", default=100, type=int)
641
+ parser.add_argument("--validation_interval", default=50000, type=int)
642
+
643
+ parser.add_argument(
644
+ "--freeze_step",
645
+ default=0,
646
+ type=int,
647
+ help="freeze D for the first specified steps. G only uses regression loss for these steps.",
648
+ )
649
+
650
+ parser.add_argument("--fine_tuning", default=False, type=bool)
651
+
652
+ parser.add_argument(
653
+ "--debug",
654
+ default=False,
655
+ type=bool,
656
+ help="debug mode. skips validation loop throughout training",
657
+ )
658
+ parser.add_argument(
659
+ "--evaluate",
660
+ default=False,
661
+ type=bool,
662
+ help="only run evaluation from checkpoint and exit",
663
+ )
664
+ parser.add_argument(
665
+ "--eval_subsample",
666
+ default=5,
667
+ type=int,
668
+ help="subsampling during evaluation loop",
669
+ )
670
+ parser.add_argument(
671
+ "--skip_seen",
672
+ default=False,
673
+ type=bool,
674
+ help="skip seen dataset. useful for test set inference",
675
+ )
676
+ parser.add_argument(
677
+ "--save_audio",
678
+ default=False,
679
+ type=bool,
680
+ help="save audio of test set inference to disk",
681
+ )
682
+
683
+ a = parser.parse_args()
684
+
685
+ with open(a.config) as f:
686
+ data = f.read()
687
+
688
+ json_config = json.loads(data)
689
+ h = AttrDict(json_config)
690
+
691
+ build_env(a.config, "config.json", a.checkpoint_path)
692
+
693
+ torch.manual_seed(h.seed)
694
+ if torch.cuda.is_available():
695
+ torch.cuda.manual_seed(h.seed)
696
+ h.num_gpus = torch.cuda.device_count()
697
+ h.batch_size = int(h.batch_size / h.num_gpus)
698
+ print(f"Batch size per GPU: {h.batch_size}")
699
+ else:
700
+ pass
701
+
702
+ if h.num_gpus > 1:
703
+ mp.spawn(
704
+ train,
705
+ nprocs=h.num_gpus,
706
+ args=(
707
+ a,
708
+ h,
709
+ ),
710
+ )
711
+ else:
712
+ train(0, a, h)
713
+
714
+
715
+ if __name__ == "__main__":
716
+ main()
GPT_SoVITS/BigVGAN/utils0.py ADDED
@@ -0,0 +1,99 @@
1
+ # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
2
+ # LICENSE is in incl_licenses directory.
3
+
4
+ import glob
5
+ import os
6
+ import matplotlib
7
+ import torch
8
+ from torch.nn.utils import weight_norm
9
+
10
+ matplotlib.use("Agg")
11
+ import matplotlib.pylab as plt
12
+ from .meldataset import MAX_WAV_VALUE
13
+ from scipy.io.wavfile import write
14
+
15
+
16
+ def plot_spectrogram(spectrogram):
17
+ fig, ax = plt.subplots(figsize=(10, 2))
18
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
19
+ plt.colorbar(im, ax=ax)
20
+
21
+ fig.canvas.draw()
22
+ plt.close()
23
+
24
+ return fig
25
+
26
+
27
+ def plot_spectrogram_clipped(spectrogram, clip_max=2.0):
28
+ fig, ax = plt.subplots(figsize=(10, 2))
29
+ im = ax.imshow(
30
+ spectrogram,
31
+ aspect="auto",
32
+ origin="lower",
33
+ interpolation="none",
34
+ vmin=1e-6,
35
+ vmax=clip_max,
36
+ )
37
+ plt.colorbar(im, ax=ax)
38
+
39
+ fig.canvas.draw()
40
+ plt.close()
41
+
42
+ return fig
43
+
44
+
45
+ def init_weights(m, mean=0.0, std=0.01):
46
+ classname = m.__class__.__name__
47
+ if classname.find("Conv") != -1:
48
+ m.weight.data.normal_(mean, std)
49
+
50
+
51
+ def apply_weight_norm(m):
52
+ classname = m.__class__.__name__
53
+ if classname.find("Conv") != -1:
54
+ weight_norm(m)
55
+
56
+
57
+ def get_padding(kernel_size, dilation=1):
58
+ return int((kernel_size * dilation - dilation) / 2)
59
+
60
+
61
+ def load_checkpoint(filepath, device):
62
+ assert os.path.isfile(filepath)
63
+ print(f"Loading '{filepath}'")
64
+ checkpoint_dict = torch.load(filepath, map_location=device)
65
+ print("Complete.")
66
+ return checkpoint_dict
67
+
68
+
69
+ def save_checkpoint(filepath, obj):
70
+ print(f"Saving checkpoint to {filepath}")
71
+ torch.save(obj, filepath)
72
+ print("Complete.")
73
+
74
+
75
+ def scan_checkpoint(cp_dir, prefix, renamed_file=None):
76
+ # Fallback to original scanning logic first
77
+ pattern = os.path.join(cp_dir, prefix + "????????")
78
+ cp_list = glob.glob(pattern)
79
+
80
+ if len(cp_list) > 0:
81
+ last_checkpoint_path = sorted(cp_list)[-1]
82
+ print(f"[INFO] Resuming from checkpoint: '{last_checkpoint_path}'")
83
+ return last_checkpoint_path
84
+
85
+ # If no pattern-based checkpoints are found, check for renamed file
86
+ if renamed_file:
87
+ renamed_path = os.path.join(cp_dir, renamed_file)
88
+ if os.path.isfile(renamed_path):
89
+ print(f"[INFO] Resuming from renamed checkpoint: '{renamed_file}'")
90
+ return renamed_path
91
+
92
+ return None
93
+
94
+
95
+ def save_audio(audio, path, sr):
96
+ # wav: torch with 1d shape
97
+ audio = audio * MAX_WAV_VALUE
98
+ audio = audio.cpu().numpy().astype("int16")
99
+ write(path, sr, audio)
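
These helpers implement the checkpoint and audio I/O used by train.py. A minimal sketch of the resume/save flow, mirroring how train.py calls them; it assumes the module is importable as `utils`, the name train.py uses (here the file is uploaded as utils0.py), and a hypothetical checkpoint directory:

```python
# Resume from the latest step-named checkpoint, falling back to the renamed
# Hugging Face file, then save a new generator checkpoint under the same scheme.
import torch

from utils import scan_checkpoint, load_checkpoint, save_checkpoint

cp_dir = "exp/bigvgan"  # hypothetical checkpoint directory
device = "cuda" if torch.cuda.is_available() else "cpu"

# Prefer g_???????? files; otherwise try the renamed Hub file.
cp_g = scan_checkpoint(cp_dir, prefix="g_", renamed_file="bigvgan_generator.pt")
if cp_g is not None:
    state_dict_g = load_checkpoint(cp_g, device)
    # generator.load_state_dict(state_dict_g["generator"])

# Saving follows the same layout: the generator state dict lives under "generator".
# save_checkpoint(f"{cp_dir}/g_{steps:08d}", {"generator": generator.state_dict()})
```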