diff --git a/GPT_SoVITS/BigVGAN/LICENSE b/GPT_SoVITS/BigVGAN/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4c78361c86d4f685117d60d6623e2197fcfed706 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 NVIDIA CORPORATION. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/GPT_SoVITS/BigVGAN/README.md b/GPT_SoVITS/BigVGAN/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2fa70ceea647053933b913b329041ee8c41526db --- /dev/null +++ b/GPT_SoVITS/BigVGAN/README.md @@ -0,0 +1,266 @@ +## BigVGAN: A Universal Neural Vocoder with Large-Scale Training + +#### Sang-gil Lee, Wei Ping, Boris Ginsburg, Bryan Catanzaro, Sungroh Yoon + +[[Paper]](https://arxiv.org/abs/2206.04658) - [[Code]](https://github.com/NVIDIA/BigVGAN) - [[Showcase]](https://bigvgan-demo.github.io/) - [[Project Page]](https://research.nvidia.com/labs/adlr/projects/bigvgan/) - [[Weights]](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a) - [[Demo]](https://huggingface.co/spaces/nvidia/BigVGAN) + +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/bigvgan-a-universal-neural-vocoder-with-large/speech-synthesis-on-libritts)](https://paperswithcode.com/sota/speech-synthesis-on-libritts?p=bigvgan-a-universal-neural-vocoder-with-large) + +
+
+## News
+- **Sep 2024 (v2.4):**
+  - We have updated the pretrained checkpoints trained for 5M steps. This is the final release of the BigVGAN-v2 checkpoints.
+
+- **Jul 2024 (v2.3):**
+  - General refactor and code improvements for improved readability.
+  - Fully fused CUDA kernel of the anti-aliased activation (upsampling + activation + downsampling) with an inference speed benchmark.
+
+- **Jul 2024 (v2.2):** The repository now includes an interactive local demo using gradio.
+
+- **Jul 2024 (v2.1):** BigVGAN is now integrated with 🤗 Hugging Face Hub with easy access to inference using pretrained checkpoints. We also provide an interactive demo on Hugging Face Spaces.
+
+- **Jul 2024 (v2):** We release BigVGAN-v2 along with pretrained checkpoints. Below are the highlights:
+  - Custom CUDA kernel for inference: we provide a fused upsampling + activation kernel written in CUDA for accelerated inference speed. Our test shows 1.5 - 3x faster speed on a single A100 GPU.
+  - Improved discriminator and loss: BigVGAN-v2 is trained using a [multi-scale sub-band CQT discriminator](https://arxiv.org/abs/2311.14957) and a [multi-scale mel spectrogram loss](https://arxiv.org/abs/2306.06546).
+  - Larger training data: BigVGAN-v2 is trained using datasets containing diverse audio types, including speech in multiple languages, environmental sounds, and instruments.
+  - We provide pretrained checkpoints of BigVGAN-v2 using diverse audio configurations, supporting up to 44 kHz sampling rate and 512x upsampling ratio.
+
+## Installation
+
+The codebase has been tested on Python `3.10` and PyTorch `2.3.1` conda packages with either `pytorch-cuda=12.1` or `pytorch-cuda=11.8`. Below is an example command to create the conda environment:
+
+```shell
+conda create -n bigvgan python=3.10 pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
+conda activate bigvgan
+```
+
+Clone the repository and install dependencies:
+
+```shell
+git clone https://github.com/NVIDIA/BigVGAN
+cd BigVGAN
+pip install -r requirements.txt
+```
+
+## Inference Quickstart using 🤗 Hugging Face Hub
+
+The example below shows how to use BigVGAN: load the pretrained BigVGAN generator from Hugging Face Hub, compute a mel spectrogram from an input waveform, and generate a synthesized waveform using the mel spectrogram as the model's input.
+
+```python
+device = 'cuda'
+
+import torch
+import bigvgan
+import librosa
+from meldataset import get_mel_spectrogram
+
+# instantiate the model. You can optionally set use_cuda_kernel=True for faster inference.
+model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x', use_cuda_kernel=False)
+
+# remove weight norm in the model and set to eval mode
+model.remove_weight_norm()
+model = model.eval().to(device)
+
+# load wav file and compute mel spectrogram
+wav_path = '/path/to/your/audio.wav'
+wav, sr = librosa.load(wav_path, sr=model.h.sampling_rate, mono=True) # wav is np.ndarray with shape [T_time] and values in [-1, 1]
+wav = torch.FloatTensor(wav).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time]
+
+# compute mel spectrogram from the ground truth audio
+mel = get_mel_spectrogram(wav, model.h).to(device) # mel is FloatTensor with shape [B(1), C_mel, T_frame]
+
+# generate waveform from mel
+with torch.inference_mode():
+    wav_gen = model(mel) # wav_gen is FloatTensor with shape [B(1), 1, T_time] and values in [-1, 1]
+wav_gen_float = wav_gen.squeeze(0).cpu() # wav_gen_float is FloatTensor with shape [1, T_time]
+
+# you can convert the generated waveform to 16 bit linear PCM
+wav_gen_int16 = (wav_gen_float * 32767.0).numpy().astype('int16') # wav_gen_int16 is np.ndarray with shape [1, T_time] and int16 dtype
+```
+
+## Local gradio demo
+
+You can run a local gradio demo using the commands below:
+
+```shell
+pip install -r demo/requirements.txt
+python demo/app.py
+```
+
+## Training
+
+Create symbolic links to the root of the dataset. The codebase uses filelists with paths relative to the dataset root. Below are example commands for the LibriTTS dataset:
+
+```shell
+cd filelists/LibriTTS && \
+ln -s /path/to/your/LibriTTS/train-clean-100 train-clean-100 && \
+ln -s /path/to/your/LibriTTS/train-clean-360 train-clean-360 && \
+ln -s /path/to/your/LibriTTS/train-other-500 train-other-500 && \
+ln -s /path/to/your/LibriTTS/dev-clean dev-clean && \
+ln -s /path/to/your/LibriTTS/dev-other dev-other && \
+ln -s /path/to/your/LibriTTS/test-clean test-clean && \
+ln -s /path/to/your/LibriTTS/test-other test-other && \
+cd ../..
+```
+
+Train the BigVGAN model. Below is an example command for training BigVGAN-v2 on the LibriTTS dataset at 24 kHz with a full 100-band mel spectrogram as input:
+
+```shell
+python train.py \
+--config configs/bigvgan_v2_24khz_100band_256x.json \
+--input_wavs_dir filelists/LibriTTS \
+--input_training_file filelists/LibriTTS/train-full.txt \
+--input_validation_file filelists/LibriTTS/val-full.txt \
+--list_input_unseen_wavs_dir filelists/LibriTTS filelists/LibriTTS \
+--list_input_unseen_validation_file filelists/LibriTTS/dev-clean.txt filelists/LibriTTS/dev-other.txt \
+--checkpoint_path exp/bigvgan_v2_24khz_100band_256x
+```
+
+## Synthesis
+
+Synthesize from a trained BigVGAN model. Below is an example command for generating audio from the model.
+It computes mel spectrograms using wav files from `--input_wavs_dir` and saves the generated audio to `--output_dir`.
+
+```shell
+python inference.py \
+--checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
+--input_wavs_dir /path/to/your/input_wav \
+--output_dir /path/to/your/output_wav
+```
+
+`inference_e2e.py` supports synthesis directly from mel spectrograms saved in `.npy` format, with shape `[1, channel, frame]` or `[channel, frame]`.
+It loads mel spectrograms from `--input_mels_dir` and saves the generated audio to `--output_dir`.
+
+Make sure that the STFT hyperparameters for the mel spectrogram match those of the model, which are defined in the `config.json` of the corresponding model.
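+
+If you need to create such `.npy` mel inputs yourself, a minimal sketch is shown below. It simply reuses `get_mel_spectrogram` and the hyperparameters `model.h` from the quickstart above so that the STFT settings match the model; the paths and output file name are placeholders.
+
+```python
+import numpy as np
+import torch
+import librosa
+import bigvgan
+from meldataset import get_mel_spectrogram
+
+# Load the model only to reuse its hyperparameters (model.h); pick the checkpoint
+# that matches the model you will later pass to inference_e2e.py.
+model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_24khz_100band_256x', use_cuda_kernel=False)
+
+wav, sr = librosa.load('/path/to/your/audio.wav', sr=model.h.sampling_rate, mono=True)
+wav = torch.FloatTensor(wav).unsqueeze(0)  # [1, T_time]
+
+mel = get_mel_spectrogram(wav, model.h)  # FloatTensor with shape [1, C_mel, T_frame]
+np.save('/path/to/your/input_mel/audio.npy', mel.squeeze(0).numpy())  # saved as [C_mel, T_frame]
+```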
+
+```shell
+python inference_e2e.py \
+--checkpoint_file /path/to/your/bigvgan_v2_24khz_100band_256x/bigvgan_generator.pt \
+--input_mels_dir /path/to/your/input_mel \
+--output_dir /path/to/your/output_wav
+```
+
+## Using Custom CUDA Kernel for Synthesis
+
+You can apply the fast CUDA inference kernel by using the `use_cuda_kernel` parameter when instantiating BigVGAN:
+
+```python
+generator = BigVGAN(h, use_cuda_kernel=True)
+```
+
+You can also pass `--use_cuda_kernel` to `inference.py` and `inference_e2e.py` to enable this feature.
+
+The first time it is applied, the codebase builds the kernel using `nvcc` and `ninja`. If the build succeeds, the kernel is saved to `alias_free_activation/cuda/build` and the model automatically loads the kernel. The codebase has been tested using CUDA `12.1`.
+
+Please make sure that both `nvcc` and `ninja` are installed on your system and that the installed `nvcc` matches the CUDA version your PyTorch build uses.
+
+We recommend running `test_cuda_vs_torch_model.py` first to build and check the correctness of the CUDA kernel. See the example command below and its output, where it returns `[Success] test CUDA fused vs. plain torch BigVGAN inference`:
+
+```shell
+python tests/test_cuda_vs_torch_model.py \
+--checkpoint_file /path/to/your/bigvgan_generator.pt
+```
+
+```shell
+loading plain Pytorch BigVGAN
+...
+loading CUDA kernel BigVGAN with auto-build
+Detected CUDA files, patching ldflags
+Emitting ninja build file /path/to/your/BigVGAN/alias_free_activation/cuda/build/build.ninja..
+Building extension module anti_alias_activation_cuda...
+...
+Loading extension module anti_alias_activation_cuda...
+...
+Loading '/path/to/your/bigvgan_generator.pt'
+...
+[Success] test CUDA fused vs. plain torch BigVGAN inference
+ > mean_difference=0.0007238413265440613
+...
+```
+
+If you see `[Fail] test CUDA fused vs. plain torch BigVGAN inference`, it means that the CUDA kernel inference is incorrect. Please check if the `nvcc` installed on your system is compatible with your PyTorch version.
+
+## Pretrained Models
+
+We provide the [pretrained models on Hugging Face Collections](https://huggingface.co/collections/nvidia/bigvgan-66959df3d97fd7d98d97dc9a).
+You can download the generator weight (named `bigvgan_generator.pt`) and its discriminator/optimizer states (named `bigvgan_discriminator_optimizer.pt`) from the listed model repositories.
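+
+The checkpoints can also be fetched programmatically with `huggingface_hub`, which the codebase already uses for `from_pretrained`. The snippet below is an illustrative sketch; the repository id is one of the models listed below, so pick the one matching your configuration. The returned generator path can be passed directly as `--checkpoint_file` to the inference scripts; the discriminator/optimizer states are only needed if you plan to fine-tune.
+
+```python
+from huggingface_hub import hf_hub_download
+
+# Downloads into the local Hugging Face cache and returns the local file path.
+generator_path = hf_hub_download(
+    repo_id="nvidia/bigvgan_v2_24khz_100band_256x",
+    filename="bigvgan_generator.pt",
+)
+discriminator_path = hf_hub_download(
+    repo_id="nvidia/bigvgan_v2_24khz_100band_256x",
+    filename="bigvgan_discriminator_optimizer.pt",
+)
+print(generator_path, discriminator_path)
+```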
+
+| Model Name | Sampling Rate | Mel bands | fmax (Hz) | Upsampling Ratio | Params | Dataset | Steps | Fine-Tuned |
+|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+| [bigvgan_v2_44khz_128band_512x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_512x) | 44 kHz | 128 | 22050 | 512 | 122M | Large-scale Compilation | 5M | No |
+| [bigvgan_v2_44khz_128band_256x](https://huggingface.co/nvidia/bigvgan_v2_44khz_128band_256x) | 44 kHz | 128 | 22050 | 256 | 112M | Large-scale Compilation | 5M | No |
+| [bigvgan_v2_24khz_100band_256x](https://huggingface.co/nvidia/bigvgan_v2_24khz_100band_256x) | 24 kHz | 100 | 12000 | 256 | 112M | Large-scale Compilation | 5M | No |
+| [bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x) | 22 kHz | 80 | 11025 | 256 | 112M | Large-scale Compilation | 5M | No |
+| [bigvgan_v2_22khz_80band_fmax8k_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_fmax8k_256x) | 22 kHz | 80 | 8000 | 256 | 112M | Large-scale Compilation | 5M | No |
+| [bigvgan_24khz_100band](https://huggingface.co/nvidia/bigvgan_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 112M | LibriTTS | 5M | No |
+| [bigvgan_base_24khz_100band](https://huggingface.co/nvidia/bigvgan_base_24khz_100band) | 24 kHz | 100 | 12000 | 256 | 14M | LibriTTS | 5M | No |
+| [bigvgan_22khz_80band](https://huggingface.co/nvidia/bigvgan_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 112M | LibriTTS + VCTK + LJSpeech | 5M | No |
+| [bigvgan_base_22khz_80band](https://huggingface.co/nvidia/bigvgan_base_22khz_80band) | 22 kHz | 80 | 8000 | 256 | 14M | LibriTTS + VCTK + LJSpeech | 5M | No |
+
+The paper results are based on the original 24 kHz BigVGAN models (`bigvgan_24khz_100band` and `bigvgan_base_24khz_100band`) trained on the LibriTTS dataset.
+We also provide 22 kHz BigVGAN models with a band-limited setup (i.e., fmax=8000) for TTS applications.
+Note that the checkpoints use the `snakebeta` activation with log-scale parameterization, which gives the best overall quality.
+
+You can fine-tune the models by:
+
+1. downloading the checkpoints (both the generator weight and its discriminator/optimizer states)
+2. resuming training on your audio dataset by specifying a `--checkpoint_path` that contains the checkpoints when launching `train.py`
+
+## Training Details of BigVGAN-v2
+
+Compared to the original BigVGAN, the pretrained checkpoints of BigVGAN-v2 used `batch_size=32` with a longer `segment_size=65536` and were trained using 8 A100 GPUs.
+
+Note that the BigVGAN-v2 `json` config files in `./configs` use `batch_size=4` by default so that training fits on a single A100 GPU. You can adjust `batch_size` depending on your GPUs when fine-tuning.
+
+When training BigVGAN-v2 from scratch with a small batch size, training can encounter the early divergence problem mentioned in the paper. In such cases, we recommend lowering the `clip_grad_norm` value (e.g., to `100`) for the early training iterations (e.g., the first 20000 steps) and then increasing it back to the default `500`.
+
+## Evaluation Results of BigVGAN-v2
+
+Below are the objective results of the 24 kHz model (`bigvgan_v2_24khz_100band_256x`) obtained from the LibriTTS `dev` sets. BigVGAN-v2 shows noticeable improvements in these metrics. The model also exhibits reduced perceptual artifacts, especially for non-speech audio.
+ +| Model | Dataset | Steps | PESQ(↑) | M-STFT(↓) | MCD(↓) | Periodicity(↓) | V/UV F1(↑) | +|:----------:|:-----------------------:|:-----:|:---------:|:----------:|:----------:|:--------------:|:----------:| +| BigVGAN | LibriTTS | 1M | 4.027 | 0.7997 | 0.3745 | 0.1018 | 0.9598 | +| BigVGAN | LibriTTS | 5M | 4.256 | 0.7409 | 0.2988 | 0.0809 | 0.9698 | +| BigVGAN-v2 | Large-scale Compilation | 3M | 4.359 | 0.7134 | 0.3060 | 0.0621 | 0.9777 | +| BigVGAN-v2 | Large-scale Compilation | 5M | **4.362** | **0.7026** | **0.2903** | **0.0593** | **0.9793** | + +## Speed Benchmark + +Below are the speed and VRAM usage benchmark results of BigVGAN from `tests/test_cuda_vs_torch_model.py`, using `bigvgan_v2_24khz_100band_256x` as a reference model. + +| GPU | num_mel_frame | use_cuda_kernel | Speed (kHz) | Real-time Factor | VRAM (GB) | +|:--------------------------:|:-------------:|:---------------:|:-----------:|:----------------:|:---------:| +| NVIDIA A100 | 256 | False | 1672.1 | 69.7x | 1.3 | +| | | True | 3916.5 | 163.2x | 1.3 | +| | 2048 | False | 1899.6 | 79.2x | 1.7 | +| | | True | 5330.1 | 222.1x | 1.7 | +| | 16384 | False | 1973.8 | 82.2x | 5.0 | +| | | True | 5761.7 | 240.1x | 4.4 | +| NVIDIA GeForce RTX 3080 | 256 | False | 841.1 | 35.0x | 1.3 | +| | | True | 1598.1 | 66.6x | 1.3 | +| | 2048 | False | 929.9 | 38.7x | 1.7 | +| | | True | 1971.3 | 82.1x | 1.6 | +| | 16384 | False | 943.4 | 39.3x | 5.0 | +| | | True | 2026.5 | 84.4x | 3.9 | +| NVIDIA GeForce RTX 2080 Ti | 256 | False | 515.6 | 21.5x | 1.3 | +| | | True | 811.3 | 33.8x | 1.3 | +| | 2048 | False | 576.5 | 24.0x | 1.7 | +| | | True | 1023.0 | 42.6x | 1.5 | +| | 16384 | False | 589.4 | 24.6x | 5.0 | +| | | True | 1068.1 | 44.5x | 3.2 | + +## Acknowledgements + +We thank Vijay Anand Korthikanti and Kevin J. Shih for their generous support in implementing the CUDA kernel for inference. + +## References + +- [HiFi-GAN](https://github.com/jik876/hifi-gan) (for generator and multi-period discriminator) +- [Snake](https://github.com/EdwardDixon/snake) (for periodic activation) +- [Alias-free-torch](https://github.com/junjun3518/alias-free-torch) (for anti-aliasing) +- [Julius](https://github.com/adefossez/julius) (for low-pass filter) +- [UnivNet](https://github.com/mindslab-ai/univnet) (for multi-resolution discriminator) +- [descript-audio-codec](https://github.com/descriptinc/descript-audio-codec) and [vocos](https://github.com/gemelo-ai/vocos) (for multi-band multi-scale STFT discriminator and multi-scale mel spectrogram loss) +- [Amphion](https://github.com/open-mmlab/Amphion) (for multi-scale sub-band CQT discriminator) diff --git a/GPT_SoVITS/BigVGAN/activations.py b/GPT_SoVITS/BigVGAN/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..abe3ad9e25c6ab3d4545c6a8c60e1f85a5a8e98e --- /dev/null +++ b/GPT_SoVITS/BigVGAN/activations.py @@ -0,0 +1,122 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + """ + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + """ + super(Snake, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + """ + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. 
+ SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py new file mode 100644 index 0000000000000000000000000000000000000000..ea333cfa0d5f84de363b7b27739df3bbc457d763 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/activation1d.py @@ -0,0 +1,69 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +from alias_free_activation.torch.resample import UpSample1d, DownSample1d + +# load fused CUDA kernel: this enables importing anti_alias_activation_cuda +from alias_free_activation.cuda import load + +anti_alias_activation_cuda = load.load() + + +class FusedAntiAliasActivation(torch.autograd.Function): + """ + Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs. + The hyperparameters are hard-coded in the kernel to maximize speed. + NOTE: The fused kenrel is incorrect for Activation1d with different hyperparameters. + """ + + @staticmethod + def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta): + activation_results = anti_alias_activation_cuda.forward(inputs, up_ftr, down_ftr, alpha, beta) + + return activation_results + + @staticmethod + def backward(ctx, output_grads): + raise NotImplementedError + return output_grads, None, None + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + fused: bool = True, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + self.fused = fused # Whether to use fused CUDA kernel or not + + def forward(self, x): + if not self.fused: + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + return x + else: + if self.act.__class__.__name__ == "Snake": + beta = self.act.alpha.data # Snake uses same params for alpha and beta + else: + beta = self.act.beta.data # Snakebeta uses different params for alpha and beta + alpha = self.act.alpha.data + if not self.act.alpha_logscale: # Exp baked into cuda kernel, cancel it out with a log + alpha = torch.log(alpha) + beta = torch.log(beta) + + x = FusedAntiAliasActivation.apply(x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta) + return x diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5651f77143bd678169eb11564a7cf7a7969a59e --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation.cpp @@ -0,0 +1,23 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #include + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)"); +} \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c442334869fe72d639ec203fa4fac07f96a0ee1 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/anti_alias_activation_cuda.cu @@ -0,0 +1,246 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "type_shim.h" +#include +#include +#include +#include +#include + +namespace +{ + // Hard-coded hyperparameters + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 
1 : 4; + constexpr int BUFFER_SIZE = 32; + constexpr int FILTER_SIZE = 12; + constexpr int HALF_FILTER_SIZE = 6; + constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl + + template + __global__ void anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + // Up and downsample filters + input_t up_filter[FILTER_SIZE]; + input_t down_filter[FILTER_SIZE]; + + // Load data from global memory including extra indices reserved for replication paddings + input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0}; + input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0}; + + // Output stores downsampled output before writing to dst + output_t output[BUFFER_SIZE]; + + // blockDim/threadIdx = (128, 1, 1) + // gridDim/blockIdx = (seq_blocks, channels, batches) + int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + int local_offset = threadIdx.x * BUFFER_SIZE; + int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset; + + // intermediate have double the seq_len + int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2; + int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset; + + // Get values needed for replication padding before moving pointer + const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + input_t seq_left_most_value = right_most_pntr[0]; + input_t seq_right_most_value = right_most_pntr[seq_len - 1]; + + // Move src and dst pointers + src += block_offset + local_offset; + dst += block_offset + local_offset; + + // Alpha and beta values for snake activatons. Applies exp by default + alpha = alpha + blockIdx.y; + input_t alpha_val = expf(alpha[0]); + beta = beta + blockIdx.y; + input_t beta_val = expf(beta[0]); + + #pragma unroll + for (int it = 0; it < FILTER_SIZE; it += 1) + { + up_filter[it] = up_ftr[it]; + down_filter[it] = down_ftr[it]; + } + + // Apply replication padding for upsampling, matching torch impl + #pragma unroll + for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) + { + int element_index = seq_offset + it; // index for element + if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value; + } + if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value; + } + if ((element_index >= 0) && (element_index < seq_len)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it]; + } + } + + // Apply upsampling strided convolution and write to intermediates. 
It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later + #pragma unroll + for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) + { + input_t acc = 0.0; + int element_index = intermediate_seq_offset + it; // index for intermediate + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + if ((element_index + f_idx) >= 0) + { + acc += up_filter[f_idx] * elements[it + f_idx]; + } + } + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc; + } + + // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later + double no_div_by_zero = 0.000000001; + #pragma unroll + for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1) + { + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val); + } + + // Apply replication padding before downsampling conv from intermediates + #pragma unroll + for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT]; + } + #pragma unroll + for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1]; + } + + // Apply downsample strided convolution (assuming stride=2) from intermediates + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += 1) + { + input_t acc = 0.0; + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation + acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT]; + } + output[it] = acc; + } + + // Write output to dst + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) + { + int element_index = seq_offset + it; + if (element_index < seq_len) + { + dst[it] = output[it]; + } + } + + } + + template + void dispatch_anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + if (seq_len == 0) + { + return; + } + else + { + // Use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + constexpr int seq_len_per_block = 4096; + int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block; + dim3 blocks(blocks_per_seq_len, channels, batch_size); + dim3 threads(threads_per_block, 1, 1); + + anti_alias_activation_forward + <<>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len); + } + } +} + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta) +{ + // Input is a 3d tensor with dimensions [batches, channels, seq_len] + const int batches = input.size(0); + const int channels = input.size(1); + const int seq_len = input.size(2); + + // Output + auto act_options = input.options().requires_grad(false); + + torch::Tensor 
anti_alias_activation_results = + torch::empty({batches, channels, seq_len}, act_options); + + void *input_ptr = static_cast(input.data_ptr()); + void *up_filter_ptr = static_cast(up_filter.data_ptr()); + void *down_filter_ptr = static_cast(down_filter.data_ptr()); + void *alpha_ptr = static_cast(alpha.data_ptr()); + void *beta_ptr = static_cast(beta.data_ptr()); + void *anti_alias_activation_results_ptr = static_cast(anti_alias_activation_results.data_ptr()); + + DISPATCH_FLOAT_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch anti alias activation_forward", + dispatch_anti_alias_activation_forward( + reinterpret_cast(anti_alias_activation_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(up_filter_ptr), + reinterpret_cast(down_filter_ptr), + reinterpret_cast(alpha_ptr), + reinterpret_cast(beta_ptr), + batches, + channels, + seq_len);); + return anti_alias_activation_results; +} \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..25818b2edf4cb0dc9130e62c7c4de8d16a01baa5 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/compat.h @@ -0,0 +1,29 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py new file mode 100644 index 0000000000000000000000000000000000000000..14fbf0548c84f6e698e18631b59473d7b4d7c736 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/load.py @@ -0,0 +1,82 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import pathlib +import subprocess + +from torch.utils import cpp_extension + +""" +Setting this param to a list has a problem of generating different compilation commands (with diferent order of architectures) and leading to recompilation of fused kernels. +Set it to empty stringo avoid recompilation and assign arch flags explicity in extra_cuda_cflags below +""" +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(): + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / "build" + _create_build_dir(buildpath) + + # Helper function to build the kernels. 
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=[ + "-O3", + "-gencode", + "arch=compute_70,code=sm_70", + "--use_fast_math", + ] + + extra_cuda_flags + + cc_flag, + verbose=True, + ) + + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + sources = [ + srcpath / "anti_alias_activation.cpp", + srcpath / "anti_alias_activation_cuda.cu", + ] + anti_alias_activation_cuda = _cpp_extention_load_helper("anti_alias_activation_cuda", sources, extra_cuda_flags) + + return anti_alias_activation_cuda + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/type_shim.h b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..5db7e8a397e982d4d30d16ab6060814b98b7ab83 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/cuda/type_shim.h @@ -0,0 +1,92 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "compat.h" + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch (TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch (TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f756ed83f87f9839e457b240f60469bc187707d --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..a6693aac602d7b331d6149522685dd512a26d277 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/act.py @@ -0,0 +1,30 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn +from .resample import UpSample1d, DownSample1d + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..dc905b204c91a5cea04cd4f8bbf60498fbc7b97f --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/filter.py @@ -0,0 +1,99 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if "sinc" in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! 
+ """ + return torch.where( + x == 0, + torch.tensor(1.0, device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x, + ) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size] + even = kernel_size % 2 == 0 + half_size = kernel_size // 2 + + # For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.0: + beta = 0.1102 * (A - 8.7) + elif A >= 21.0: + beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0) + else: + beta = 0.0 + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = torch.arange(-half_size, half_size) + 0.5 + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + """ + Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal. + """ + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__( + self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = "replicate", + kernel_size: int = 12, + ): + """ + kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible. + """ + super().__init__() + if cutoff < -0.0: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = kernel_size % 2 == 0 + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + # Input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + + return out diff --git a/GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..e7928fadbe77d5ff04bdfefe70ab3ceb207c7580 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/alias_free_activation/torch/resample.py @@ -0,0 +1,48 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
+ +import torch.nn as nn +from torch.nn import functional as F +from .filter import LowPassFilter1d +from .filter import kaiser_sinc_filter1d + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode="replicate") + x = self.ratio * F.conv_transpose1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left : -self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.lowpass = LowPassFilter1d( + cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size, + ) + + def forward(self, x): + xx = self.lowpass(x) + + return xx diff --git a/GPT_SoVITS/BigVGAN/bigvgan.py b/GPT_SoVITS/BigVGAN/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..febdf165c354b1fa2932f27e4ef8b7b6da10e2a6 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/bigvgan.py @@ -0,0 +1,461 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import json +from pathlib import Path +from typing import Optional, Union, Dict + +import torch +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils import weight_norm, remove_weight_norm + +from . import activations +from .utils0 import init_weights, get_padding +from .alias_free_activation.torch.act import Activation1d as TorchActivation1d +from .env import AttrDict + +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download + + +def load_hparams_from_json(path) -> AttrDict: + with open(path) as f: + data = f.read() + return AttrDict(json.loads(data)) + + +class AMPBlock1(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + ) + ) + for d in dilation + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ) + for _ in range(len(dilation)) + ] + ) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from .alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList( + [ + Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ] + ) + elif activation == "snakebeta": + self.activations = nn.ModuleList( + [ + Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ] + ) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class AMPBlock2(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + ) + ) + for d in dilation + ] + ) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from .alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList( + [ + Activation1d(activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ] + ) + elif activation == "snakebeta": + self.activations = nn.ModuleList( + [ + Activation1d(activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ] + ) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class BigVGAN( + torch.nn.Module, + PyTorchModelHubMixin, + # library_name="bigvgan", + # repo_url="https://github.com/NVIDIA/BigVGAN", + # docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", + # pipeline_tag="audio-to-audio", + # license="mit", + # tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], +): + """ + BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks). + New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks. + + Args: + h (AttrDict): Hyperparameters. + use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels. + + Note: + - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported. + - Ensure that the activation function is correctly specified in the hyperparameters (h.activation). + """ + + def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): + super().__init__() + self.h = h + self.h["use_cuda_kernel"] = use_cuda_kernel + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from .alias_free_activation.cuda.activation1d import ( + Activation1d as CudaActivation1d, + ) + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # Pre-conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + if h.resblock == "1": + resblock_class = AMPBlock1 + elif h.resblock == "2": + resblock_class = AMPBlock2 + else: + raise ValueError(f"Incorrect resblock class specified in hyperparameters. 
Got {h.resblock}") + + # Transposed conv-based upsamplers. does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList( + [ + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ] + ) + ) + + # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation)) + + # Post-conv + activation_post = ( + activations.Snake(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snake" + else (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) if h.activation == "snakebeta" else None) + ) + if activation_post is None: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.activation_post = Activation1d(activation=activation_post) + + # Whether to use bias for the final conv_post. Default to True for backward compatibility + self.use_bias_at_final = h.get("use_bias_at_final", True) + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)) + + # Weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + # Final tanh activation. Defaults to True for backward compatibility + self.use_tanh_at_final = h.get("use_tanh_at_final", True) + + def forward(self, x): + # Pre-conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # Upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # Post-conv + x = self.activation_post(x) + x = self.conv_post(x) + # Final tanh activation + if self.use_tanh_at_final: + x = torch.tanh(x) + else: + x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] + + return x + + def remove_weight_norm(self): + try: + # print("Removing weight norm...") + for l in self.ups: + for l_i in l: + remove_weight_norm(l_i) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + except ValueError: + print("[INFO] Model already removed weight norm. 
Skipping!") + pass + + # Additional methods for huggingface_hub support + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights and config.json from a Pytorch model to a local directory.""" + + model_path = save_directory / "bigvgan_generator.pt" + torch.save({"generator": self.state_dict()}, model_path) + + config_path = save_directory / "config.json" + with open(config_path, "w") as config_file: + json.dump(self.h, config_file, indent=4) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: str, + cache_dir: str, + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", # Additional argument + strict: bool = False, # Additional argument + use_cuda_kernel: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + + # Download and load hyperparameters (h) used by BigVGAN + if os.path.isdir(model_id): + # print("Loading config.json from local directory") + config_file = os.path.join(model_id, "config.json") + else: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + h = load_hparams_from_json(config_file) + + # instantiate BigVGAN using h + if use_cuda_kernel: + print( + "[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" + ) + print( + "[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" + ) + print( + "[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" + ) + model = cls(h, use_cuda_kernel=use_cuda_kernel) + + # Download and load pretrained generator weight + if os.path.isdir(model_id): + # print("Loading weights from local directory") + model_file = os.path.join(model_id, "bigvgan_generator.pt") + else: + # print(f"Loading weights from {model_id}") + model_file = hf_hub_download( + repo_id=model_id, + filename="bigvgan_generator.pt", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + checkpoint_dict = torch.load(model_file, map_location=map_location) + + try: + model.load_state_dict(checkpoint_dict["generator"]) + except RuntimeError: + print( + "[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" 
+ ) + model.remove_weight_norm() + model.load_state_dict(checkpoint_dict["generator"]) + + return model diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json new file mode 100644 index 0000000000000000000000000000000000000000..64bca7846edb4e86d7ee22d9ca7a1554cf7f1042 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_22khz_80band.json @@ -0,0 +1,45 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 32, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + "upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "activation": "snakebeta", + "snake_logscale": true, + + "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "segment_size": 8192, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 22050, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json new file mode 100644 index 0000000000000000000000000000000000000000..e7f7ff08f6697a4640d8e28c0b3fe7e62d0c3fc7 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_24khz_100band.json @@ -0,0 +1,45 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 32, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + "upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "activation": "snakebeta", + "snake_logscale": true, + + "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "segment_size": 8192, + "num_mels": 100, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 24000, + + "fmin": 0, + "fmax": 12000, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json new file mode 100644 index 0000000000000000000000000000000000000000..fd244848308917f4df7ce49bf6b76530fd04cbc2 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_22khz_80band.json @@ -0,0 +1,45 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 32, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [8,8,2,2], + "upsample_kernel_sizes": [16,16,4,4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "activation": "snakebeta", + "snake_logscale": true, + + "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + 
"discriminator_channel_mult": 1, + + "segment_size": 8192, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 22050, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json new file mode 100644 index 0000000000000000000000000000000000000000..0911508cac4a9346ada8c196bfcc228998da6f42 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_base_24khz_100band.json @@ -0,0 +1,45 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 32, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [8,8,2,2], + "upsample_kernel_sizes": [16,16,4,4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "activation": "snakebeta", + "snake_logscale": true, + + "resolutions": [[1024, 120, 600], [2048, 240, 1200], [512, 50, 240]], + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "segment_size": 8192, + "num_mels": 100, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 24000, + + "fmin": 0, + "fmax": 12000, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json new file mode 100644 index 0000000000000000000000000000000000000000..e96bd5fdd5b99767adba7f13bfcd1f777d5c599a --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_256x.json @@ -0,0 +1,61 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 4, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + "upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "use_tanh_at_final": false, + "use_bias_at_final": false, + + "activation": "snakebeta", + "snake_logscale": true, + + "use_cqtd_instead_of_mrd": true, + "cqtd_filters": 128, + "cqtd_max_filters": 1024, + "cqtd_filters_scale": 1, + "cqtd_dilations": [1, 2, 4], + "cqtd_hop_lengths": [512, 256, 256], + "cqtd_n_octaves": [9, 9, 9], + "cqtd_bins_per_octaves": [24, 36, 48], + + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "use_multiscale_melloss": true, + "lambda_melloss": 15, + + "clip_grad_norm": 500, + + "segment_size": 65536, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 22050, + + "fmin": 0, + "fmax": null, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json new file mode 100644 index 0000000000000000000000000000000000000000..a3c9699fbe11948f4fd7e3434d2e623a00c802dd --- /dev/null +++ 
b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_22khz_80band_fmax8k_256x.json @@ -0,0 +1,61 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 4, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + "upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "use_tanh_at_final": false, + "use_bias_at_final": false, + + "activation": "snakebeta", + "snake_logscale": true, + + "use_cqtd_instead_of_mrd": true, + "cqtd_filters": 128, + "cqtd_max_filters": 1024, + "cqtd_filters_scale": 1, + "cqtd_dilations": [1, 2, 4], + "cqtd_hop_lengths": [512, 256, 256], + "cqtd_n_octaves": [9, 9, 9], + "cqtd_bins_per_octaves": [24, 36, 48], + + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "use_multiscale_melloss": true, + "lambda_melloss": 15, + + "clip_grad_norm": 500, + + "segment_size": 65536, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 22050, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_24khz_100band_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_24khz_100band_256x.json new file mode 100644 index 0000000000000000000000000000000000000000..8057ee267c8ed80615362a41892b923a3ccd27e5 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_24khz_100band_256x.json @@ -0,0 +1,61 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 4, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + "upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "use_tanh_at_final": false, + "use_bias_at_final": false, + + "activation": "snakebeta", + "snake_logscale": true, + + "use_cqtd_instead_of_mrd": true, + "cqtd_filters": 128, + "cqtd_max_filters": 1024, + "cqtd_filters_scale": 1, + "cqtd_dilations": [1, 2, 4], + "cqtd_hop_lengths": [512, 256, 256], + "cqtd_n_octaves": [9, 9, 9], + "cqtd_bins_per_octaves": [24, 36, 48], + + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "use_multiscale_melloss": true, + "lambda_melloss": 15, + + "clip_grad_norm": 500, + + "segment_size": 65536, + "num_mels": 100, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 24000, + + "fmin": 0, + "fmax": null, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json new file mode 100644 index 0000000000000000000000000000000000000000..b6999d3028e5d741ec99b16b34f153e763d0cfec --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_256x.json @@ -0,0 +1,61 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 4, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [4,4,2,2,2,2], + 
"upsample_kernel_sizes": [8,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "use_tanh_at_final": false, + "use_bias_at_final": false, + + "activation": "snakebeta", + "snake_logscale": true, + + "use_cqtd_instead_of_mrd": true, + "cqtd_filters": 128, + "cqtd_max_filters": 1024, + "cqtd_filters_scale": 1, + "cqtd_dilations": [1, 2, 4], + "cqtd_hop_lengths": [512, 256, 256], + "cqtd_n_octaves": [9, 9, 9], + "cqtd_bins_per_octaves": [24, 36, 48], + + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "use_multiscale_melloss": true, + "lambda_melloss": 15, + + "clip_grad_norm": 500, + + "segment_size": 65536, + "num_mels": 128, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + + "sampling_rate": 44100, + + "fmin": 0, + "fmax": null, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json new file mode 100644 index 0000000000000000000000000000000000000000..2d7176c910ae0969f208f6d28e3f14abca2dbc7f --- /dev/null +++ b/GPT_SoVITS/BigVGAN/configs/bigvgan_v2_44khz_128band_512x.json @@ -0,0 +1,61 @@ +{ + "resblock": "1", + "num_gpus": 0, + "batch_size": 4, + "learning_rate": 0.0001, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.9999996, + "seed": 1234, + + "upsample_rates": [8,4,2,2,2,2], + "upsample_kernel_sizes": [16,8,4,4,4,4], + "upsample_initial_channel": 1536, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "use_tanh_at_final": false, + "use_bias_at_final": false, + + "activation": "snakebeta", + "snake_logscale": true, + + "use_cqtd_instead_of_mrd": true, + "cqtd_filters": 128, + "cqtd_max_filters": 1024, + "cqtd_filters_scale": 1, + "cqtd_dilations": [1, 2, 4], + "cqtd_hop_lengths": [512, 256, 256], + "cqtd_n_octaves": [9, 9, 9], + "cqtd_bins_per_octaves": [24, 36, 48], + + "mpd_reshapes": [2, 3, 5, 7, 11], + "use_spectral_norm": false, + "discriminator_channel_mult": 1, + + "use_multiscale_melloss": true, + "lambda_melloss": 15, + + "clip_grad_norm": 500, + + "segment_size": 65536, + "num_mels": 128, + "num_freq": 2049, + "n_fft": 2048, + "hop_size": 512, + "win_size": 2048, + + "sampling_rate": 44100, + + "fmin": 0, + "fmax": null, + "fmax_for_loss": null, + + "num_workers": 4, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/GPT_SoVITS/BigVGAN/discriminators.py b/GPT_SoVITS/BigVGAN/discriminators.py new file mode 100644 index 0000000000000000000000000000000000000000..2d44c7983955a1be15a4520f6730de272f799128 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/discriminators.py @@ -0,0 +1,625 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ + +import torch +import torch.nn.functional as F +import torch.nn as nn +from torch.nn import Conv2d +from torch.nn.utils import weight_norm, spectral_norm +from torchaudio.transforms import Spectrogram, Resample + +from env import AttrDict +from utils import get_padding +import typing +from typing import List, Tuple + + +class DiscriminatorP(torch.nn.Module): + def __init__( + self, + h: AttrDict, + period: List[int], + kernel_size: int = 5, + stride: int = 3, + use_spectral_norm: bool = False, + ): + super().__init__() + self.period = period + self.d_mult = h.discriminator_channel_mult + norm_f = weight_norm if not use_spectral_norm else spectral_norm + + self.convs = nn.ModuleList( + [ + norm_f( + Conv2d( + 1, + int(32 * self.d_mult), + (kernel_size, 1), + (stride, 1), + padding=(get_padding(5, 1), 0), + ) + ), + norm_f( + Conv2d( + int(32 * self.d_mult), + int(128 * self.d_mult), + (kernel_size, 1), + (stride, 1), + padding=(get_padding(5, 1), 0), + ) + ), + norm_f( + Conv2d( + int(128 * self.d_mult), + int(512 * self.d_mult), + (kernel_size, 1), + (stride, 1), + padding=(get_padding(5, 1), 0), + ) + ), + norm_f( + Conv2d( + int(512 * self.d_mult), + int(1024 * self.d_mult), + (kernel_size, 1), + (stride, 1), + padding=(get_padding(5, 1), 0), + ) + ), + norm_f( + Conv2d( + int(1024 * self.d_mult), + int(1024 * self.d_mult), + (kernel_size, 1), + 1, + padding=(2, 0), + ) + ), + ] + ) + self.conv_post = norm_f(Conv2d(int(1024 * self.d_mult), 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, 0.1) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, h: AttrDict): + super().__init__() + self.mpd_reshapes = h.mpd_reshapes + print(f"mpd_reshapes: {self.mpd_reshapes}") + self.discriminators = nn.ModuleList( + [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes] + ) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[List[torch.Tensor]], + List[List[torch.Tensor]], + ]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorR(nn.Module): + def __init__(self, cfg: AttrDict, resolution: List[List[int]]): + super().__init__() + + self.resolution = resolution + assert len(self.resolution) == 3, f"MRD layer requires list with len=3, got {self.resolution}" + self.lrelu_slope = 0.1 + + norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm + if hasattr(cfg, "mrd_use_spectral_norm"): + print(f"[INFO] overriding MRD use_spectral_norm as {cfg.mrd_use_spectral_norm}") + norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm + self.d_mult = cfg.discriminator_channel_mult + if hasattr(cfg, "mrd_channel_mult"): + print(f"[INFO] overriding mrd channel multiplier as {cfg.mrd_channel_mult}") + self.d_mult = 
cfg.mrd_channel_mult + + self.convs = nn.ModuleList( + [ + norm_f(nn.Conv2d(1, int(32 * self.d_mult), (3, 9), padding=(1, 4))), + norm_f( + nn.Conv2d( + int(32 * self.d_mult), + int(32 * self.d_mult), + (3, 9), + stride=(1, 2), + padding=(1, 4), + ) + ), + norm_f( + nn.Conv2d( + int(32 * self.d_mult), + int(32 * self.d_mult), + (3, 9), + stride=(1, 2), + padding=(1, 4), + ) + ), + norm_f( + nn.Conv2d( + int(32 * self.d_mult), + int(32 * self.d_mult), + (3, 9), + stride=(1, 2), + padding=(1, 4), + ) + ), + norm_f( + nn.Conv2d( + int(32 * self.d_mult), + int(32 * self.d_mult), + (3, 3), + padding=(1, 1), + ) + ), + ] + ) + self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1))) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + fmap = [] + + x = self.spectrogram(x) + x = x.unsqueeze(1) + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, self.lrelu_slope) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + def spectrogram(self, x: torch.Tensor) -> torch.Tensor: + n_fft, hop_length, win_length = self.resolution + x = F.pad( + x, + (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), + mode="reflect", + ) + x = x.squeeze(1) + x = torch.stft( + x, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + center=False, + return_complex=True, + ) + x = torch.view_as_real(x) # [B, F, TT, 2] + mag = torch.norm(x, p=2, dim=-1) # [B, F, TT] + + return mag + + +class MultiResolutionDiscriminator(nn.Module): + def __init__(self, cfg, debug=False): + super().__init__() + self.resolutions = cfg.resolutions + assert len(self.resolutions) == 3, ( + f"MRD requires list of list with len=3, each element having a list with len=3. Got {self.resolutions}" + ) + self.discriminators = nn.ModuleList([DiscriminatorR(cfg, resolution) for resolution in self.resolutions]) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[List[torch.Tensor]], + List[List[torch.Tensor]], + ]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(x=y) + y_d_g, fmap_g = d(x=y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec +# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license. +# LICENSE is in incl_licenses directory. +class DiscriminatorB(nn.Module): + def __init__( + self, + window_length: int, + channels: int = 32, + hop_factor: float = 0.25, + bands: Tuple[Tuple[float, float], ...] 
= ( + (0.0, 0.1), + (0.1, 0.25), + (0.25, 0.5), + (0.5, 0.75), + (0.75, 1.0), + ), + ): + super().__init__() + self.window_length = window_length + self.hop_factor = hop_factor + self.spec_fn = Spectrogram( + n_fft=window_length, + hop_length=int(window_length * hop_factor), + win_length=window_length, + power=None, + ) + n_fft = window_length // 2 + 1 + bands = [(int(b[0] * n_fft), int(b[1] * n_fft)) for b in bands] + self.bands = bands + convs = lambda: nn.ModuleList( + [ + weight_norm(nn.Conv2d(2, channels, (3, 9), (1, 1), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 9), (1, 2), padding=(1, 4))), + weight_norm(nn.Conv2d(channels, channels, (3, 3), (1, 1), padding=(1, 1))), + ] + ) + self.band_convs = nn.ModuleList([convs() for _ in range(len(self.bands))]) + + self.conv_post = weight_norm(nn.Conv2d(channels, 1, (3, 3), (1, 1), padding=(1, 1))) + + def spectrogram(self, x: torch.Tensor) -> List[torch.Tensor]: + # Remove DC offset + x = x - x.mean(dim=-1, keepdims=True) + # Peak normalize the volume of input audio + x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9) + x = self.spec_fn(x) + x = torch.view_as_real(x) + x = x.permute(0, 3, 2, 1) # [B, F, T, C] -> [B, C, T, F] + # Split into bands + x_bands = [x[..., b[0] : b[1]] for b in self.bands] + return x_bands + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + x_bands = self.spectrogram(x.squeeze(1)) + fmap = [] + x = [] + + for band, stack in zip(x_bands, self.band_convs): + for i, layer in enumerate(stack): + band = layer(band) + band = torch.nn.functional.leaky_relu(band, 0.1) + if i > 0: + fmap.append(band) + x.append(band) + + x = torch.cat(x, dim=-1) + x = self.conv_post(x) + fmap.append(x) + + return x, fmap + + +# Method based on descript-audio-codec: https://github.com/descriptinc/descript-audio-codec +# Modified code adapted from https://github.com/gemelo-ai/vocos under the MIT license. +# LICENSE is in incl_licenses directory. +class MultiBandDiscriminator(nn.Module): + def __init__( + self, + h, + ): + """ + Multi-band multi-scale STFT discriminator, with the architecture based on https://github.com/descriptinc/descript-audio-codec. + and the modified code adapted from https://github.com/gemelo-ai/vocos. + """ + super().__init__() + # fft_sizes (list[int]): Tuple of window lengths for FFT. Defaults to [2048, 1024, 512] if not set in h. + self.fft_sizes = h.get("mbd_fft_sizes", [2048, 1024, 512]) + self.discriminators = nn.ModuleList([DiscriminatorB(window_length=w) for w in self.fft_sizes]) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[List[torch.Tensor]], + List[List[torch.Tensor]], + ]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + + for d in self.discriminators: + y_d_r, fmap_r = d(x=y) + y_d_g, fmap_g = d(x=y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +# Adapted from https://github.com/open-mmlab/Amphion/blob/main/models/vocoders/gan/discriminator/mssbcqtd.py under the MIT license. +# LICENSE is in incl_licenses directory. 
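To make the band splitting in `DiscriminatorB.spectrogram` above concrete, here is a small sketch (plain Python, an annotation rather than upstream code) showing how the fractional band edges map to STFT-bin ranges for one of the default FFT sizes:

```python
# Illustrative sketch: DiscriminatorB converts fractional band edges into
# index ranges over the one-sided STFT bins.
window_length = 2048                 # first entry of the default mbd_fft_sizes
n_bins = window_length // 2 + 1      # 1025 one-sided frequency bins
bands = ((0.0, 0.1), (0.1, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0))

bin_ranges = [(int(lo * n_bins), int(hi * n_bins)) for lo, hi in bands]
print(bin_ranges)  # [(0, 102), (102, 256), (256, 512), (512, 768), (768, 1025)]
```

Each of these bin ranges is then processed by its own convolution stack in `band_convs` before the outputs are concatenated.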
+class DiscriminatorCQT(nn.Module): + def __init__(self, cfg: AttrDict, hop_length: int, n_octaves: int, bins_per_octave: int): + super().__init__() + self.cfg = cfg + + self.filters = cfg["cqtd_filters"] + self.max_filters = cfg["cqtd_max_filters"] + self.filters_scale = cfg["cqtd_filters_scale"] + self.kernel_size = (3, 9) + self.dilations = cfg["cqtd_dilations"] + self.stride = (1, 2) + + self.in_channels = cfg["cqtd_in_channels"] + self.out_channels = cfg["cqtd_out_channels"] + self.fs = cfg["sampling_rate"] + self.hop_length = hop_length + self.n_octaves = n_octaves + self.bins_per_octave = bins_per_octave + + # Lazy-load + from nnAudio import features + + self.cqt_transform = features.cqt.CQT2010v2( + sr=self.fs * 2, + hop_length=self.hop_length, + n_bins=self.bins_per_octave * self.n_octaves, + bins_per_octave=self.bins_per_octave, + output_format="Complex", + pad_mode="constant", + ) + + self.conv_pres = nn.ModuleList() + for _ in range(self.n_octaves): + self.conv_pres.append( + nn.Conv2d( + self.in_channels * 2, + self.in_channels * 2, + kernel_size=self.kernel_size, + padding=self.get_2d_padding(self.kernel_size), + ) + ) + + self.convs = nn.ModuleList() + + self.convs.append( + nn.Conv2d( + self.in_channels * 2, + self.filters, + kernel_size=self.kernel_size, + padding=self.get_2d_padding(self.kernel_size), + ) + ) + + in_chs = min(self.filters_scale * self.filters, self.max_filters) + for i, dilation in enumerate(self.dilations): + out_chs = min((self.filters_scale ** (i + 1)) * self.filters, self.max_filters) + self.convs.append( + weight_norm( + nn.Conv2d( + in_chs, + out_chs, + kernel_size=self.kernel_size, + stride=self.stride, + dilation=(dilation, 1), + padding=self.get_2d_padding(self.kernel_size, (dilation, 1)), + ) + ) + ) + in_chs = out_chs + out_chs = min( + (self.filters_scale ** (len(self.dilations) + 1)) * self.filters, + self.max_filters, + ) + self.convs.append( + weight_norm( + nn.Conv2d( + in_chs, + out_chs, + kernel_size=(self.kernel_size[0], self.kernel_size[0]), + padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])), + ) + ) + ) + + self.conv_post = weight_norm( + nn.Conv2d( + out_chs, + self.out_channels, + kernel_size=(self.kernel_size[0], self.kernel_size[0]), + padding=self.get_2d_padding((self.kernel_size[0], self.kernel_size[0])), + ) + ) + + self.activation = torch.nn.LeakyReLU(negative_slope=0.1) + self.resample = Resample(orig_freq=self.fs, new_freq=self.fs * 2) + + self.cqtd_normalize_volume = self.cfg.get("cqtd_normalize_volume", False) + if self.cqtd_normalize_volume: + print( + "[INFO] cqtd_normalize_volume set to True. Will apply DC offset removal & peak volume normalization in CQTD!" 
+ ) + + def get_2d_padding( + self, + kernel_size: typing.Tuple[int, int], + dilation: typing.Tuple[int, int] = (1, 1), + ): + return ( + ((kernel_size[0] - 1) * dilation[0]) // 2, + ((kernel_size[1] - 1) * dilation[1]) // 2, + ) + + def forward(self, x: torch.tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + fmap = [] + + if self.cqtd_normalize_volume: + # Remove DC offset + x = x - x.mean(dim=-1, keepdims=True) + # Peak normalize the volume of input audio + x = 0.8 * x / (x.abs().max(dim=-1, keepdim=True)[0] + 1e-9) + + x = self.resample(x) + + z = self.cqt_transform(x) + + z_amplitude = z[:, :, :, 0].unsqueeze(1) + z_phase = z[:, :, :, 1].unsqueeze(1) + + z = torch.cat([z_amplitude, z_phase], dim=1) + z = torch.permute(z, (0, 1, 3, 2)) # [B, C, W, T] -> [B, C, T, W] + + latent_z = [] + for i in range(self.n_octaves): + latent_z.append( + self.conv_pres[i]( + z[ + :, + :, + :, + i * self.bins_per_octave : (i + 1) * self.bins_per_octave, + ] + ) + ) + latent_z = torch.cat(latent_z, dim=-1) + + for i, l in enumerate(self.convs): + latent_z = l(latent_z) + + latent_z = self.activation(latent_z) + fmap.append(latent_z) + + latent_z = self.conv_post(latent_z) + + return latent_z, fmap + + +class MultiScaleSubbandCQTDiscriminator(nn.Module): + def __init__(self, cfg: AttrDict): + super().__init__() + + self.cfg = cfg + # Using get with defaults + self.cfg["cqtd_filters"] = self.cfg.get("cqtd_filters", 32) + self.cfg["cqtd_max_filters"] = self.cfg.get("cqtd_max_filters", 1024) + self.cfg["cqtd_filters_scale"] = self.cfg.get("cqtd_filters_scale", 1) + self.cfg["cqtd_dilations"] = self.cfg.get("cqtd_dilations", [1, 2, 4]) + self.cfg["cqtd_in_channels"] = self.cfg.get("cqtd_in_channels", 1) + self.cfg["cqtd_out_channels"] = self.cfg.get("cqtd_out_channels", 1) + # Multi-scale params to loop over + self.cfg["cqtd_hop_lengths"] = self.cfg.get("cqtd_hop_lengths", [512, 256, 256]) + self.cfg["cqtd_n_octaves"] = self.cfg.get("cqtd_n_octaves", [9, 9, 9]) + self.cfg["cqtd_bins_per_octaves"] = self.cfg.get("cqtd_bins_per_octaves", [24, 36, 48]) + + self.discriminators = nn.ModuleList( + [ + DiscriminatorCQT( + self.cfg, + hop_length=self.cfg["cqtd_hop_lengths"][i], + n_octaves=self.cfg["cqtd_n_octaves"][i], + bins_per_octave=self.cfg["cqtd_bins_per_octaves"][i], + ) + for i in range(len(self.cfg["cqtd_hop_lengths"])) + ] + ) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[List[torch.Tensor]], + List[List[torch.Tensor]], + ]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + + for disc in self.discriminators: + y_d_r, fmap_r = disc(y) + y_d_g, fmap_g = disc(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class CombinedDiscriminator(nn.Module): + """ + Wrapper of chaining multiple discrimiantor architectures. 
+ Example: combine mbd and cqtd as a single class + """ + + def __init__(self, list_discriminator: List[nn.Module]): + super().__init__() + self.discrimiantor = nn.ModuleList(list_discriminator) + + def forward( + self, y: torch.Tensor, y_hat: torch.Tensor + ) -> Tuple[ + List[torch.Tensor], + List[torch.Tensor], + List[List[torch.Tensor]], + List[List[torch.Tensor]], + ]: + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + + for disc in self.discrimiantor: + y_d_r, y_d_g, fmap_r, fmap_g = disc(y, y_hat) + y_d_rs.extend(y_d_r) + fmap_rs.extend(fmap_r) + y_d_gs.extend(y_d_g) + fmap_gs.extend(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs diff --git a/GPT_SoVITS/BigVGAN/env.py b/GPT_SoVITS/BigVGAN/env.py new file mode 100644 index 0000000000000000000000000000000000000000..cf8ac6cea644c78d115dd3902b902993f366ee61 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_1 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_2 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_3 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_4 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_5 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_6 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_6 new file mode 100644 index 0000000000000000000000000000000000000000..2569ec0b6c85f94f3cd071ba16e9028ccf156be2 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_6 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-present, Descript + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_7 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_7 new file mode 100644 index 0000000000000000000000000000000000000000..c37bdaf99c6921f5849425d546069e972f52d7fa --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_7 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Charactr Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_8 b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_8 new file mode 100644 index 0000000000000000000000000000000000000000..ab3d7ffe795779f54e339078e4e752ad9019aae8 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/incl_licenses/LICENSE_8 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Amphion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/inference.py b/GPT_SoVITS/BigVGAN/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..5f892a3c807a7020eff7fea35179b0f6e5f991c9 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/inference.py @@ -0,0 +1,85 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
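The inference scripts in this diff read `config.json` into the `AttrDict` helper from `env.py` (added earlier in this diff), so hyperparameters can be accessed either as dict keys or as attributes. A minimal sketch of that pattern, using a made-up config snippet:

```python
# Minimal sketch (annotation, not part of the diff) of the config-loading pattern
# used by the inference scripts: AttrDict exposes dict keys as attributes.
import json


class AttrDict(dict):
    """Same helper as GPT_SoVITS/BigVGAN/env.py."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self


config_json = '{"sampling_rate": 24000, "num_mels": 100, "hop_size": 256}'
h = AttrDict(json.loads(config_json))
print(h.sampling_rate, h["num_mels"], h.hop_size)  # 24000 100 256
```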
+ +from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import argparse +import json +import torch +import librosa +from utils import load_checkpoint +from meldataset import get_mel_spectrogram +from scipy.io.wavfile import write +from env import AttrDict +from meldataset import MAX_WAV_VALUE +from bigvgan import BigVGAN as Generator + +h = None +device = None +torch.backends.cudnn.benchmark = False + + +def inference(a, h): + generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device) + + state_dict_g = load_checkpoint(a.checkpoint_file, device) + generator.load_state_dict(state_dict_g["generator"]) + + filelist = os.listdir(a.input_wavs_dir) + + os.makedirs(a.output_dir, exist_ok=True) + + generator.eval() + generator.remove_weight_norm() + with torch.no_grad(): + for i, filname in enumerate(filelist): + # Load the ground truth audio and resample if necessary + wav, sr = librosa.load(os.path.join(a.input_wavs_dir, filname), sr=h.sampling_rate, mono=True) + wav = torch.FloatTensor(wav).to(device) + # Compute mel spectrogram from the ground truth audio + x = get_mel_spectrogram(wav.unsqueeze(0), generator.h) + + y_g_hat = generator(x) + + audio = y_g_hat.squeeze() + audio = audio * MAX_WAV_VALUE + audio = audio.cpu().numpy().astype("int16") + + output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + "_generated.wav") + write(output_file, h.sampling_rate, audio) + print(output_file) + + +def main(): + print("Initializing Inference Process..") + + parser = argparse.ArgumentParser() + parser.add_argument("--input_wavs_dir", default="test_files") + parser.add_argument("--output_dir", default="generated_files") + parser.add_argument("--checkpoint_file", required=True) + parser.add_argument("--use_cuda_kernel", action="store_true", default=False) + + a = parser.parse_args() + + config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json") + with open(config_file) as f: + data = f.read() + + global h + json_config = json.loads(data) + h = AttrDict(json_config) + + torch.manual_seed(h.seed) + global device + if torch.cuda.is_available(): + torch.cuda.manual_seed(h.seed) + device = torch.device("cuda") + else: + device = torch.device("cpu") + + inference(a, h) + + +if __name__ == "__main__": + main() diff --git a/GPT_SoVITS/BigVGAN/inference_e2e.py b/GPT_SoVITS/BigVGAN/inference_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..9c0df77435e91935beaca365dd5fd38d76098a4a --- /dev/null +++ b/GPT_SoVITS/BigVGAN/inference_e2e.py @@ -0,0 +1,100 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
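Both inference scripts finish by scaling the generator output, which `BigVGAN.forward` bounds to [-1, 1] via the final tanh or clamp, to 16-bit PCM before writing the WAV file. A small sketch of that conversion; `MAX_WAV_VALUE` is imported from `meldataset` (not shown in this diff), and its value is assumed here to be the usual HiFi-GAN constant of 32768.0:

```python
# Sketch (annotation, not part of the diff) of the final PCM conversion in inference().
# Assumption: MAX_WAV_VALUE == 32768.0, as in HiFi-GAN-style meldataset modules.
import numpy as np

MAX_WAV_VALUE = 32768.0
generator_output = np.array([-1.0, -0.5, 0.0, 0.5, 0.999], dtype=np.float32)  # tanh-bounded

audio_int16 = (generator_output * MAX_WAV_VALUE).astype("int16")
print(audio_int16)  # values: -32768, -16384, 0, 16384, 32735
```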
+ +from __future__ import absolute_import, division, print_function, unicode_literals + +import glob +import os +import numpy as np +import argparse +import json +import torch +from scipy.io.wavfile import write +from env import AttrDict +from meldataset import MAX_WAV_VALUE +from bigvgan import BigVGAN as Generator + +h = None +device = None +torch.backends.cudnn.benchmark = False + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +def scan_checkpoint(cp_dir, prefix): + pattern = os.path.join(cp_dir, prefix + "*") + cp_list = glob.glob(pattern) + if len(cp_list) == 0: + return "" + return sorted(cp_list)[-1] + + +def inference(a, h): + generator = Generator(h, use_cuda_kernel=a.use_cuda_kernel).to(device) + + state_dict_g = load_checkpoint(a.checkpoint_file, device) + generator.load_state_dict(state_dict_g["generator"]) + + filelist = os.listdir(a.input_mels_dir) + + os.makedirs(a.output_dir, exist_ok=True) + + generator.eval() + generator.remove_weight_norm() + with torch.no_grad(): + for i, filname in enumerate(filelist): + # Load the mel spectrogram in .npy format + x = np.load(os.path.join(a.input_mels_dir, filname)) + x = torch.FloatTensor(x).to(device) + if len(x.shape) == 2: + x = x.unsqueeze(0) + + y_g_hat = generator(x) + + audio = y_g_hat.squeeze() + audio = audio * MAX_WAV_VALUE + audio = audio.cpu().numpy().astype("int16") + + output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + "_generated_e2e.wav") + write(output_file, h.sampling_rate, audio) + print(output_file) + + +def main(): + print("Initializing Inference Process..") + + parser = argparse.ArgumentParser() + parser.add_argument("--input_mels_dir", default="test_mel_files") + parser.add_argument("--output_dir", default="generated_files_from_mel") + parser.add_argument("--checkpoint_file", required=True) + parser.add_argument("--use_cuda_kernel", action="store_true", default=False) + + a = parser.parse_args() + + config_file = os.path.join(os.path.split(a.checkpoint_file)[0], "config.json") + with open(config_file) as f: + data = f.read() + + global h + json_config = json.loads(data) + h = AttrDict(json_config) + + torch.manual_seed(h.seed) + global device + if torch.cuda.is_available(): + torch.cuda.manual_seed(h.seed) + device = torch.device("cuda") + else: + device = torch.device("cpu") + + inference(a, h) + + +if __name__ == "__main__": + main() diff --git a/GPT_SoVITS/BigVGAN/loss.py b/GPT_SoVITS/BigVGAN/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..c295a144ff7bcfc0d91d9d4676bedfa7015cdb79 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/loss.py @@ -0,0 +1,238 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + + +import torch +import torch.nn as nn +from librosa.filters import mel as librosa_mel_fn +from scipy import signal + +import typing +from typing import List, Tuple +from collections import namedtuple +import math +import functools + + +# Adapted from https://github.com/descriptinc/descript-audio-codec/blob/main/dac/nn/loss.py under the MIT license. +# LICENSE is in incl_licenses directory. +class MultiScaleMelSpectrogramLoss(nn.Module): + """Compute distance between mel spectrograms. Can be used + in a multi-scale way. 
+ + Parameters + ---------- + n_mels : List[int] + Number of mels per STFT, by default [5, 10, 20, 40, 80, 160, 320], + window_lengths : List[int], optional + Length of each window of each STFT, by default [32, 64, 128, 256, 512, 1024, 2048] + loss_fn : typing.Callable, optional + How to compare each loss, by default nn.L1Loss() + clamp_eps : float, optional + Clamp on the log magnitude, below, by default 1e-5 + mag_weight : float, optional + Weight of raw magnitude portion of loss, by default 0.0 (no ampliciation on mag part) + log_weight : float, optional + Weight of log magnitude portion of loss, by default 1.0 + pow : float, optional + Power to raise magnitude to before taking log, by default 1.0 + weight : float, optional + Weight of this loss, by default 1.0 + match_stride : bool, optional + Whether to match the stride of convolutional layers, by default False + + Implementation copied from: https://github.com/descriptinc/lyrebird-audiotools/blob/961786aa1a9d628cca0c0486e5885a457fe70c1a/audiotools/metrics/spectral.py + Additional code copied and modified from https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py + """ + + def __init__( + self, + sampling_rate: int, + n_mels: List[int] = [5, 10, 20, 40, 80, 160, 320], + window_lengths: List[int] = [32, 64, 128, 256, 512, 1024, 2048], + loss_fn: typing.Callable = nn.L1Loss(), + clamp_eps: float = 1e-5, + mag_weight: float = 0.0, + log_weight: float = 1.0, + pow: float = 1.0, + weight: float = 1.0, + match_stride: bool = False, + mel_fmin: List[float] = [0, 0, 0, 0, 0, 0, 0], + mel_fmax: List[float] = [None, None, None, None, None, None, None], + window_type: str = "hann", + ): + super().__init__() + self.sampling_rate = sampling_rate + + STFTParams = namedtuple( + "STFTParams", + ["window_length", "hop_length", "window_type", "match_stride"], + ) + + self.stft_params = [ + STFTParams( + window_length=w, + hop_length=w // 4, + match_stride=match_stride, + window_type=window_type, + ) + for w in window_lengths + ] + self.n_mels = n_mels + self.loss_fn = loss_fn + self.clamp_eps = clamp_eps + self.log_weight = log_weight + self.mag_weight = mag_weight + self.weight = weight + self.mel_fmin = mel_fmin + self.mel_fmax = mel_fmax + self.pow = pow + + @staticmethod + @functools.lru_cache(None) + def get_window( + window_type, + window_length, + ): + return signal.get_window(window_type, window_length) + + @staticmethod + @functools.lru_cache(None) + def get_mel_filters(sr, n_fft, n_mels, fmin, fmax): + return librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) + + def mel_spectrogram( + self, + wav, + n_mels, + fmin, + fmax, + window_length, + hop_length, + match_stride, + window_type, + ): + """ + Mirrors AudioSignal.mel_spectrogram used by BigVGAN-v2 training from: + https://github.com/descriptinc/audiotools/blob/master/audiotools/core/audio_signal.py + """ + B, C, T = wav.shape + + if match_stride: + assert hop_length == window_length // 4, "For match_stride, hop must equal n_fft // 4" + right_pad = math.ceil(T / hop_length) * hop_length - T + pad = (window_length - hop_length) // 2 + else: + right_pad = 0 + pad = 0 + + wav = torch.nn.functional.pad(wav, (pad, pad + right_pad), mode="reflect") + + window = self.get_window(window_type, window_length) + window = torch.from_numpy(window).to(wav.device).float() + + stft = torch.stft( + wav.reshape(-1, T), + n_fft=window_length, + hop_length=hop_length, + window=window, + return_complex=True, + center=True, + ) + _, nf, nt = stft.shape + stft 
= stft.reshape(B, C, nf, nt) + if match_stride: + """ + Drop first two and last two frames, which are added, because of padding. Now num_frames * hop_length = num_samples. + """ + stft = stft[..., 2:-2] + magnitude = torch.abs(stft) + + nf = magnitude.shape[2] + mel_basis = self.get_mel_filters(self.sampling_rate, 2 * (nf - 1), n_mels, fmin, fmax) + mel_basis = torch.from_numpy(mel_basis).to(wav.device) + mel_spectrogram = magnitude.transpose(2, -1) @ mel_basis.T + mel_spectrogram = mel_spectrogram.transpose(-1, 2) + + return mel_spectrogram + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """Computes mel loss between an estimate and a reference + signal. + + Parameters + ---------- + x : torch.Tensor + Estimate signal + y : torch.Tensor + Reference signal + + Returns + ------- + torch.Tensor + Mel loss. + """ + + loss = 0.0 + for n_mels, fmin, fmax, s in zip(self.n_mels, self.mel_fmin, self.mel_fmax, self.stft_params): + kwargs = { + "n_mels": n_mels, + "fmin": fmin, + "fmax": fmax, + "window_length": s.window_length, + "hop_length": s.hop_length, + "match_stride": s.match_stride, + "window_type": s.window_type, + } + + x_mels = self.mel_spectrogram(x, **kwargs) + y_mels = self.mel_spectrogram(y, **kwargs) + x_logmels = torch.log(x_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0)) + y_logmels = torch.log(y_mels.clamp(min=self.clamp_eps).pow(self.pow)) / torch.log(torch.tensor(10.0)) + + loss += self.log_weight * self.loss_fn(x_logmels, y_logmels) + loss += self.mag_weight * self.loss_fn(x_logmels, y_logmels) + + return loss + + +# Loss functions +def feature_loss(fmap_r: List[List[torch.Tensor]], fmap_g: List[List[torch.Tensor]]) -> torch.Tensor: + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 # This equates to lambda=2.0 for the feature matching loss + + +def discriminator_loss( + disc_real_outputs: List[torch.Tensor], disc_generated_outputs: List[torch.Tensor] +) -> Tuple[torch.Tensor, List[torch.Tensor], List[torch.Tensor]]: + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg**2) + loss += r_loss + g_loss + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss( + disc_outputs: List[torch.Tensor], +) -> Tuple[torch.Tensor, List[torch.Tensor]]: + loss = 0 + gen_losses = [] + for dg in disc_outputs: + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses diff --git a/GPT_SoVITS/BigVGAN/meldataset.py b/GPT_SoVITS/BigVGAN/meldataset.py new file mode 100644 index 0000000000000000000000000000000000000000..dc12c9874cfb9958d6f4842cc067ffda66a390eb --- /dev/null +++ b/GPT_SoVITS/BigVGAN/meldataset.py @@ -0,0 +1,370 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import math +import os +import random +import torch +import torch.utils.data +import numpy as np +import librosa +from librosa.filters import mel as librosa_mel_fn +import pathlib +from tqdm import tqdm +from typing import List, Tuple, Optional +from .env import AttrDict + +MAX_WAV_VALUE = 32767.0 # NOTE: 32768.0 -1 to prevent int16 overflow (results in popping sound in corner cases) + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + return dynamic_range_compression_torch(magnitudes) + + +def spectral_de_normalize_torch(magnitudes): + return dynamic_range_decompression_torch(magnitudes) + + +mel_basis_cache = {} +hann_window_cache = {} + + +def mel_spectrogram( + y: torch.Tensor, + n_fft: int, + num_mels: int, + sampling_rate: int, + hop_size: int, + win_size: int, + fmin: int, + fmax: int = None, + center: bool = False, +) -> torch.Tensor: + """ + Calculate the mel spectrogram of an input signal. + This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft). + + Args: + y (torch.Tensor): Input signal. + n_fft (int): FFT size. + num_mels (int): Number of mel bins. + sampling_rate (int): Sampling rate of the input signal. + hop_size (int): Hop size for STFT. + win_size (int): Window size for STFT. + fmin (int): Minimum frequency for mel filterbank. + fmax (int): Maximum frequency for mel filterbank. If None, defaults to half the sampling rate (fmax = sr / 2.0) inside librosa_mel_fn + center (bool): Whether to pad the input to center the frames. Default is False. + + Returns: + torch.Tensor: Mel spectrogram. + """ + if torch.min(y) < -1.0: + print(f"[WARNING] Min value of input waveform signal is {torch.min(y)}") + if torch.max(y) > 1.0: + print(f"[WARNING] Max value of input waveform signal is {torch.max(y)}") + + device = y.device + key = f"{n_fft}_{num_mels}_{sampling_rate}_{hop_size}_{win_size}_{fmin}_{fmax}_{device}" + + if key not in mel_basis_cache: + mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) + mel_basis_cache[key] = torch.from_numpy(mel).float().to(device) + hann_window_cache[key] = torch.hann_window(win_size).to(device) + + mel_basis = mel_basis_cache[key] + hann_window = hann_window_cache[key] + + padding = (n_fft - hop_size) // 2 + y = torch.nn.functional.pad(y.unsqueeze(1), (padding, padding), mode="reflect").squeeze(1) + + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window, + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + spec = torch.sqrt(torch.view_as_real(spec).pow(2).sum(-1) + 1e-9) + + mel_spec = torch.matmul(mel_basis, spec) + mel_spec = spectral_normalize_torch(mel_spec) + + return mel_spec + + +def get_mel_spectrogram(wav, h): + """ + Generate mel spectrogram from a waveform using given hyperparameters. + + Args: + wav (torch.Tensor): Input waveform. + h: Hyperparameters object with attributes n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax. + + Returns: + torch.Tensor: Mel spectrogram. 
+ """ + return mel_spectrogram( + wav, + h.n_fft, + h.num_mels, + h.sampling_rate, + h.hop_size, + h.win_size, + h.fmin, + h.fmax, + ) + + +def get_dataset_filelist(a): + training_files = [] + validation_files = [] + list_unseen_validation_files = [] + + with open(a.input_training_file, "r", encoding="utf-8") as fi: + training_files = [ + os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 + ] + print(f"first training file: {training_files[0]}") + + with open(a.input_validation_file, "r", encoding="utf-8") as fi: + validation_files = [ + os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 + ] + print(f"first validation file: {validation_files[0]}") + + for i in range(len(a.list_input_unseen_validation_file)): + with open(a.list_input_unseen_validation_file[i], "r", encoding="utf-8") as fi: + unseen_validation_files = [ + os.path.join(a.list_input_unseen_wavs_dir[i], x.split("|")[0] + ".wav") + for x in fi.read().split("\n") + if len(x) > 0 + ] + print(f"first unseen {i}th validation fileset: {unseen_validation_files[0]}") + list_unseen_validation_files.append(unseen_validation_files) + + return training_files, validation_files, list_unseen_validation_files + + +class MelDataset(torch.utils.data.Dataset): + def __init__( + self, + training_files: List[str], + hparams: AttrDict, + segment_size: int, + n_fft: int, + num_mels: int, + hop_size: int, + win_size: int, + sampling_rate: int, + fmin: int, + fmax: Optional[int], + split: bool = True, + shuffle: bool = True, + device: str = None, + fmax_loss: Optional[int] = None, + fine_tuning: bool = False, + base_mels_path: str = None, + is_seen: bool = True, + ): + self.audio_files = training_files + random.seed(1234) + if shuffle: + random.shuffle(self.audio_files) + self.hparams = hparams + self.is_seen = is_seen + if self.is_seen: + self.name = pathlib.Path(self.audio_files[0]).parts[0] + else: + self.name = "-".join(pathlib.Path(self.audio_files[0]).parts[:2]).strip("/") + + self.segment_size = segment_size + self.sampling_rate = sampling_rate + self.split = split + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.fmax_loss = fmax_loss + self.device = device + self.fine_tuning = fine_tuning + self.base_mels_path = base_mels_path + + print("[INFO] checking dataset integrity...") + for i in tqdm(range(len(self.audio_files))): + assert os.path.exists(self.audio_files[i]), f"{self.audio_files[i]} not found" + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, str, torch.Tensor]: + try: + filename = self.audio_files[index] + + # Use librosa.load that ensures loading waveform into mono with [-1, 1] float values + # Audio is ndarray with shape [T_time]. 
Disable auto-resampling here to minimize overhead + # The on-the-fly resampling during training will be done only for the obtained random chunk + audio, source_sampling_rate = librosa.load(filename, sr=None, mono=True) + + # Main logic that uses pair for training BigVGAN + if not self.fine_tuning: + if self.split: # Training step + # Obtain randomized audio chunk + if source_sampling_rate != self.sampling_rate: + # Adjust segment size to crop if the source sr is different + target_segment_size = math.ceil(self.segment_size * (source_sampling_rate / self.sampling_rate)) + else: + target_segment_size = self.segment_size + + # Compute upper bound index for the random chunk + random_chunk_upper_bound = max(0, audio.shape[0] - target_segment_size) + + # Crop or pad audio to obtain random chunk with target_segment_size + if audio.shape[0] >= target_segment_size: + audio_start = random.randint(0, random_chunk_upper_bound) + audio = audio[audio_start : audio_start + target_segment_size] + else: + audio = np.pad( + audio, + (0, target_segment_size - audio.shape[0]), + mode="constant", + ) + + # Resample audio chunk to self.sampling rate + if source_sampling_rate != self.sampling_rate: + audio = librosa.resample( + audio, + orig_sr=source_sampling_rate, + target_sr=self.sampling_rate, + ) + if audio.shape[0] > self.segment_size: + # trim last elements to match self.segment_size (e.g., 16385 for 44khz downsampled to 24khz -> 16384) + audio = audio[: self.segment_size] + + else: # Validation step + # Resample full audio clip to target sampling rate + if source_sampling_rate != self.sampling_rate: + audio = librosa.resample( + audio, + orig_sr=source_sampling_rate, + target_sr=self.sampling_rate, + ) + # Trim last elements to match audio length to self.hop_size * n for evaluation + if (audio.shape[0] % self.hop_size) != 0: + audio = audio[: -(audio.shape[0] % self.hop_size)] + + # BigVGAN is trained using volume-normalized waveform + audio = librosa.util.normalize(audio) * 0.95 + + # Cast ndarray to torch tensor + audio = torch.FloatTensor(audio) + audio = audio.unsqueeze(0) # [B(1), self.segment_size] + + # Compute mel spectrogram corresponding to audio + mel = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax, + center=False, + ) # [B(1), self.num_mels, self.segment_size // self.hop_size] + + # Fine-tuning logic that uses pre-computed mel. 
Example: Using TTS model-generated mel as input + else: + # For fine-tuning, assert that the waveform is in the defined sampling_rate + # Fine-tuning won't support on-the-fly resampling to be fool-proof (the dataset should have been prepared properly) + assert source_sampling_rate == self.sampling_rate, ( + f"For fine_tuning, waveform must be in the spcified sampling rate {self.sampling_rate}, got {source_sampling_rate}" + ) + + # Cast ndarray to torch tensor + audio = torch.FloatTensor(audio) + audio = audio.unsqueeze(0) # [B(1), T_time] + + # Load pre-computed mel from disk + mel = np.load( + os.path.join( + self.base_mels_path, + os.path.splitext(os.path.split(filename)[-1])[0] + ".npy", + ) + ) + mel = torch.from_numpy(mel) + + if len(mel.shape) < 3: + mel = mel.unsqueeze(0) # ensure [B, C, T] + + if self.split: + frames_per_seg = math.ceil(self.segment_size / self.hop_size) + + if audio.size(1) >= self.segment_size: + mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) + mel = mel[:, :, mel_start : mel_start + frames_per_seg] + audio = audio[ + :, + mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size, + ] + + # Pad pre-computed mel and audio to match length to ensuring fine-tuning without error. + # NOTE: this may introduce a single-frame misalignment of the + # To remove possible misalignment, it is recommended to prepare the pair where the audio length is the integer multiple of self.hop_size + mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant") + audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant") + + # Compute mel_loss used by spectral regression objective. Uses self.fmax_loss instead (usually None) + mel_loss = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax_loss, + center=False, + ) # [B(1), self.num_mels, self.segment_size // self.hop_size] + + # Shape sanity checks + assert ( + audio.shape[1] == mel.shape[2] * self.hop_size and audio.shape[1] == mel_loss.shape[2] * self.hop_size + ), ( + f"Audio length must be mel frame length * hop_size. Got audio shape {audio.shape} mel shape {mel.shape} mel_loss shape {mel_loss.shape}" + ) + + return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) + + # If it encounters error during loading the data, skip this sample and load random other sample to the batch + except Exception as e: + if self.fine_tuning: + raise e # Terminate training if it is fine-tuning. The dataset should have been prepared properly. + else: + print(f"[WARNING] Failed to load waveform, skipping! 
filename: {filename} Error: {e}") + return self[random.randrange(len(self))] + + def __len__(self): + return len(self.audio_files) diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep b/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/.gitkeep @@ -0,0 +1 @@ + diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md new file mode 100644 index 0000000000000000000000000000000000000000..4b388c28d09b8ca3aab5096304c52e1a5dac0e16 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/bias.md @@ -0,0 +1,4 @@ +| Field | Response | +| :--------------------------------------------------------------------------------------------------------- | :--------------------------------------------------- | +| Participation considerations from adversely impacted groups protected classes in model design and testing: | None | +| Measures taken to mitigate against unwanted bias: | No measures taken to mitigate against unwanted bias. | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md new file mode 100644 index 0000000000000000000000000000000000000000..6f1a16676e438ba95f9d411a19e04a0f13409e54 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/explainability.md @@ -0,0 +1,13 @@ +| Field | Response | +| :---------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Intended Application & Domain: | Generating waveform from mel spectrogram. | +| Model Type: | Convolutional Neural Network (CNN) | +| Intended Users: | This model is intended for developers to synthesize and generate waveforms from the AI-generated mel spectrograms. | +| Output: | Audio Waveform | +| Describe how the model works: | Model generates audio waveform corresponding to the input mel spectrogram. | +| Name the adversely impacted groups this has been tested to deliver comparable outcomes regardless of: | Not Applicable | +| Technical Limitations: | This may not perform well on synthetically-generated mel spectrograms that deviate significantly from the profile of mel spectrograms on which this was trained. | +| Verified to have met prescribed NVIDIA quality standards: | Yes | +| Performance Metrics: | Perceptual Evaluation of Speech Quality (PESQ), Virtual Speech Quality Objective Listener (VISQOL), Multi-resolution STFT (MRSTFT), Mel cepstral distortion (MCD), Periodicity RMSE, Voice/Unvoiced F1 Score (V/UV F1) | +| Potential Known Risks: | This model may generate low-quality or distorted soundwaves. | +| Licensing: | https://github.com/NVIDIA/BigVGAN/blob/main/LICENSE | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..a39cba0b49a4a32a37afa90f2baf4630dcd9cadc --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/overview.md @@ -0,0 +1,126 @@ +# Model Overview + +## Description: + +BigVGAN is a generative AI model specialized in synthesizing audio waveforms using Mel spectrogram as inputs. + +
+ +BigVGAN is a fully convolutional architecture with several upsampling blocks using transposed convolution followed by multiple residual dilated convolution layers. + +BigVGAN consists of a novel module, called anti-aliased multi-periodicity composition (AMP), which is specifically designed for generating waveforms. AMP is specialized in synthesizing high-frequency and periodic soundwaves, drawing inspiration from audio signal processing principles. + +It applies a periodic activation function, called Snake, which provides an inductive bias to the architecture for generating periodic soundwaves. It also applies anti-aliasing filters to reduce undesired artifacts in the generated waveforms.
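For intuition, below is a minimal sketch of the Snake activation described above. It is illustrative only; the full implementation (with per-channel trainable parameters, optional log-scale alpha, and numerical-stability details) is `activations.Snake` in this repository.

```python
import torch
import torch.nn as nn


class SnakeSketch(nn.Module):
    """Simplified Snake activation: x + (1/alpha) * sin^2(alpha * x).

    Illustrative stand-in for activations.Snake in this repository.
    """

    def __init__(self, channels: int, alpha_init: float = 1.0):
        super().__init__()
        # One trainable alpha per channel, broadcast over [batch, channels, time] inputs
        self.alpha = nn.Parameter(alpha_init * torch.ones(1, channels, 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The sin^2 term adds a bounded periodic component on top of the identity,
        # biasing the network toward producing periodic waveforms.
        return x + (1.0 / (self.alpha + 1e-9)) * torch.sin(self.alpha * x) ** 2
```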
+ +This model is ready for commercial use.
+ +## Reference(s): + +- [BigVGAN: A Universal Neural Vocoder with Large-Scale Training](https://arxiv.org/abs/2206.04658)
+- [Project Page](https://research.nvidia.com/labs/adlr/projects/bigvgan/)
+- [Audio Demo](https://bigvgan-demo.github.io/)
+ +## Model Architecture: + +**Architecture Type:** Convolutional Neural Network (CNN)
+**Network Architecture:** Details of the model are available at https://github.com/NVIDIA/BigVGAN, and the related paper can be found at https://arxiv.org/abs/2206.04658
+**Model Version:** 2.0
+ +## Input: + +**Input Type:** Audio
+**Input Format:** Mel Spectrogram
+**Input Parameters:** None
+**Other Properties Related to Input:** The input mel spectrogram has shape `[batch, channels, frames]`, where `channels` refers to the number of mel bands defined by the model and `frames` refers to the temporal length. The model supports arbitrarily long `frames`, limited only by available GPU memory. + +## Output: + +**Output Type:** Audio
+**Output Format:** Audio Waveform
+**Output Parameters:** None
+**Other Properties Related to Output:** The output audio waveform has shape `[batch, 1, time]`, where `1` refers to the single mono audio channel and `time` refers to the temporal length. `time` is a fixed integer multiple of the input `frames`, determined by the model's upsampling ratio (`time = upsampling ratio * frames`). The output audio waveform consists of float values in the range `[-1, 1]`. + +## Software Integration: + +**Runtime Engine(s):** PyTorch + +**Supported Hardware Microarchitecture Compatibility:** NVIDIA Ampere, NVIDIA Hopper, NVIDIA Lovelace, NVIDIA Turing, NVIDIA Volta
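As a quick illustration of the shape contract above, the sketch below checks the expected output length for a hypothetical configuration; the numbers are placeholders, and the real `num_mels` and upsampling ratio come from the checkpoint's `config.json`.

```python
import torch

# Hypothetical values for illustration only; real values come from config.json.
batch, num_mels, frames = 1, 100, 250
upsampling_ratio = 256  # hypothetical; equals the product of the generator's upsample rates

mel = torch.randn(batch, num_mels, frames)   # input:  [batch, channels, frames]
expected_time = upsampling_ratio * frames    # output: [batch, 1, time]
# waveform = generator(mel)                  # would have shape [1, 1, expected_time]
print(expected_time)                         # 64000 samples of float audio in [-1, 1]
```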
+ +## Preferred/Supported Operating System(s): + +Linux + +## Model Version(s): + +v2.0 + +## Training, Testing, and Evaluation Datasets: + +### Training Dataset: + +The dataset contains diverse audio types, including speech in multiple languages, environmental sounds, and instruments. + +**Links:** + +- [AAM: Artificial Audio Multitracks Dataset](https://zenodo.org/records/5794629) +- [AudioCaps](https://audiocaps.github.io/) +- [AudioSet](https://research.google.com/audioset/index.html) +- [common-accent](https://huggingface.co/datasets/DTU54DL/common-accent) +- [Crowd Sourced Emotional Multimodal Actors Dataset (CREMA-D)](https://ieeexplore.ieee.org/document/6849440) +- [DCASE2017 Challenge, Task 4: Large-scale weakly supervised sound event detection for smart cars](https://dcase.community/challenge2017/task-large-scale-sound-event-detection) +- [FSDnoisy18k](https://zenodo.org/records/2529934) +- [Free Universal Sound Separation Dataset](https://zenodo.org/records/3694384) +- [Greatest Hits dataset](https://andrewowens.com/vis/) +- [GTZAN](https://ieeexplore.ieee.org/document/1021072) +- [JL corpus](https://www.kaggle.com/datasets/tli725/jl-corpus) +- [Medley-solos-DB: a cross-collection dataset for musical instrument recognition](https://zenodo.org/records/3464194) +- [MUSAN: A Music, Speech, and Noise Corpus](https://www.openslr.org/17/) +- [MusicBench](https://huggingface.co/datasets/amaai-lab/MusicBench) +- [MusicCaps](https://www.kaggle.com/datasets/googleai/musiccaps) +- [MusicNet](https://www.kaggle.com/datasets/imsparsh/musicnet-dataset) +- [NSynth](https://magenta.tensorflow.org/datasets/nsynth) +- [OnAir-Music-Dataset](https://github.com/sevagh/OnAir-Music-Dataset) +- [Audio Piano Triads Dataset](https://zenodo.org/records/4740877) +- [Pitch Audio Dataset (Surge synthesizer)](https://zenodo.org/records/4677097) +- [SONYC Urban Sound Tagging (SONYC-UST): a multilabel dataset from an urban acoustic sensor network](https://zenodo.org/records/3966543) +- [VocalSound: A Dataset for Improving Human Vocal Sounds Recognition](https://arxiv.org/abs/2205.03433) +- [WavText5K](https://github.com/microsoft/WavText5K) +- [CSS10: A Collection of Single Speaker Speech Datasets for 10 Languages](https://github.com/Kyubyong/css10) +- [Hi-Fi Multi-Speaker English TTS Dataset (Hi-Fi TTS)](https://www.openslr.org/109/) +- [IIIT-H Indic Speech Databases](http://festvox.org/databases/iiit_voices/) +- [Libri-Light: A Benchmark for ASR with Limited or No Supervision](https://arxiv.org/abs/1912.07875) +- [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://www.openslr.org/60) +- [LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus](https://www.openslr.org/141/) +- [The SIWIS French Speech Synthesis Database](https://datashare.ed.ac.uk/handle/10283/2353) +- [Crowdsourced high-quality Colombian Spanish speech data set](https://openslr.org/72/) +- [TTS-Portuguese Corpus](https://github.com/Edresson/TTS-Portuguese-Corpus) +- [CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit](https://datashare.ed.ac.uk/handle/10283/3443) + +\*\* Data Collection Method by dataset
+ +- Human
+ +\*\* Labeling Method by dataset (for those with labels)
+ +- Hybrid: Automated, Human, Unknown
+ +### Evaluation Dataset: + +Properties: The audio generation quality of BigVGAN is evaluated using the `dev` splits of the [LibriTTS dataset](https://www.openslr.org/60/) and the [Hi-Fi TTS dataset](https://www.openslr.org/109/). The datasets include English speech with an equal balance of genders. + +\*\* Data Collection Method by dataset
+ +- Human
+ +\*\* Labeling Method by dataset
+ +- Automated
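The speech-quality numbers on these dev splits are computed with metrics such as wide-band PESQ. A condensed sketch of that calculation, following the validation loop in `train.py` later in this diff (the helper name is ours):

```python
import torch
import torchaudio as ta
from pesq import pesq  # same `pesq` package listed in requirements.txt

MAX_WAV_VALUE = 32767.0  # matches meldataset.MAX_WAV_VALUE


def pesq_wideband(reference: torch.Tensor, estimate: torch.Tensor, sampling_rate: int) -> float:
    """Wide-band PESQ between two mono waveforms in [-1, 1] (illustrative helper)."""
    resample = ta.transforms.Resample(sampling_rate, 16000)  # PESQ "wb" expects 16 kHz
    ref_16k = (resample(reference) * MAX_WAV_VALUE).short().cpu().numpy()
    est_16k = (resample(estimate) * MAX_WAV_VALUE).short().cpu().numpy()
    return pesq(16000, ref_16k, est_16k, "wb")
```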
+ +## Inference: + +**Engine:** PyTorch
+**Test Hardware:** NVIDIA A100 GPU
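For completeness, a condensed offline-inference sketch following `inference.py` in this directory; the checkpoint path and input file below are placeholders.

```python
import json
import os

import librosa
import torch

from bigvgan import BigVGAN
from env import AttrDict
from meldataset import MAX_WAV_VALUE, get_mel_spectrogram
from utils import load_checkpoint

checkpoint_file = "exp/bigvgan/g_05000000"  # placeholder path; config.json must sit next to it
with open(os.path.join(os.path.dirname(checkpoint_file), "config.json")) as f:
    h = AttrDict(json.load(f))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
generator = BigVGAN(h).to(device)
generator.load_state_dict(load_checkpoint(checkpoint_file, device)["generator"])
generator.eval()
generator.remove_weight_norm()

wav, _ = librosa.load("input.wav", sr=h.sampling_rate, mono=True)  # placeholder input file
mel = get_mel_spectrogram(torch.FloatTensor(wav).unsqueeze(0).to(device), h)
with torch.no_grad():
    audio = (generator(mel).squeeze() * MAX_WAV_VALUE).cpu().numpy().astype("int16")
```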
+ +## Ethical Considerations: + +NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the Model Card++ Explainability, Bias, Safety & Security, and Privacy Subcards. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/). diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md new file mode 100644 index 0000000000000000000000000000000000000000..73554a998384ca1b1050239ebd51bda46aec1878 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/privacy.md @@ -0,0 +1,14 @@ +| Field | Response | +| :------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------- | +| Generatable or reverse engineerable personal information? | None | +| Protected class data used to create this model? | None | +| Was consent obtained for any personal data used? | Not Applicable (No Personal Data) | +| How often is dataset reviewed? | Before Release | +| Is a mechanism in place to honor data subject right of access or deletion of personal data? | Not Applicable | +| If personal collected for the development of the model, was it collected directly by NVIDIA? | Not Applicable | +| If personal collected for the development of the model by NVIDIA, do you maintain or have access to disclosures made to data subjects? | Not Applicable | +| If personal collected for the development of this AI model, was it minimized to only what was required? | Not Applicable | +| Is data in dataset traceable? | Yes | +| Is there provenance for all datasets used in training? | Yes | +| Does data labeling (annotation, metadata) comply with privacy laws? | Yes | +| Is data compliant with data subject requests for data correction or removal, if such a request was made? | No, not possible with externally-sourced data. | diff --git a/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md b/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md new file mode 100644 index 0000000000000000000000000000000000000000..ed30370dfedbbb49748706034a7153d54f1a668f --- /dev/null +++ b/GPT_SoVITS/BigVGAN/nv-modelcard++/safety.md @@ -0,0 +1,6 @@ +| Field | Response | +| :---------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Model Application(s): | Synethic Audio Generation | +| Describe the life critical impact (if present). | Not Applicable | +| Use Case Restrictions: | None | +| Model and dataset restrictions: | The Principle of least privilege (PoLP) is applied limiting access for dataset generation and model development. Restrictions enforce dataset access during training, and dataset license constraints adhered to. 
| diff --git a/GPT_SoVITS/BigVGAN/requirements.txt b/GPT_SoVITS/BigVGAN/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e61d3203966612e6ad193bbabdef10b1d3fed84 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/requirements.txt @@ -0,0 +1,13 @@ +torch +numpy +librosa>=0.8.1 +scipy +tensorboard +soundfile +matplotlib +pesq +auraloss +tqdm +nnAudio +ninja +huggingface_hub>=0.23.4 \ No newline at end of file diff --git a/GPT_SoVITS/BigVGAN/tests/test_activation.py b/GPT_SoVITS/BigVGAN/tests/test_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..4134883540e472afb9b79972dd5e1cd36bee0e04 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/tests/test_activation.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import sys + +# to import modules from parent_dir +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) + +import torch +from alias_free_activation.cuda import activation1d +from activations import Snake + + +def test_load_fused_kernels(): + try: + print("[Success] load_fused_kernels") + except ImportError as e: + print("[Fail] load_fused_kernels") + raise e + + +def test_anti_alias_activation(): + data = torch.rand((10, 10, 200), device="cuda") + + # Check activations.Snake cuda vs. torch + fused_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=True).cuda() + fused_activation_output = fused_anti_alias_activation(data) + + torch_anti_alias_activation = activation1d.Activation1d(activation=Snake(10), fused=False).cuda() + torch_activation_output = torch_anti_alias_activation(data) + + test_result = (fused_activation_output - torch_activation_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_anti_alias_activation" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}" + f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_anti_alias_activation" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, " + f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" + ) + + +if __name__ == "__main__": + from alias_free_activation.cuda import load + + load.load() + test_load_fused_kernels() + test_anti_alias_activation() diff --git a/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py b/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc46b98ff0e91ddbaa025aa7e86afa828bde71f --- /dev/null +++ b/GPT_SoVITS/BigVGAN/tests/test_activation_snake_beta.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import sys + +# to import modules from parent_dir +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) + +import torch +from alias_free_activation.cuda import activation1d +from activations import SnakeBeta + + +def test_load_fused_kernels(): + try: + print("[Success] load_fused_kernels") + except ImportError as e: + print("[Fail] load_fused_kernels") + raise e + + +def test_anti_alias_activation(): + data = torch.rand((10, 10, 200), device="cuda") + + # Check activations, Snake CUDA vs. 
Torch + fused_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=True).cuda() + fused_activation_output = fused_anti_alias_activation(data) + + torch_anti_alias_activation = activation1d.Activation1d(activation=SnakeBeta(10), fused=False).cuda() + torch_activation_output = torch_anti_alias_activation(data) + + test_result = (fused_activation_output - torch_activation_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_anti_alias_activation" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}" + f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_anti_alias_activation" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_activation_output[-1][-1][:].tolist()}, " + f"\n > torch_values={torch_activation_output[-1][-1][:].tolist()}" + ) + + +if __name__ == "__main__": + from alias_free_activation.cuda import load + + load.load() + test_load_fused_kernels() + test_anti_alias_activation() diff --git a/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py b/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py new file mode 100644 index 0000000000000000000000000000000000000000..8ddb29e56c32eb20533a3022949f9487055380d9 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/tests/test_cuda_vs_torch_model.py @@ -0,0 +1,215 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import sys + +# to import modules from parent_dir +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) + +import torch +import json +from env import AttrDict +from bigvgan import BigVGAN +from time import time +from tqdm import tqdm +from meldataset import mel_spectrogram, MAX_WAV_VALUE +from scipy.io.wavfile import write +import numpy as np + +import argparse + +torch.backends.cudnn.benchmark = True + +# For easier debugging +torch.set_printoptions(linewidth=200, threshold=10_000) + + +def generate_soundwave(duration=5.0, sr=24000): + t = np.linspace(0, duration, int(sr * duration), False, dtype=np.float32) + + modulation = np.sin(2 * np.pi * t / duration) + + min_freq = 220 + max_freq = 1760 + frequencies = min_freq + (max_freq - min_freq) * (modulation + 1) / 2 + soundwave = np.sin(2 * np.pi * frequencies * t) + + soundwave = soundwave / np.max(np.abs(soundwave)) * 0.95 + + return soundwave, sr + + +def get_mel(x, h): + return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Test script to check CUDA kernel correctness.") + parser.add_argument( + "--checkpoint_file", + type=str, + required=True, + help="Path to the checkpoint file. 
Assumes config.json exists in the directory.", + ) + + args = parser.parse_args() + + config_file = os.path.join(os.path.split(args.checkpoint_file)[0], "config.json") + with open(config_file) as f: + config = f.read() + json_config = json.loads(config) + h = AttrDict({**json_config}) + + print("loading plain Pytorch BigVGAN") + generator_original = BigVGAN(h).to("cuda") + print("loading CUDA kernel BigVGAN with auto-build") + generator_cuda_kernel = BigVGAN(h, use_cuda_kernel=True).to("cuda") + + state_dict_g = load_checkpoint(args.checkpoint_file, "cuda") + generator_original.load_state_dict(state_dict_g["generator"]) + generator_cuda_kernel.load_state_dict(state_dict_g["generator"]) + + generator_original.remove_weight_norm() + generator_original.eval() + generator_cuda_kernel.remove_weight_norm() + generator_cuda_kernel.eval() + + # define number of samples and length of mel frame to benchmark + num_sample = 10 + num_mel_frame = 16384 + + # CUDA kernel correctness check + diff = 0.0 + for i in tqdm(range(num_sample)): + # Random mel + data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") + + with torch.inference_mode(): + audio_original = generator_original(data) + + with torch.inference_mode(): + audio_cuda_kernel = generator_cuda_kernel(data) + + # Both outputs should be (almost) the same + test_result = (audio_original - audio_cuda_kernel).abs() + diff += test_result.mean(dim=-1).item() + + diff /= num_sample + if diff <= 2e-3: # We can expect a small difference (~1e-3) which does not affect perceptual quality + print( + f"\n[Success] test CUDA fused vs. plain torch BigVGAN inference" + f"\n > mean_difference={diff}" + f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}" + f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}" + ) + else: + print( + f"\n[Fail] test CUDA fused vs. 
plain torch BigVGAN inference" + f"\n > mean_difference={diff}" + f"\n > fused_values={audio_cuda_kernel[-1][-1][-30:].tolist()}, " + f"\n > torch_values={audio_original[-1][-1][-30:].tolist()}" + ) + + del data, audio_original, audio_cuda_kernel + + # Variables for tracking total time and VRAM usage + toc_total_original = 0 + toc_total_cuda_kernel = 0 + vram_used_original_total = 0 + vram_used_cuda_kernel_total = 0 + audio_length_total = 0 + + # Measure Original inference in isolation + for i in tqdm(range(num_sample)): + torch.cuda.reset_peak_memory_stats(device="cuda") + data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") + torch.cuda.synchronize() + tic = time() + with torch.inference_mode(): + audio_original = generator_original(data) + torch.cuda.synchronize() + toc = time() - tic + toc_total_original += toc + + vram_used_original_total += torch.cuda.max_memory_allocated(device="cuda") + + del data, audio_original + torch.cuda.empty_cache() + + # Measure CUDA kernel inference in isolation + for i in tqdm(range(num_sample)): + torch.cuda.reset_peak_memory_stats(device="cuda") + data = torch.rand((1, h.num_mels, num_mel_frame), device="cuda") + torch.cuda.synchronize() + tic = time() + with torch.inference_mode(): + audio_cuda_kernel = generator_cuda_kernel(data) + torch.cuda.synchronize() + toc = time() - tic + toc_total_cuda_kernel += toc + + audio_length_total += audio_cuda_kernel.shape[-1] + + vram_used_cuda_kernel_total += torch.cuda.max_memory_allocated(device="cuda") + + del data, audio_cuda_kernel + torch.cuda.empty_cache() + + # Calculate metrics + audio_second = audio_length_total / h.sampling_rate + khz_original = audio_length_total / toc_total_original / 1000 + khz_cuda_kernel = audio_length_total / toc_total_cuda_kernel / 1000 + vram_used_original_gb = vram_used_original_total / num_sample / (1024**3) + vram_used_cuda_kernel_gb = vram_used_cuda_kernel_total / num_sample / (1024**3) + + # Print results + print( + f"Original BigVGAN: took {toc_total_original:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_original:.1f}kHz, {audio_second / toc_total_original:.1f} faster than realtime, VRAM used {vram_used_original_gb:.1f} GB" + ) + print( + f"CUDA kernel BigVGAN: took {toc_total_cuda_kernel:.2f} seconds to generate {audio_second:.2f} seconds of audio, {khz_cuda_kernel:.1f}kHz, {audio_second / toc_total_cuda_kernel:.1f} faster than realtime, VRAM used {vram_used_cuda_kernel_gb:.1f} GB" + ) + print(f"speedup of CUDA kernel: {khz_cuda_kernel / khz_original}") + print(f"VRAM saving of CUDA kernel: {vram_used_original_gb / vram_used_cuda_kernel_gb}") + + # Use artificial sine waves for inference test + audio_real, sr = generate_soundwave(duration=5.0, sr=h.sampling_rate) + audio_real = torch.tensor(audio_real).to("cuda") + # Compute mel spectrogram from the ground truth audio + x = get_mel(audio_real.unsqueeze(0), h) + + with torch.inference_mode(): + y_g_hat_original = generator_original(x) + y_g_hat_cuda_kernel = generator_cuda_kernel(x) + + audio_real = audio_real.squeeze() + audio_real = audio_real * MAX_WAV_VALUE + audio_real = audio_real.cpu().numpy().astype("int16") + + audio_original = y_g_hat_original.squeeze() + audio_original = audio_original * MAX_WAV_VALUE + audio_original = audio_original.cpu().numpy().astype("int16") + + audio_cuda_kernel = y_g_hat_cuda_kernel.squeeze() + audio_cuda_kernel = audio_cuda_kernel * MAX_WAV_VALUE + audio_cuda_kernel = audio_cuda_kernel.cpu().numpy().astype("int16") + + os.makedirs("tmp", exist_ok=True) + 
output_file_real = os.path.join("tmp", "audio_real.wav") + output_file_original = os.path.join("tmp", "audio_generated_original.wav") + output_file_cuda_kernel = os.path.join("tmp", "audio_generated_cuda_kernel.wav") + write(output_file_real, h.sampling_rate, audio_real) + write(output_file_original, h.sampling_rate, audio_original) + write(output_file_cuda_kernel, h.sampling_rate, audio_cuda_kernel) + print("Example generated audios of original vs. fused CUDA kernel written to tmp!") + print("Done") diff --git a/GPT_SoVITS/BigVGAN/train.py b/GPT_SoVITS/BigVGAN/train.py new file mode 100644 index 0000000000000000000000000000000000000000..39718cdb33d2e9a88ec9b98dd2032bdce83a4231 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/train.py @@ -0,0 +1,716 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + + +import warnings + +warnings.simplefilter(action="ignore", category=FutureWarning) +import itertools +import os +import time +import argparse +import json +import torch +import torch.nn.functional as F +from torch.utils.tensorboard import SummaryWriter +from torch.utils.data import DistributedSampler, DataLoader +import torch.multiprocessing as mp +from torch.distributed import init_process_group +from torch.nn.parallel import DistributedDataParallel +from env import AttrDict, build_env +from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist, MAX_WAV_VALUE + +from bigvgan import BigVGAN +from discriminators import ( + MultiPeriodDiscriminator, + MultiResolutionDiscriminator, + MultiBandDiscriminator, + MultiScaleSubbandCQTDiscriminator, +) +from loss import ( + feature_loss, + generator_loss, + discriminator_loss, + MultiScaleMelSpectrogramLoss, +) + +from utils import ( + plot_spectrogram, + plot_spectrogram_clipped, + scan_checkpoint, + load_checkpoint, + save_checkpoint, + save_audio, +) +import torchaudio as ta +from pesq import pesq +from tqdm import tqdm +import auraloss + +torch.backends.cudnn.benchmark = False + + +def train(rank, a, h): + if h.num_gpus > 1: + # initialize distributed + init_process_group( + backend=h.dist_config["dist_backend"], + init_method=h.dist_config["dist_url"], + world_size=h.dist_config["world_size"] * h.num_gpus, + rank=rank, + ) + + # Set seed and device + torch.cuda.manual_seed(h.seed) + torch.cuda.set_device(rank) + device = torch.device(f"cuda:{rank:d}") + + # Define BigVGAN generator + generator = BigVGAN(h).to(device) + + # Define discriminators. MPD is used by default + mpd = MultiPeriodDiscriminator(h).to(device) + + # Define additional discriminators. 
BigVGAN-v1 uses UnivNet's MRD as default + # New in BigVGAN-v2: option to switch to new discriminators: MultiBandDiscriminator / MultiScaleSubbandCQTDiscriminator + if h.get("use_mbd_instead_of_mrd", False): # Switch to MBD + print("[INFO] using MultiBandDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") + # Variable name is kept as "mrd" for backward compatibility & minimal code change + mrd = MultiBandDiscriminator(h).to(device) + elif h.get("use_cqtd_instead_of_mrd", False): # Switch to CQTD + print("[INFO] using MultiScaleSubbandCQTDiscriminator of BigVGAN-v2 instead of MultiResolutionDiscriminator") + mrd = MultiScaleSubbandCQTDiscriminator(h).to(device) + else: # Fallback to original MRD in BigVGAN-v1 + mrd = MultiResolutionDiscriminator(h).to(device) + + # New in BigVGAN-v2: option to switch to multi-scale L1 mel loss + if h.get("use_multiscale_melloss", False): + print("[INFO] using multi-scale Mel l1 loss of BigVGAN-v2 instead of the original single-scale loss") + fn_mel_loss_multiscale = MultiScaleMelSpectrogramLoss( + sampling_rate=h.sampling_rate + ) # NOTE: accepts waveform as input + else: + fn_mel_loss_singlescale = F.l1_loss + + # Print the model & number of parameters, and create or scan the latest checkpoint from checkpoints directory + if rank == 0: + print(generator) + print(mpd) + print(mrd) + print(f"Generator params: {sum(p.numel() for p in generator.parameters())}") + print(f"Discriminator mpd params: {sum(p.numel() for p in mpd.parameters())}") + print(f"Discriminator mrd params: {sum(p.numel() for p in mrd.parameters())}") + os.makedirs(a.checkpoint_path, exist_ok=True) + print(f"Checkpoints directory: {a.checkpoint_path}") + + if os.path.isdir(a.checkpoint_path): + # New in v2.1: If the step prefix pattern-based checkpoints are not found, also check for renamed files in Hugging Face Hub to resume training + cp_g = scan_checkpoint(a.checkpoint_path, prefix="g_", renamed_file="bigvgan_generator.pt") + cp_do = scan_checkpoint( + a.checkpoint_path, + prefix="do_", + renamed_file="bigvgan_discriminator_optimizer.pt", + ) + + # Load the latest checkpoint if exists + steps = 0 + if cp_g is None or cp_do is None: + state_dict_do = None + last_epoch = -1 + else: + state_dict_g = load_checkpoint(cp_g, device) + state_dict_do = load_checkpoint(cp_do, device) + generator.load_state_dict(state_dict_g["generator"]) + mpd.load_state_dict(state_dict_do["mpd"]) + mrd.load_state_dict(state_dict_do["mrd"]) + steps = state_dict_do["steps"] + 1 + last_epoch = state_dict_do["epoch"] + + # Initialize DDP, optimizers, and schedulers + if h.num_gpus > 1: + generator = DistributedDataParallel(generator, device_ids=[rank]).to(device) + mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device) + mrd = DistributedDataParallel(mrd, device_ids=[rank]).to(device) + + optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2]) + optim_d = torch.optim.AdamW( + itertools.chain(mrd.parameters(), mpd.parameters()), + h.learning_rate, + betas=[h.adam_b1, h.adam_b2], + ) + + if state_dict_do is not None: + optim_g.load_state_dict(state_dict_do["optim_g"]) + optim_d.load_state_dict(state_dict_do["optim_d"]) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch) + + # Define training and validation datasets + + """ + unseen_validation_filelist will contain sample filepaths outside the 
seen training & validation dataset + Example: trained on LibriTTS, validate on VCTK + """ + training_filelist, validation_filelist, list_unseen_validation_filelist = get_dataset_filelist(a) + + trainset = MelDataset( + training_filelist, + h, + h.segment_size, + h.n_fft, + h.num_mels, + h.hop_size, + h.win_size, + h.sampling_rate, + h.fmin, + h.fmax, + shuffle=False if h.num_gpus > 1 else True, + fmax_loss=h.fmax_for_loss, + device=device, + fine_tuning=a.fine_tuning, + base_mels_path=a.input_mels_dir, + is_seen=True, + ) + + train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None + + train_loader = DataLoader( + trainset, + num_workers=h.num_workers, + shuffle=False, + sampler=train_sampler, + batch_size=h.batch_size, + pin_memory=True, + drop_last=True, + ) + + if rank == 0: + validset = MelDataset( + validation_filelist, + h, + h.segment_size, + h.n_fft, + h.num_mels, + h.hop_size, + h.win_size, + h.sampling_rate, + h.fmin, + h.fmax, + False, + False, + fmax_loss=h.fmax_for_loss, + device=device, + fine_tuning=a.fine_tuning, + base_mels_path=a.input_mels_dir, + is_seen=True, + ) + validation_loader = DataLoader( + validset, + num_workers=1, + shuffle=False, + sampler=None, + batch_size=1, + pin_memory=True, + drop_last=True, + ) + + list_unseen_validset = [] + list_unseen_validation_loader = [] + for i in range(len(list_unseen_validation_filelist)): + unseen_validset = MelDataset( + list_unseen_validation_filelist[i], + h, + h.segment_size, + h.n_fft, + h.num_mels, + h.hop_size, + h.win_size, + h.sampling_rate, + h.fmin, + h.fmax, + False, + False, + fmax_loss=h.fmax_for_loss, + device=device, + fine_tuning=a.fine_tuning, + base_mels_path=a.input_mels_dir, + is_seen=False, + ) + unseen_validation_loader = DataLoader( + unseen_validset, + num_workers=1, + shuffle=False, + sampler=None, + batch_size=1, + pin_memory=True, + drop_last=True, + ) + list_unseen_validset.append(unseen_validset) + list_unseen_validation_loader.append(unseen_validation_loader) + + # Tensorboard logger + sw = SummaryWriter(os.path.join(a.checkpoint_path, "logs")) + if a.save_audio: # Also save audio to disk if --save_audio is set to True + os.makedirs(os.path.join(a.checkpoint_path, "samples"), exist_ok=True) + + """ + Validation loop, "mode" parameter is automatically defined as (seen or unseen)_(name of the dataset). 
+ If the name of the dataset contains "nonspeech", it skips PESQ calculation to prevent errors + """ + + def validate(rank, a, h, loader, mode="seen"): + assert rank == 0, "validate should only run on rank=0" + generator.eval() + torch.cuda.empty_cache() + + val_err_tot = 0 + val_pesq_tot = 0 + val_mrstft_tot = 0 + + # Modules for evaluation metrics + pesq_resampler = ta.transforms.Resample(h.sampling_rate, 16000).cuda() + loss_mrstft = auraloss.freq.MultiResolutionSTFTLoss(device="cuda") + + if a.save_audio: # Also save audio to disk if --save_audio is set to True + os.makedirs( + os.path.join(a.checkpoint_path, "samples", f"gt_{mode}"), + exist_ok=True, + ) + os.makedirs( + os.path.join(a.checkpoint_path, "samples", f"{mode}_{steps:08d}"), + exist_ok=True, + ) + + with torch.no_grad(): + print(f"step {steps} {mode} speaker validation...") + + # Loop over validation set and compute metrics + for j, batch in enumerate(tqdm(loader)): + x, y, _, y_mel = batch + y = y.to(device) + if hasattr(generator, "module"): + y_g_hat = generator.module(x.to(device)) + else: + y_g_hat = generator(x.to(device)) + y_mel = y_mel.to(device, non_blocking=True) + y_g_hat_mel = mel_spectrogram( + y_g_hat.squeeze(1), + h.n_fft, + h.num_mels, + h.sampling_rate, + h.hop_size, + h.win_size, + h.fmin, + h.fmax_for_loss, + ) + min_t = min(y_mel.size(-1), y_g_hat_mel.size(-1)) + val_err_tot += F.l1_loss(y_mel[..., :min_t], y_g_hat_mel[..., :min_t]).item() + + # PESQ calculation. only evaluate PESQ if it's speech signal (nonspeech PESQ will error out) + if "nonspeech" not in mode: # Skips if the name of dataset (in mode string) contains "nonspeech" + # Resample to 16000 for pesq + y_16k = pesq_resampler(y) + y_g_hat_16k = pesq_resampler(y_g_hat.squeeze(1)) + y_int_16k = (y_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() + y_g_hat_int_16k = (y_g_hat_16k[0] * MAX_WAV_VALUE).short().cpu().numpy() + val_pesq_tot += pesq(16000, y_int_16k, y_g_hat_int_16k, "wb") + + # MRSTFT calculation + min_t = min(y.size(-1), y_g_hat.size(-1)) + val_mrstft_tot += loss_mrstft(y_g_hat[..., :min_t], y[..., :min_t]).item() + + # Log audio and figures to Tensorboard + if j % a.eval_subsample == 0: # Subsample every nth from validation set + if steps >= 0: + sw.add_audio(f"gt_{mode}/y_{j}", y[0], steps, h.sampling_rate) + if a.save_audio: # Also save audio to disk if --save_audio is set to True + save_audio( + y[0], + os.path.join( + a.checkpoint_path, + "samples", + f"gt_{mode}", + f"{j:04d}.wav", + ), + h.sampling_rate, + ) + sw.add_figure( + f"gt_{mode}/y_spec_{j}", + plot_spectrogram(x[0]), + steps, + ) + + sw.add_audio( + f"generated_{mode}/y_hat_{j}", + y_g_hat[0], + steps, + h.sampling_rate, + ) + if a.save_audio: # Also save audio to disk if --save_audio is set to True + save_audio( + y_g_hat[0, 0], + os.path.join( + a.checkpoint_path, + "samples", + f"{mode}_{steps:08d}", + f"{j:04d}.wav", + ), + h.sampling_rate, + ) + # Spectrogram of synthesized audio + y_hat_spec = mel_spectrogram( + y_g_hat.squeeze(1), + h.n_fft, + h.num_mels, + h.sampling_rate, + h.hop_size, + h.win_size, + h.fmin, + h.fmax, + ) + sw.add_figure( + f"generated_{mode}/y_hat_spec_{j}", + plot_spectrogram(y_hat_spec.squeeze(0).cpu().numpy()), + steps, + ) + + """ + Visualization of spectrogram difference between GT and synthesized audio, difference higher than 1 is clipped for better visualization. 
+ """ + spec_delta = torch.clamp( + torch.abs(x[0] - y_hat_spec.squeeze(0).cpu()), + min=1e-6, + max=1.0, + ) + sw.add_figure( + f"delta_dclip1_{mode}/spec_{j}", + plot_spectrogram_clipped(spec_delta.numpy(), clip_max=1.0), + steps, + ) + + val_err = val_err_tot / (j + 1) + val_pesq = val_pesq_tot / (j + 1) + val_mrstft = val_mrstft_tot / (j + 1) + # Log evaluation metrics to Tensorboard + sw.add_scalar(f"validation_{mode}/mel_spec_error", val_err, steps) + sw.add_scalar(f"validation_{mode}/pesq", val_pesq, steps) + sw.add_scalar(f"validation_{mode}/mrstft", val_mrstft, steps) + + generator.train() + + # If the checkpoint is loaded, start with validation loop + if steps != 0 and rank == 0 and not a.debug: + if not a.skip_seen: + validate( + rank, + a, + h, + validation_loader, + mode=f"seen_{train_loader.dataset.name}", + ) + for i in range(len(list_unseen_validation_loader)): + validate( + rank, + a, + h, + list_unseen_validation_loader[i], + mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", + ) + # Exit the script if --evaluate is set to True + if a.evaluate: + exit() + + # Main training loop + generator.train() + mpd.train() + mrd.train() + for epoch in range(max(0, last_epoch), a.training_epochs): + if rank == 0: + start = time.time() + print(f"Epoch: {epoch + 1}") + + if h.num_gpus > 1: + train_sampler.set_epoch(epoch) + + for i, batch in enumerate(train_loader): + if rank == 0: + start_b = time.time() + x, y, _, y_mel = batch + + x = x.to(device, non_blocking=True) + y = y.to(device, non_blocking=True) + y_mel = y_mel.to(device, non_blocking=True) + y = y.unsqueeze(1) + + y_g_hat = generator(x) + y_g_hat_mel = mel_spectrogram( + y_g_hat.squeeze(1), + h.n_fft, + h.num_mels, + h.sampling_rate, + h.hop_size, + h.win_size, + h.fmin, + h.fmax_for_loss, + ) + + optim_d.zero_grad() + + # MPD + y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach()) + loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g) + + # MRD + y_ds_hat_r, y_ds_hat_g, _, _ = mrd(y, y_g_hat.detach()) + loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g) + + loss_disc_all = loss_disc_s + loss_disc_f + + # Set clip_grad_norm value + clip_grad_norm = h.get("clip_grad_norm", 1000.0) # Default to 1000 + + # Whether to freeze D for initial training steps + if steps >= a.freeze_step: + loss_disc_all.backward() + grad_norm_mpd = torch.nn.utils.clip_grad_norm_(mpd.parameters(), clip_grad_norm) + grad_norm_mrd = torch.nn.utils.clip_grad_norm_(mrd.parameters(), clip_grad_norm) + optim_d.step() + else: + print(f"[WARNING] skipping D training for the first {a.freeze_step} steps") + grad_norm_mpd = 0.0 + grad_norm_mrd = 0.0 + + # Generator + optim_g.zero_grad() + + # L1 Mel-Spectrogram Loss + lambda_melloss = h.get("lambda_melloss", 45.0) # Defaults to 45 in BigVGAN-v1 if not set + if h.get("use_multiscale_melloss", False): # uses wav for loss + loss_mel = fn_mel_loss_multiscale(y, y_g_hat) * lambda_melloss + else: # Uses mel for loss + loss_mel = fn_mel_loss_singlescale(y_mel, y_g_hat_mel) * lambda_melloss + + # MPD loss + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat) + loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) + loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) + + # MRD loss + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = mrd(y, y_g_hat) + loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) + loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) + + if steps >= a.freeze_step: + loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + 
loss_fm_f + loss_mel + else: + print(f"[WARNING] using regression loss only for G for the first {a.freeze_step} steps") + loss_gen_all = loss_mel + + loss_gen_all.backward() + grad_norm_g = torch.nn.utils.clip_grad_norm_(generator.parameters(), clip_grad_norm) + optim_g.step() + + if rank == 0: + # STDOUT logging + if steps % a.stdout_interval == 0: + mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to stdout + print( + f"Steps: {steps:d}, " + f"Gen Loss Total: {loss_gen_all:4.3f}, " + f"Mel Error: {mel_error:4.3f}, " + f"s/b: {time.time() - start_b:4.3f} " + f"lr: {optim_g.param_groups[0]['lr']:4.7f} " + f"grad_norm_g: {grad_norm_g:4.3f}" + ) + + # Checkpointing + if steps % a.checkpoint_interval == 0 and steps != 0: + checkpoint_path = f"{a.checkpoint_path}/g_{steps:08d}" + save_checkpoint( + checkpoint_path, + {"generator": (generator.module if h.num_gpus > 1 else generator).state_dict()}, + ) + checkpoint_path = f"{a.checkpoint_path}/do_{steps:08d}" + save_checkpoint( + checkpoint_path, + { + "mpd": (mpd.module if h.num_gpus > 1 else mpd).state_dict(), + "mrd": (mrd.module if h.num_gpus > 1 else mrd).state_dict(), + "optim_g": optim_g.state_dict(), + "optim_d": optim_d.state_dict(), + "steps": steps, + "epoch": epoch, + }, + ) + + # Tensorboard summary logging + if steps % a.summary_interval == 0: + mel_error = loss_mel.item() / lambda_melloss # Log training mel regression loss to tensorboard + sw.add_scalar("training/gen_loss_total", loss_gen_all.item(), steps) + sw.add_scalar("training/mel_spec_error", mel_error, steps) + sw.add_scalar("training/fm_loss_mpd", loss_fm_f.item(), steps) + sw.add_scalar("training/gen_loss_mpd", loss_gen_f.item(), steps) + sw.add_scalar("training/disc_loss_mpd", loss_disc_f.item(), steps) + sw.add_scalar("training/grad_norm_mpd", grad_norm_mpd, steps) + sw.add_scalar("training/fm_loss_mrd", loss_fm_s.item(), steps) + sw.add_scalar("training/gen_loss_mrd", loss_gen_s.item(), steps) + sw.add_scalar("training/disc_loss_mrd", loss_disc_s.item(), steps) + sw.add_scalar("training/grad_norm_mrd", grad_norm_mrd, steps) + sw.add_scalar("training/grad_norm_g", grad_norm_g, steps) + sw.add_scalar("training/learning_rate_d", scheduler_d.get_last_lr()[0], steps) + sw.add_scalar("training/learning_rate_g", scheduler_g.get_last_lr()[0], steps) + sw.add_scalar("training/epoch", epoch + 1, steps) + + # Validation + if steps % a.validation_interval == 0: + # Plot training input x so far used + for i_x in range(x.shape[0]): + sw.add_figure( + f"training_input/x_{i_x}", + plot_spectrogram(x[i_x].cpu()), + steps, + ) + sw.add_audio( + f"training_input/y_{i_x}", + y[i_x][0], + steps, + h.sampling_rate, + ) + + # Seen and unseen speakers validation loops + if not a.debug and steps != 0: + validate( + rank, + a, + h, + validation_loader, + mode=f"seen_{train_loader.dataset.name}", + ) + for i in range(len(list_unseen_validation_loader)): + validate( + rank, + a, + h, + list_unseen_validation_loader[i], + mode=f"unseen_{list_unseen_validation_loader[i].dataset.name}", + ) + steps += 1 + + # BigVGAN-v2 learning rate scheduler is changed from epoch-level to step-level + scheduler_g.step() + scheduler_d.step() + + if rank == 0: + print(f"Time taken for epoch {epoch + 1} is {int(time.time() - start)} sec\n") + + +def main(): + print("Initializing Training Process..") + + parser = argparse.ArgumentParser() + + parser.add_argument("--group_name", default=None) + + parser.add_argument("--input_wavs_dir", default="LibriTTS") + 
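+    # --input_mels_dir feeds base_mels_path of MelDataset and is paired with --fine_tuning (see the dataset construction above).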
parser.add_argument("--input_mels_dir", default="ft_dataset") + parser.add_argument("--input_training_file", default="tests/LibriTTS/train-full.txt") + parser.add_argument("--input_validation_file", default="tests/LibriTTS/val-full.txt") + + parser.add_argument( + "--list_input_unseen_wavs_dir", + nargs="+", + default=["tests/LibriTTS", "tests/LibriTTS"], + ) + parser.add_argument( + "--list_input_unseen_validation_file", + nargs="+", + default=["tests/LibriTTS/dev-clean.txt", "tests/LibriTTS/dev-other.txt"], + ) + + parser.add_argument("--checkpoint_path", default="exp/bigvgan") + parser.add_argument("--config", default="") + + parser.add_argument("--training_epochs", default=100000, type=int) + parser.add_argument("--stdout_interval", default=5, type=int) + parser.add_argument("--checkpoint_interval", default=50000, type=int) + parser.add_argument("--summary_interval", default=100, type=int) + parser.add_argument("--validation_interval", default=50000, type=int) + + parser.add_argument( + "--freeze_step", + default=0, + type=int, + help="freeze D for the first specified steps. G only uses regression loss for these steps.", + ) + + parser.add_argument("--fine_tuning", default=False, type=bool) + + parser.add_argument( + "--debug", + default=False, + type=bool, + help="debug mode. skips validation loop throughout training", + ) + parser.add_argument( + "--evaluate", + default=False, + type=bool, + help="only run evaluation from checkpoint and exit", + ) + parser.add_argument( + "--eval_subsample", + default=5, + type=int, + help="subsampling during evaluation loop", + ) + parser.add_argument( + "--skip_seen", + default=False, + type=bool, + help="skip seen dataset. useful for test set inference", + ) + parser.add_argument( + "--save_audio", + default=False, + type=bool, + help="save audio of test set inference to disk", + ) + + a = parser.parse_args() + + with open(a.config) as f: + data = f.read() + + json_config = json.loads(data) + h = AttrDict(json_config) + + build_env(a.config, "config.json", a.checkpoint_path) + + torch.manual_seed(h.seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(h.seed) + h.num_gpus = torch.cuda.device_count() + h.batch_size = int(h.batch_size / h.num_gpus) + print(f"Batch size per GPU: {h.batch_size}") + else: + pass + + if h.num_gpus > 1: + mp.spawn( + train, + nprocs=h.num_gpus, + args=( + a, + h, + ), + ) + else: + train(0, a, h) + + +if __name__ == "__main__": + main() diff --git a/GPT_SoVITS/BigVGAN/utils0.py b/GPT_SoVITS/BigVGAN/utils0.py new file mode 100644 index 0000000000000000000000000000000000000000..da98a24cf1447778305563f8e909f30b06e06b26 --- /dev/null +++ b/GPT_SoVITS/BigVGAN/utils0.py @@ -0,0 +1,99 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import glob +import os +import matplotlib +import torch +from torch.nn.utils import weight_norm + +matplotlib.use("Agg") +import matplotlib.pylab as plt +from .meldataset import MAX_WAV_VALUE +from scipy.io.wavfile import write + + +def plot_spectrogram(spectrogram): + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + + fig.canvas.draw() + plt.close() + + return fig + + +def plot_spectrogram_clipped(spectrogram, clip_max=2.0): + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow( + spectrogram, + aspect="auto", + origin="lower", + interpolation="none", + vmin=1e-6, + vmax=clip_max, + ) + plt.colorbar(im, ax=ax) + + fig.canvas.draw() + plt.close() + + return fig + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +def save_checkpoint(filepath, obj): + print(f"Saving checkpoint to {filepath}") + torch.save(obj, filepath) + print("Complete.") + + +def scan_checkpoint(cp_dir, prefix, renamed_file=None): + # Fallback to original scanning logic first + pattern = os.path.join(cp_dir, prefix + "????????") + cp_list = glob.glob(pattern) + + if len(cp_list) > 0: + last_checkpoint_path = sorted(cp_list)[-1] + print(f"[INFO] Resuming from checkpoint: '{last_checkpoint_path}'") + return last_checkpoint_path + + # If no pattern-based checkpoints are found, check for renamed file + if renamed_file: + renamed_path = os.path.join(cp_dir, renamed_file) + if os.path.isfile(renamed_path): + print(f"[INFO] Resuming from renamed checkpoint: '{renamed_file}'") + return renamed_path + + return None + + +def save_audio(audio, path, sr): + # wav: torch with 1d shape + audio = audio * MAX_WAV_VALUE + audio = audio.cpu().numpy().astype("int16") + write(path, sr, audio) diff --git a/GPT_SoVITS/configs/.gitignore b/GPT_SoVITS/configs/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2a6160516e607039d1737c43421a105e0860bd6c --- /dev/null +++ b/GPT_SoVITS/configs/.gitignore @@ -0,0 +1 @@ +*.yaml \ No newline at end of file diff --git a/GPT_SoVITS/configs/s1.yaml b/GPT_SoVITS/configs/s1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8ae17d4415cbbacca2d30e1403239ecc554e98b --- /dev/null +++ b/GPT_SoVITS/configs/s1.yaml @@ -0,0 +1,31 @@ +train: + seed: 1234 + epochs: 300 + batch_size: 8 + gradient_accumulation: 4 + save_every_n_epoch: 1 + precision: 16 + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 54 + num_workers: 1 + pad_val: 1024 # same with EOS in model +model: + vocab_size: 1025 + phoneme_vocab_size: 512 + embedding_dim: 512 + hidden_dim: 512 + head: 16 + linear_units: 2048 + n_layer: 12 + dropout: 0 + EOS: 1024 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/s1big.yaml b/GPT_SoVITS/configs/s1big.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..a811150de4c418be57b3bb932a7fe12cc7b6f679 --- /dev/null +++ b/GPT_SoVITS/configs/s1big.yaml @@ -0,0 +1,31 @@ +train: + seed: 1234 + epochs: 300 + batch_size: 8 + gradient_accumulation: 4 + save_every_n_epoch: 1 + precision: 16-mixed + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 54 + num_workers: 1 + pad_val: 1024 # same with EOS in model +model: + vocab_size: 1025 + phoneme_vocab_size: 512 + embedding_dim: 1024 + hidden_dim: 1024 + head: 16 + linear_units: 2048 + n_layer: 16 + dropout: 0 + EOS: 1024 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/s1big2.yaml b/GPT_SoVITS/configs/s1big2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b8b889babbc43ada8c17cee52db68fd484cc0c97 --- /dev/null +++ b/GPT_SoVITS/configs/s1big2.yaml @@ -0,0 +1,31 @@ +train: + seed: 1234 + epochs: 300 + batch_size: 12 + gradient_accumulation: 4 + save_every_n_epoch: 1 + precision: 16-mixed + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 54 + num_workers: 1 + pad_val: 1024 # same with EOS in model +model: + vocab_size: 1025 + phoneme_vocab_size: 512 + embedding_dim: 1024 + hidden_dim: 1024 + head: 16 + linear_units: 2048 + n_layer: 6 + dropout: 0 + EOS: 1024 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/s1longer.yaml b/GPT_SoVITS/configs/s1longer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f57abd21a85a36107d247488e988797f0f8d484 --- /dev/null +++ b/GPT_SoVITS/configs/s1longer.yaml @@ -0,0 +1,31 @@ +train: + seed: 1234 + epochs: 20 + batch_size: 8 + save_every_n_epoch: 1 + precision: 16-mixed + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 54 + num_workers: 4 + pad_val: 1024 # same with EOS in model +model: + vocab_size: 1025 + phoneme_vocab_size: 512 + embedding_dim: 512 + hidden_dim: 512 + head: 16 + linear_units: 2048 + n_layer: 24 + dropout: 0 + EOS: 1024 + random_bert: 0 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/s1mq.yaml b/GPT_SoVITS/configs/s1mq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b554fd34f88faca8bc71eb3eaf6b1bdf301a326b --- /dev/null +++ b/GPT_SoVITS/configs/s1mq.yaml @@ -0,0 +1,77 @@ +train: + seed: 1234 + epochs: 100 + batch_size: 6 + gradient_accumulation: 4 + save_every_n_epoch: 1 + precision: 32 + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 40 + num_workers: 1 + pad_val: 1024 # same with EOS in model +model: + saving_path: "ckpt/" + resume_checkpoint: null + vocoder_config_path: "quantizer/new_ckpt/config.json" + vocoder_ckpt_path: "quantizer/new_ckpt/g_00600000" + datadir: "/home/liweiche/GigaSpeech/wavs" + metapath: "/home/liweiche/GigaSpeech/train2.json" + val_metapath: "/home/liweiche/GigaSpeech/dev2.json" + sampledir: "logs/" + pretrained_path: null + lr: 0.0001 + batch_size: 200.0 + train_bucket_size: 8192 + training_step: 800000 + optim_flat_percent: 0.0 + warmup_step: 50 + adam_beta1: 0.9 + adam_beta2: 0.98 + ffd_size: 3072 + hidden_size: 768 + enc_nlayers: 6 + dec_nlayers: 6 + nheads: 12 + ar_layer: 4 + ar_ffd_size: 1024 + ar_hidden_size: 256 + ar_nheads: 4 + aligner_softmax_temp: 1.0 + 
layer_norm_eps: 0.00001 + speaker_embed_dropout: 0.05 + label_smoothing: 0.0 + val_check_interval: 5000 + check_val_every_n_epoch: 1 + precision: "fp16" + nworkers: 16 + distributed: true + accelerator: "ddp" + version: null + accumulate_grad_batches: 1 + use_repetition_token: true + use_repetition_gating: false + repetition_penalty: 1.0 + sampling_temperature: 1.0 + top_k: -1 + min_top_k: 3 + top_p: 0.8 + sample_num: 4 + length_penalty_max_length: 15000 + length_penalty_max_prob: 0.95 + max_input_length: 2048 + max_output_length: 2000 + sample_rate: 16000 + n_codes: 1024 + n_cluster_groups: 1 + phone_context_window: 4 + phoneset_size: 1000 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/s2.json b/GPT_SoVITS/configs/s2.json new file mode 100644 index 0000000000000000000000000000000000000000..0bd672263e923ce21a0edb3ebdecbc8235c1291f --- /dev/null +++ b/GPT_SoVITS/configs/s2.json @@ -0,0 +1,91 @@ +{ + "train": { + "log_interval": 100, + "eval_interval": 500, + "seed": 1234, + "epochs": 100, + "learning_rate": 0.0001, + "betas": [ + 0.8, + 0.99 + ], + "eps": 1e-09, + "batch_size": 32, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 20480, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0, + "text_low_lr_rate": 0.4, + "grad_ckpt": false + }, + "data": { + "max_wav_value": 32768.0, + "sampling_rate": 32000, + "filter_length": 2048, + "hop_length": 640, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null, + "add_blank": true, + "n_speakers": 300, + "cleaned_text": true + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0.1, + "resblock": "1", + "resblock_kernel_sizes": [ + 3, + 7, + 11 + ], + "resblock_dilation_sizes": [ + [ + 1, + 3, + 5 + ], + [ + 1, + 3, + 5 + ], + [ + 1, + 3, + 5 + ] + ], + "upsample_rates": [ + 10, + 8, + 2, + 2, + 2 + ], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [ + 16, + 16, + 8, + 2, + 2 + ], + "n_layers_q": 3, + "use_spectral_norm": false, + "gin_channels": 512, + "semantic_frame_rate": "25hz", + "freeze_quantizer": true + }, + "s2_ckpt_dir": "logs/s2/big2k1", + "content_module": "cnhubert" +} \ No newline at end of file diff --git a/GPT_SoVITS/configs/train.yaml b/GPT_SoVITS/configs/train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..be5333571980807b7d8f788165035c02cb0d176c --- /dev/null +++ b/GPT_SoVITS/configs/train.yaml @@ -0,0 +1,32 @@ +gpu: + n_card: 1 + n_process_per_card: 2 +io: + text_path: D:\RVC1006\GPT-SoVITS\GPT_SoVITS + save_every_n_epoch: 1 + precision: 16-mixed + gradient_clip: 1.0 +optimizer: + lr: 0.01 + lr_init: 0.00001 + lr_end: 0.0001 + warmup_steps: 2000 + decay_steps: 40000 +data: + max_eval_sample: 8 + max_sec: 54 + num_workers: 1 + pad_val: 1024 # same with EOS in model +model: + vocab_size: 1025 + phoneme_vocab_size: 512 + embedding_dim: 512 + hidden_dim: 512 + head: 16 + linear_units: 2048 + n_layer: 24 + dropout: 0 + EOS: 1024 + random_bert: 0 +inference: + top_k: 5 diff --git a/GPT_SoVITS/configs/tts_infer.yaml b/GPT_SoVITS/configs/tts_infer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20c41a20ae26a746d37e079a39e29d7af6b4513c --- /dev/null +++ b/GPT_SoVITS/configs/tts_infer.yaml @@ -0,0 +1,40 @@ +custom: + bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large + cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base + device: cuda + is_half: true 
+ t2s_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt + version: v2 + vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth +v1: + bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large + cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base + device: cpu + is_half: false + t2s_weights_path: GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt + version: v1 + vits_weights_path: GPT_SoVITS/pretrained_models/s2G488k.pth +v2: + bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large + cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base + device: cpu + is_half: false + t2s_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt + version: v2 + vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth +v3: + bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large + cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base + device: cpu + is_half: false + t2s_weights_path: GPT_SoVITS/pretrained_models/s1v3.ckpt + version: v3 + vits_weights_path: GPT_SoVITS/pretrained_models/s2Gv3.pth +v4: + bert_base_path: GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large + cnhuhbert_base_path: GPT_SoVITS/pretrained_models/chinese-hubert-base + device: cpu + is_half: false + t2s_weights_path: GPT_SoVITS/pretrained_models/s1v3.ckpt + version: v4 + vits_weights_path: GPT_SoVITS/pretrained_models/gsv-v4-pretrained/s2Gv4.pth diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tools/audio_sr.py b/tools/audio_sr.py new file mode 100644 index 0000000000000000000000000000000000000000..58df6d201d2a044c246bc7fe69559f508cd95dfb --- /dev/null +++ b/tools/audio_sr.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import, division, print_function, unicode_literals +import sys +import os + +AP_BWE_main_dir_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "AP_BWE_main") +sys.path.append(AP_BWE_main_dir_path) +import json +import torch +import torchaudio.functional as aF +# from attrdict import AttrDict####will be bug in py3.10 + +from datasets1.dataset import amp_pha_stft, amp_pha_istft +from models.model import APNet_BWE_Model + + +class AP_BWE: + def __init__(self, device, DictToAttrRecursive, checkpoint_file=None): + if checkpoint_file == None: + checkpoint_file = "%s/24kto48k/g_24kto48k.zip" % (AP_BWE_main_dir_path) + if os.path.exists(checkpoint_file) == False: + raise FileNotFoundError + config_file = os.path.join(os.path.split(checkpoint_file)[0], "config.json") + with open(config_file) as f: + data = f.read() + json_config = json.loads(data) + # h = AttrDict(json_config) + h = DictToAttrRecursive(json_config) + model = APNet_BWE_Model(h).to(device) + state_dict = torch.load(checkpoint_file, map_location="cpu", weights_only=False) + model.load_state_dict(state_dict["generator"]) + model.eval() + self.device = device + self.model = model + self.h = h + + def to(self, *arg, **kwargs): + self.model.to(*arg, **kwargs) + self.device = self.model.conv_pre_mag.weight.device + return self + + def __call__(self, audio, orig_sampling_rate): + with torch.no_grad(): + # audio, orig_sampling_rate = torchaudio.load(inp_path) + # audio = audio.to(self.device) + audio = aF.resample(audio, 
orig_freq=orig_sampling_rate, new_freq=self.h.hr_sampling_rate) + amp_nb, pha_nb, com_nb = amp_pha_stft(audio, self.h.n_fft, self.h.hop_size, self.h.win_size) + amp_wb_g, pha_wb_g, com_wb_g = self.model(amp_nb, pha_nb) + audio_hr_g = amp_pha_istft(amp_wb_g, pha_wb_g, self.h.n_fft, self.h.hop_size, self.h.win_size) + # sf.write(opt_path, audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate, 'PCM_16') + return audio_hr_g.squeeze().cpu().numpy(), self.h.hr_sampling_rate diff --git a/tools/cmd-denoise.py b/tools/cmd-denoise.py new file mode 100644 index 0000000000000000000000000000000000000000..bbf68476405be4e9fa2152095e3ed737a0142844 --- /dev/null +++ b/tools/cmd-denoise.py @@ -0,0 +1,38 @@ +import os +import argparse +import traceback + +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from tqdm import tqdm + +path_denoise = "tools/denoise-model/speech_frcrn_ans_cirm_16k" +path_denoise = path_denoise if os.path.exists(path_denoise) else "damo/speech_frcrn_ans_cirm_16k" +ans = pipeline(Tasks.acoustic_noise_suppression, model=path_denoise) + + +def execute_denoise(input_folder, output_folder): + os.makedirs(output_folder, exist_ok=True) + # print(input_folder) + # print(list(os.listdir(input_folder).sort())) + for name in tqdm(os.listdir(input_folder)): + try: + ans("%s/%s" % (input_folder, name), output_path="%s/%s" % (output_folder, name)) + except: + traceback.print_exc() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", "--input_folder", type=str, required=True, help="Path to the folder containing WAV files." + ) + parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.") + parser.add_argument( + "-p", "--precision", type=str, default="float16", choices=["float16", "float32"], help="fp16 or fp32" + ) # 还没接入 + cmd = parser.parse_args() + execute_denoise( + input_folder=cmd.input_folder, + output_folder=cmd.output_folder, + ) diff --git a/tools/my_utils.py b/tools/my_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..44d326e1ea1798968a0badf3f688ab20817cb78e --- /dev/null +++ b/tools/my_utils.py @@ -0,0 +1,129 @@ +import os +import traceback +import ffmpeg +import numpy as np +import gradio as gr +from tools.i18n.i18n import I18nAuto +import pandas as pd + +i18n = I18nAuto(language=os.environ.get("language", "Auto")) + + +def load_audio(file, sr): + try: + # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 + # This launches a subprocess to decode audio while down-mixing and resampling as necessary. + # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
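+        # ffmpeg outputs raw f32le mono samples at the requested rate; they are reinterpreted below with np.frombuffer as a float32 waveform.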
+ file = clean_path(file) # 防止小白拷路径头尾带了空格和"和回车 + if os.path.exists(file) == False: + raise RuntimeError("You input a wrong audio path that does not exists, please fix it!") + out, _ = ( + ffmpeg.input(file, threads=0) + .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) + .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) + ) + except Exception: + traceback.print_exc() + raise RuntimeError(i18n("音频加载失败")) + + return np.frombuffer(out, np.float32).flatten() + + +def clean_path(path_str: str): + if path_str.endswith(("\\", "/")): + return clean_path(path_str[0:-1]) + path_str = path_str.replace("/", os.sep).replace("\\", os.sep) + return path_str.strip( + " '\n\"\u202a" + ) # path_str.strip(" ").strip('\'').strip("\n").strip('"').strip(" ").strip("\u202a") + + +def check_for_existance(file_list: list = None, is_train=False, is_dataset_processing=False): + files_status = [] + if is_train == True and file_list: + file_list.append(os.path.join(file_list[0], "2-name2text.txt")) + file_list.append(os.path.join(file_list[0], "3-bert")) + file_list.append(os.path.join(file_list[0], "4-cnhubert")) + file_list.append(os.path.join(file_list[0], "5-wav32k")) + file_list.append(os.path.join(file_list[0], "6-name2semantic.tsv")) + for file in file_list: + if os.path.exists(file): + files_status.append(True) + else: + files_status.append(False) + if sum(files_status) != len(files_status): + if is_train: + for file, status in zip(file_list, files_status): + if status: + pass + else: + gr.Warning(file) + gr.Warning(i18n("以下文件或文件夹不存在")) + return False + elif is_dataset_processing: + if files_status[0]: + return True + elif not files_status[0]: + gr.Warning(file_list[0]) + elif not files_status[1] and file_list[1]: + gr.Warning(file_list[1]) + gr.Warning(i18n("以下文件或文件夹不存在")) + return False + else: + if file_list[0]: + gr.Warning(file_list[0]) + gr.Warning(i18n("以下文件或文件夹不存在")) + else: + gr.Warning(i18n("路径不能为空")) + return False + return True + + +def check_details(path_list=None, is_train=False, is_dataset_processing=False): + if is_dataset_processing: + list_path, audio_path = path_list + if not list_path.endswith(".list"): + gr.Warning(i18n("请填入正确的List路径")) + return + if audio_path: + if not os.path.isdir(audio_path): + gr.Warning(i18n("请填入正确的音频文件夹路径")) + return + with open(list_path, "r", encoding="utf8") as f: + line = f.readline().strip("\n").split("\n") + wav_name, _, __, ___ = line[0].split("|") + wav_name = clean_path(wav_name) + if audio_path != "" and audio_path != None: + wav_name = os.path.basename(wav_name) + wav_path = "%s/%s" % (audio_path, wav_name) + else: + wav_path = wav_name + if os.path.exists(wav_path): + ... + else: + gr.Warning(i18n("路径错误")) + return + if is_train: + path_list.append(os.path.join(path_list[0], "2-name2text.txt")) + path_list.append(os.path.join(path_list[0], "4-cnhubert")) + path_list.append(os.path.join(path_list[0], "5-wav32k")) + path_list.append(os.path.join(path_list[0], "6-name2semantic.tsv")) + phone_path, hubert_path, wav_path, semantic_path = path_list[1:] + with open(phone_path, "r", encoding="utf-8") as f: + if f.read(1): + ... + else: + gr.Warning(i18n("缺少音素数据集")) + if os.listdir(hubert_path): + ... + else: + gr.Warning(i18n("缺少Hubert数据集")) + if os.listdir(wav_path): + ... + else: + gr.Warning(i18n("缺少音频数据集")) + df = pd.read_csv(semantic_path, delimiter="\t", encoding="utf-8") + if len(df) >= 1: + ... 
+ else: + gr.Warning(i18n("缺少语义数据集")) diff --git a/tools/slice_audio.py b/tools/slice_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..66fafa93dccd9195808a1f94ae62000466612695 --- /dev/null +++ b/tools/slice_audio.py @@ -0,0 +1,53 @@ +import os +import sys +import numpy as np +import traceback +from scipy.io import wavfile + +# parent_directory = os.path.dirname(os.path.abspath(__file__)) +# sys.path.append(parent_directory) +from tools.my_utils import load_audio +from slicer2 import Slicer + + +def slice(inp, opt_root, threshold, min_length, min_interval, hop_size, max_sil_kept, _max, alpha, i_part, all_part): + os.makedirs(opt_root, exist_ok=True) + if os.path.isfile(inp): + input = [inp] + elif os.path.isdir(inp): + input = [os.path.join(inp, name) for name in sorted(list(os.listdir(inp)))] + else: + return "输入路径存在但既不是文件也不是文件夹" + slicer = Slicer( + sr=32000, # 长音频采样率 + threshold=int(threshold), # 音量小于这个值视作静音的备选切割点 + min_length=int(min_length), # 每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值 + min_interval=int(min_interval), # 最短切割间隔 + hop_size=int(hop_size), # 怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好) + max_sil_kept=int(max_sil_kept), # 切完后静音最多留多长 + ) + _max = float(_max) + alpha = float(alpha) + for inp_path in input[int(i_part) :: int(all_part)]: + # print(inp_path) + try: + name = os.path.basename(inp_path) + audio = load_audio(inp_path, 32000) + # print(audio.shape) + for chunk, start, end in slicer.slice(audio): # start和end是帧数 + tmp_max = np.abs(chunk).max() + if tmp_max > 1: + chunk /= tmp_max + chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk + wavfile.write( + "%s/%s_%010d_%010d.wav" % (opt_root, name, start, end), + 32000, + # chunk.astype(np.float32), + (chunk * 32767).astype(np.int16), + ) + except: + print(inp_path, "->fail->", traceback.format_exc()) + return "执行完毕,请检查输出文件" + + +print(slice(*sys.argv[1:])) diff --git a/tools/slicer2.py b/tools/slicer2.py new file mode 100644 index 0000000000000000000000000000000000000000..8d80f1b4ca442e4645b5d79ddfa4f73b23f911b9 --- /dev/null +++ b/tools/slicer2.py @@ -0,0 +1,230 @@ +import numpy as np + + +# This function is obtained from librosa. 
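+# get_rms frames the padded signal (frame_length window, hop_length stride) and returns the per-frame root-mean-square energy.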
+def get_rms( + y, + frame_length=2048, + hop_length=512, + pad_mode="constant", +): + padding = (int(frame_length // 2), int(frame_length // 2)) + y = np.pad(y, padding, mode=pad_mode) + + axis = -1 + # put our new within-frame axis at the end for now + out_strides = y.strides + tuple([y.strides[axis]]) + # Reduce the shape on the framing axis + x_shape_trimmed = list(y.shape) + x_shape_trimmed[axis] -= frame_length - 1 + out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) + xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) + if axis < 0: + target_axis = axis - 1 + else: + target_axis = axis + 1 + xw = np.moveaxis(xw, -1, target_axis) + # Downsample along the target axis + slices = [slice(None)] * xw.ndim + slices[axis] = slice(0, None, hop_length) + x = xw[tuple(slices)] + + # Calculate power + power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) + + return np.sqrt(power) + + +class Slicer: + def __init__( + self, + sr: int, + threshold: float = -40.0, + min_length: int = 5000, + min_interval: int = 300, + hop_size: int = 20, + max_sil_kept: int = 5000, + ): + if not min_length >= min_interval >= hop_size: + raise ValueError("The following condition must be satisfied: min_length >= min_interval >= hop_size") + if not max_sil_kept >= hop_size: + raise ValueError("The following condition must be satisfied: max_sil_kept >= hop_size") + min_interval = sr * min_interval / 1000 + self.threshold = 10 ** (threshold / 20.0) + self.hop_size = round(sr * hop_size / 1000) + self.win_size = min(round(min_interval), 4 * self.hop_size) + self.min_length = round(sr * min_length / 1000 / self.hop_size) + self.min_interval = round(min_interval / self.hop_size) + self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) + + def _apply_slice(self, waveform, begin, end): + if len(waveform.shape) > 1: + return waveform[:, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)] + else: + return waveform[begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)] + + # @timeit + def slice(self, waveform): + if len(waveform.shape) > 1: + samples = waveform.mean(axis=0) + else: + samples = waveform + if samples.shape[0] <= self.min_length: + return [waveform] + rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) + sil_tags = [] + silence_start = None + clip_start = 0 + for i, rms in enumerate(rms_list): + # Keep looping while frame is silent. + if rms < self.threshold: + # Record start of silent frames. + if silence_start is None: + silence_start = i + continue + # Keep looping while frame is not silent and silence start has not been recorded. + if silence_start is None: + continue + # Clear recorded silence start if interval is not enough or clip is too short + is_leading_silence = silence_start == 0 and i > self.max_sil_kept + need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length + if not is_leading_silence and not need_slice_middle: + silence_start = None + continue + # Need slicing. Record the range of silent frames to be removed. 
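+            # Three cases follow: silence no longer than max_sil_kept gets a single cut at its quietest frame;
+            # silence up to 2 * max_sil_kept is cut around its quietest region, keeping at most max_sil_kept
+            # frames on each side; longer silence is cut near both ends, at the quietest frame within the
+            # first/last max_sil_kept frames.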
+ if i - silence_start <= self.max_sil_kept: + pos = rms_list[silence_start : i + 1].argmin() + silence_start + if silence_start == 0: + sil_tags.append((0, pos)) + else: + sil_tags.append((pos, pos)) + clip_start = pos + elif i - silence_start <= self.max_sil_kept * 2: + pos = rms_list[i - self.max_sil_kept : silence_start + self.max_sil_kept + 1].argmin() + pos += i - self.max_sil_kept + pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start + pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept + if silence_start == 0: + sil_tags.append((0, pos_r)) + clip_start = pos_r + else: + sil_tags.append((min(pos_l, pos), max(pos_r, pos))) + clip_start = max(pos_r, pos) + else: + pos_l = rms_list[silence_start : silence_start + self.max_sil_kept + 1].argmin() + silence_start + pos_r = rms_list[i - self.max_sil_kept : i + 1].argmin() + i - self.max_sil_kept + if silence_start == 0: + sil_tags.append((0, pos_r)) + else: + sil_tags.append((pos_l, pos_r)) + clip_start = pos_r + silence_start = None + # Deal with trailing silence. + total_frames = rms_list.shape[0] + if silence_start is not None and total_frames - silence_start >= self.min_interval: + silence_end = min(total_frames, silence_start + self.max_sil_kept) + pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start + sil_tags.append((pos, total_frames + 1)) + # Apply and return slices. + ####音频+起始时间+终止时间 + if len(sil_tags) == 0: + return [[waveform, 0, int(total_frames * self.hop_size)]] + else: + chunks = [] + if sil_tags[0][0] > 0: + chunks.append([self._apply_slice(waveform, 0, sil_tags[0][0]), 0, int(sil_tags[0][0] * self.hop_size)]) + for i in range(len(sil_tags) - 1): + chunks.append( + [ + self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]), + int(sil_tags[i][1] * self.hop_size), + int(sil_tags[i + 1][0] * self.hop_size), + ] + ) + if sil_tags[-1][1] < total_frames: + chunks.append( + [ + self._apply_slice(waveform, sil_tags[-1][1], total_frames), + int(sil_tags[-1][1] * self.hop_size), + int(total_frames * self.hop_size), + ] + ) + return chunks + + +def main(): + import os.path + from argparse import ArgumentParser + + import librosa + import soundfile + + parser = ArgumentParser() + parser.add_argument("audio", type=str, help="The audio to be sliced") + parser.add_argument("--out", type=str, help="Output directory of the sliced audio clips") + parser.add_argument( + "--db_thresh", + type=float, + required=False, + default=-40, + help="The dB threshold for silence detection", + ) + parser.add_argument( + "--min_length", + type=int, + required=False, + default=5000, + help="The minimum milliseconds required for each sliced audio clip", + ) + parser.add_argument( + "--min_interval", + type=int, + required=False, + default=300, + help="The minimum milliseconds for a silence part to be sliced", + ) + parser.add_argument( + "--hop_size", + type=int, + required=False, + default=10, + help="Frame length in milliseconds", + ) + parser.add_argument( + "--max_sil_kept", + type=int, + required=False, + default=500, + help="The maximum silence length kept around the sliced clip, presented in milliseconds", + ) + args = parser.parse_args() + out = args.out + if out is None: + out = os.path.dirname(os.path.abspath(args.audio)) + audio, sr = librosa.load(args.audio, sr=None, mono=False) + slicer = Slicer( + sr=sr, + threshold=args.db_thresh, + min_length=args.min_length, + min_interval=args.min_interval, + hop_size=args.hop_size, + 
max_sil_kept=args.max_sil_kept, + ) + chunks = slicer.slice(audio) + if not os.path.exists(out): + os.makedirs(out) + for i, chunk in enumerate(chunks): + if len(chunk.shape) > 1: + chunk = chunk.T + soundfile.write( + os.path.join( + out, + "%s_%d.wav" % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i), + ), + chunk, + sr, + ) + + +if __name__ == "__main__": + main() diff --git a/tools/subfix_webui.py b/tools/subfix_webui.py new file mode 100644 index 0000000000000000000000000000000000000000..eae1c980f956075b8dc75cae7a463e4ed3741ed0 --- /dev/null +++ b/tools/subfix_webui.py @@ -0,0 +1,415 @@ +import argparse +import os +import copy +import json +import uuid + +try: + import gradio.analytics as analytics + + analytics.version_check = lambda: None +except: + ... + +import librosa +import gradio as gr +import numpy as np +import soundfile + +g_json_key_text = "" +g_json_key_path = "" +g_load_file = "" +g_load_format = "" + +g_max_json_index = 0 +g_index = 0 +g_batch = 10 +g_text_list = [] +g_audio_list = [] +g_checkbox_list = [] +g_data_json = [] + + +def reload_data(index, batch): + global g_index + g_index = index + global g_batch + g_batch = batch + datas = g_data_json[index : index + batch] + output = [] + for d in datas: + output.append({g_json_key_text: d[g_json_key_text], g_json_key_path: d[g_json_key_path]}) + return output + + +def b_change_index(index, batch): + global g_index, g_batch + g_index, g_batch = index, batch + datas = reload_data(index, batch) + output = [] + for i, _ in enumerate(datas): + output.append( + # gr.Textbox( + # label=f"Text {i+index}", + # value=_[g_json_key_text]#text + # ) + {"__type__": "update", "label": f"Text {i + index}", "value": _[g_json_key_text]} + ) + for _ in range(g_batch - len(datas)): + output.append( + # gr.Textbox( + # label=f"Text", + # value="" + # ) + {"__type__": "update", "label": "Text", "value": ""} + ) + for _ in datas: + output.append(_[g_json_key_path]) + for _ in range(g_batch - len(datas)): + output.append(None) + for _ in range(g_batch): + output.append(False) + return output + + +def b_next_index(index, batch): + b_save_file() + if (index + batch) <= g_max_json_index: + return index + batch, *b_change_index(index + batch, batch) + else: + return index, *b_change_index(index, batch) + + +def b_previous_index(index, batch): + b_save_file() + if (index - batch) >= 0: + return index - batch, *b_change_index(index - batch, batch) + else: + return 0, *b_change_index(0, batch) + + +def b_submit_change(*text_list): + global g_data_json + change = False + for i, new_text in enumerate(text_list): + if g_index + i <= g_max_json_index: + new_text = new_text.strip() + " " + if g_data_json[g_index + i][g_json_key_text] != new_text: + g_data_json[g_index + i][g_json_key_text] = new_text + change = True + if change: + b_save_file() + return g_index, *b_change_index(g_index, g_batch) + + +def b_delete_audio(*checkbox_list): + global g_data_json, g_index, g_max_json_index + b_save_file() + change = False + for i, checkbox in reversed(list(enumerate(checkbox_list))): + if g_index + i < len(g_data_json): + if checkbox == True: + g_data_json.pop(g_index + i) + change = True + + g_max_json_index = len(g_data_json) - 1 + if g_index > g_max_json_index: + g_index = g_max_json_index + g_index = g_index if g_index >= 0 else 0 + if change: + b_save_file() + # return gr.Slider(value=g_index, maximum=(g_max_json_index if g_max_json_index>=0 else 0)), *b_change_index(g_index, g_batch) + return { + "value": g_index, + "__type__": "update", + 
"maximum": (g_max_json_index if g_max_json_index >= 0 else 0), + }, *b_change_index(g_index, g_batch) + + +def b_invert_selection(*checkbox_list): + new_list = [not item if item is True else True for item in checkbox_list] + return new_list + + +def get_next_path(filename): + base_dir = os.path.dirname(filename) + base_name = os.path.splitext(os.path.basename(filename))[0] + for i in range(100): + new_path = os.path.join(base_dir, f"{base_name}_{str(i).zfill(2)}.wav") + if not os.path.exists(new_path): + return new_path + return os.path.join(base_dir, f"{str(uuid.uuid4())}.wav") + + +def b_audio_split(audio_breakpoint, *checkbox_list): + global g_data_json, g_max_json_index + checked_index = [] + for i, checkbox in enumerate(checkbox_list): + if checkbox == True and g_index + i < len(g_data_json): + checked_index.append(g_index + i) + if len(checked_index) == 1: + index = checked_index[0] + audio_json = copy.deepcopy(g_data_json[index]) + path = audio_json[g_json_key_path] + data, sample_rate = librosa.load(path, sr=None, mono=True) + audio_maxframe = len(data) + break_frame = int(audio_breakpoint * sample_rate) + + if break_frame >= 1 and break_frame < audio_maxframe: + audio_first = data[0:break_frame] + audio_second = data[break_frame:] + nextpath = get_next_path(path) + soundfile.write(nextpath, audio_second, sample_rate) + soundfile.write(path, audio_first, sample_rate) + g_data_json.insert(index + 1, audio_json) + g_data_json[index + 1][g_json_key_path] = nextpath + b_save_file() + + g_max_json_index = len(g_data_json) - 1 + # return gr.Slider(value=g_index, maximum=g_max_json_index), *b_change_index(g_index, g_batch) + return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch) + + +def b_merge_audio(interval_r, *checkbox_list): + global g_data_json, g_max_json_index + b_save_file() + checked_index = [] + audios_path = [] + audios_text = [] + for i, checkbox in enumerate(checkbox_list): + if checkbox == True and g_index + i < len(g_data_json): + checked_index.append(g_index + i) + + if len(checked_index) > 1: + for i in checked_index: + audios_path.append(g_data_json[i][g_json_key_path]) + audios_text.append(g_data_json[i][g_json_key_text]) + for i in reversed(checked_index[1:]): + g_data_json.pop(i) + + base_index = checked_index[0] + base_path = audios_path[0] + g_data_json[base_index][g_json_key_text] = "".join(audios_text) + + audio_list = [] + l_sample_rate = None + for i, path in enumerate(audios_path): + data, sample_rate = librosa.load(path, sr=l_sample_rate, mono=True) + l_sample_rate = sample_rate + if i > 0: + silence = np.zeros(int(l_sample_rate * interval_r)) + audio_list.append(silence) + + audio_list.append(data) + + audio_concat = np.concatenate(audio_list) + + soundfile.write(base_path, audio_concat, l_sample_rate) + + b_save_file() + + g_max_json_index = len(g_data_json) - 1 + + # return gr.Slider(value=g_index, maximum=g_max_json_index), *b_change_index(g_index, g_batch) + return {"value": g_index, "maximum": g_max_json_index, "__type__": "update"}, *b_change_index(g_index, g_batch) + + +def b_save_json(): + with open(g_load_file, "w", encoding="utf-8") as file: + for data in g_data_json: + file.write(f"{json.dumps(data, ensure_ascii=False)}\n") + + +def b_save_list(): + with open(g_load_file, "w", encoding="utf-8") as file: + for data in g_data_json: + wav_path = data["wav_path"] + speaker_name = data["speaker_name"] + language = data["language"] + text = data["text"] + 
file.write(f"{wav_path}|{speaker_name}|{language}|{text}".strip() + "\n") + + +def b_load_json(): + global g_data_json, g_max_json_index + with open(g_load_file, "r", encoding="utf-8") as file: + g_data_json = file.readlines() + g_data_json = [json.loads(line) for line in g_data_json] + g_max_json_index = len(g_data_json) - 1 + + +def b_load_list(): + global g_data_json, g_max_json_index + with open(g_load_file, "r", encoding="utf-8") as source: + data_list = source.readlines() + for _ in data_list: + data = _.split("|") + if len(data) == 4: + wav_path, speaker_name, language, text = data + g_data_json.append( + {"wav_path": wav_path, "speaker_name": speaker_name, "language": language, "text": text.strip()} + ) + else: + print("error line:", data) + g_max_json_index = len(g_data_json) - 1 + + +def b_save_file(): + if g_load_format == "json": + b_save_json() + elif g_load_format == "list": + b_save_list() + + +def b_load_file(): + if g_load_format == "json": + b_load_json() + elif g_load_format == "list": + b_load_list() + + +def set_global(load_json, load_list, json_key_text, json_key_path, batch): + global g_json_key_text, g_json_key_path, g_load_file, g_load_format, g_batch + + g_batch = int(batch) + + if load_json != "None": + g_load_format = "json" + g_load_file = load_json + elif load_list != "None": + g_load_format = "list" + g_load_file = load_list + else: + g_load_format = "list" + g_load_file = "demo.list" + + g_json_key_text = json_key_text + g_json_key_path = json_key_path + + b_load_file() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Process some integers.") + parser.add_argument("--load_json", default="None", help="source file, like demo.json") + parser.add_argument("--is_share", default="False", help="whether webui is_share=True") + parser.add_argument("--load_list", default="None", help="source file, like demo.list") + parser.add_argument("--webui_port_subfix", default=9871, help="source file, like demo.list") + parser.add_argument("--json_key_text", default="text", help="the text key name in json, Default: text") + parser.add_argument("--json_key_path", default="wav_path", help="the path key name in json, Default: wav_path") + parser.add_argument("--g_batch", default=10, help="max number g_batch wav to display, Default: 10") + + args = parser.parse_args() + + set_global(args.load_json, args.load_list, args.json_key_text, args.json_key_path, args.g_batch) + + with gr.Blocks() as demo: + with gr.Row(): + btn_change_index = gr.Button("Change Index") + btn_submit_change = gr.Button("Submit Text") + btn_merge_audio = gr.Button("Merge Audio") + btn_delete_audio = gr.Button("Delete Audio") + btn_previous_index = gr.Button("Previous Index") + btn_next_index = gr.Button("Next Index") + + with gr.Row(): + index_slider = gr.Slider(minimum=0, maximum=g_max_json_index, value=g_index, step=1, label="Index", scale=3) + splitpoint_slider = gr.Slider( + minimum=0, maximum=120.0, value=0, step=0.1, label="Audio Split Point(s)", scale=3 + ) + btn_audio_split = gr.Button("Split Audio", scale=1) + btn_save_json = gr.Button("Save File", visible=True, scale=1) + btn_invert_selection = gr.Button("Invert Selection", scale=1) + + with gr.Row(): + with gr.Column(): + for _ in range(0, g_batch): + with gr.Row(): + text = gr.Textbox(label="Text", visible=True, scale=5) + audio_output = gr.Audio(label="Output Audio", visible=True, scale=5) + audio_check = gr.Checkbox(label="Yes", show_label=True, info="Choose Audio", scale=1) + g_text_list.append(text) + 
g_audio_list.append(audio_output) + g_checkbox_list.append(audio_check) + + with gr.Row(): + batchsize_slider = gr.Slider( + minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False + ) + interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3) + btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1) + btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1) + + btn_change_index.click( + b_change_index, + inputs=[ + index_slider, + batchsize_slider, + ], + outputs=[*g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_submit_change.click( + b_submit_change, + inputs=[ + *g_text_list, + ], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_previous_index.click( + b_previous_index, + inputs=[ + index_slider, + batchsize_slider, + ], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_next_index.click( + b_next_index, + inputs=[ + index_slider, + batchsize_slider, + ], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_delete_audio.click( + b_delete_audio, + inputs=[*g_checkbox_list], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_merge_audio.click( + b_merge_audio, + inputs=[interval_slider, *g_checkbox_list], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_audio_split.click( + b_audio_split, + inputs=[splitpoint_slider, *g_checkbox_list], + outputs=[index_slider, *g_text_list, *g_audio_list, *g_checkbox_list], + ) + + btn_invert_selection.click(b_invert_selection, inputs=[*g_checkbox_list], outputs=[*g_checkbox_list]) + + btn_save_json.click(b_save_file) + + demo.load( + b_change_index, + inputs=[ + index_slider, + batchsize_slider, + ], + outputs=[*g_text_list, *g_audio_list, *g_checkbox_list], + ) + + demo.launch( + server_name="0.0.0.0", + inbrowser=True, + # quiet=True, + share=eval(args.is_share), + server_port=int(args.webui_port_subfix), + ) diff --git a/tools/uvr5/bs_roformer/__init__.py b/tools/uvr5/bs_roformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tools/uvr5/bs_roformer/attend.py b/tools/uvr5/bs_roformer/attend.py new file mode 100644 index 0000000000000000000000000000000000000000..29dad18e1b05a1926f3dd142ddec1dd6401e8243 --- /dev/null +++ b/tools/uvr5/bs_roformer/attend.py @@ -0,0 +1,67 @@ +from packaging import version +import torch +from torch import nn, einsum +import torch.nn.functional as F + + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +class Attend(nn.Module): + def __init__(self, dropout=0.0, flash=False, scale=None): + super().__init__() + self.scale = scale + self.dropout = dropout + self.attn_dropout = nn.Dropout(dropout) + + self.flash = flash + assert not (flash and version.parse(torch.__version__) < version.parse("2.0.0")), ( + "in order to use flash attention, you must be using pytorch 2.0 or above" + ) + + def flash_attn(self, q, k, v): + # _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device + + if exists(self.scale): + default_scale = q.shape[-1] ** -0.5 + q = q * (self.scale / default_scale) + + # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale + # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): + return 
F.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout if self.training else 0.0) + + def forward(self, q, k, v): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + # q_len, k_len, device = q.shape[-2], k.shape[-2], q.device + + scale = default(self.scale, q.shape[-1] ** -0.5) + + if self.flash: + return self.flash_attn(q, k, v) + + # similarity + + sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale + + # attention + + attn = sim.softmax(dim=-1) + attn = self.attn_dropout(attn) + + # aggregate values + + out = einsum("b h i j, b h j d -> b h i d", attn, v) + + return out diff --git a/tools/uvr5/bs_roformer/bs_roformer.py b/tools/uvr5/bs_roformer/bs_roformer.py new file mode 100644 index 0000000000000000000000000000000000000000..8c9d1f300435f26b6a11f13b117f88e1b308aeb0 --- /dev/null +++ b/tools/uvr5/bs_roformer/bs_roformer.py @@ -0,0 +1,626 @@ +from functools import partial + +import torch +from torch import nn +from torch.nn import Module, ModuleList +import torch.nn.functional as F + +from bs_roformer.attend import Attend +from torch.utils.checkpoint import checkpoint + +from typing import Tuple, Optional, Callable +# from beartype.typing import Tuple, Optional, List, Callable +# from beartype import beartype + +from rotary_embedding_torch import RotaryEmbedding + +from einops import rearrange, pack, unpack +from einops.layers.torch import Rearrange + +# helper functions + + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +# norm + + +def l2norm(t): + return F.normalize(t, dim=-1, p=2) + + +class RMSNorm(Module): + def __init__(self, dim): + super().__init__() + self.scale = dim**0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return F.normalize(x, dim=-1) * self.scale * self.gamma + + +# attention + + +class FeedForward(Module): + def __init__(self, dim, mult=4, dropout=0.0): + super().__init__() + dim_inner = int(dim * mult) + self.net = nn.Sequential( + RMSNorm(dim), + nn.Linear(dim, dim_inner), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(dim_inner, dim), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +class Attention(Module): + def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True): + super().__init__() + self.heads = heads + self.scale = dim_head**-0.5 + dim_inner = heads * dim_head + + self.rotary_embed = rotary_embed + + self.attend = Attend(flash=flash, dropout=dropout) + + self.norm = RMSNorm(dim) + self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) + + self.to_gates = nn.Linear(dim, heads) + + self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout)) + + def forward(self, x): + x = self.norm(x) + + q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads) + + if exists(self.rotary_embed): + q = self.rotary_embed.rotate_queries_or_keys(q) + k = self.rotary_embed.rotate_queries_or_keys(k) + + out = self.attend(q, k, v) + + gates = self.to_gates(x) + out = out * rearrange(gates, "b n h -> b h n 1").sigmoid() + + out = rearrange(out, "b h n d -> b n (h d)") + return self.to_out(out) + + +class LinearAttention(Module): + """ + this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. 
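+    attention is computed across the feature dimension (cross-covariance style), so its cost grows linearly with sequence length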
+ """ + + # @beartype + def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0): + super().__init__() + dim_inner = dim_head * heads + self.norm = RMSNorm(dim) + + self.to_qkv = nn.Sequential( + nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads) + ) + + self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) + + self.attend = Attend(scale=scale, dropout=dropout, flash=flash) + + self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False)) + + def forward(self, x): + x = self.norm(x) + + q, k, v = self.to_qkv(x) + + q, k = map(l2norm, (q, k)) + q = q * self.temperature.exp() + + out = self.attend(q, k, v) + + return self.to_out(out) + + +class Transformer(Module): + def __init__( + self, + *, + dim, + depth, + dim_head=64, + heads=8, + attn_dropout=0.0, + ff_dropout=0.0, + ff_mult=4, + norm_output=True, + rotary_embed=None, + flash_attn=True, + linear_attn=False, + ): + super().__init__() + self.layers = ModuleList([]) + + for _ in range(depth): + if linear_attn: + attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) + else: + attn = Attention( + dim=dim, + dim_head=dim_head, + heads=heads, + dropout=attn_dropout, + rotary_embed=rotary_embed, + flash=flash_attn, + ) + + self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)])) + + self.norm = RMSNorm(dim) if norm_output else nn.Identity() + + def forward(self, x): + for attn, ff in self.layers: + x = attn(x) + x + x = ff(x) + x + + return self.norm(x) + + +# bandsplit module + + +class BandSplit(Module): + # @beartype + def __init__(self, dim, dim_inputs: Tuple[int, ...]): + super().__init__() + self.dim_inputs = dim_inputs + self.to_features = ModuleList([]) + + for dim_in in dim_inputs: + net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim)) + + self.to_features.append(net) + + def forward(self, x): + x = x.split(self.dim_inputs, dim=-1) + + outs = [] + for split_input, to_feature in zip(x, self.to_features): + split_output = to_feature(split_input) + outs.append(split_output) + + return torch.stack(outs, dim=-2) + + +def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh): + dim_hidden = default(dim_hidden, dim_in) + + net = [] + dims = (dim_in, *((dim_hidden,) * (depth - 1)), dim_out) + + for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): + is_last = ind == (len(dims) - 2) + + net.append(nn.Linear(layer_dim_in, layer_dim_out)) + + if is_last: + continue + + net.append(activation()) + + return nn.Sequential(*net) + + +class MaskEstimator(Module): + # @beartype + def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4): + super().__init__() + self.dim_inputs = dim_inputs + self.to_freqs = ModuleList([]) + dim_hidden = dim * mlp_expansion_factor + + for dim_in in dim_inputs: + net = [] + + mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1)) + + self.to_freqs.append(mlp) + + def forward(self, x): + x = x.unbind(dim=-2) + + outs = [] + + for band_features, mlp in zip(x, self.to_freqs): + freq_out = mlp(band_features) + outs.append(freq_out) + + return torch.cat(outs, dim=-1) + + +# main class + +DEFAULT_FREQS_PER_BANDS = ( + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 2, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 4, + 12, + 12, + 12, + 
12, + 12, + 12, + 12, + 12, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 24, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 48, + 128, + 129, +) + + +class BSRoformer(Module): + # @beartype + def __init__( + self, + dim, + *, + depth, + stereo=False, + num_stems=1, + time_transformer_depth=2, + freq_transformer_depth=2, + linear_transformer_depth=0, + freqs_per_bands: Tuple[int, ...] = DEFAULT_FREQS_PER_BANDS, + # in the paper, they divide into ~60 bands, test with 1 for starters + dim_head=64, + heads=8, + attn_dropout=0.0, + ff_dropout=0.0, + flash_attn=True, + dim_freqs_in=1025, + stft_n_fft=2048, + stft_hop_length=512, + # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction + stft_win_length=2048, + stft_normalized=False, + stft_window_fn: Optional[Callable] = None, + mask_estimator_depth=2, + multi_stft_resolution_loss_weight=1.0, + multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256), + multi_stft_hop_size=147, + multi_stft_normalized=False, + multi_stft_window_fn: Callable = torch.hann_window, + mlp_expansion_factor=4, + use_torch_checkpoint=False, + skip_connection=False, + ): + super().__init__() + + self.stereo = stereo + self.audio_channels = 2 if stereo else 1 + self.num_stems = num_stems + self.use_torch_checkpoint = use_torch_checkpoint + self.skip_connection = skip_connection + + self.layers = ModuleList([]) + + transformer_kwargs = dict( + dim=dim, + heads=heads, + dim_head=dim_head, + attn_dropout=attn_dropout, + ff_dropout=ff_dropout, + flash_attn=flash_attn, + norm_output=False, + ) + + time_rotary_embed = RotaryEmbedding(dim=dim_head) + freq_rotary_embed = RotaryEmbedding(dim=dim_head) + + for _ in range(depth): + tran_modules = [] + if linear_transformer_depth > 0: + tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) + tran_modules.append( + Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) + ) + tran_modules.append( + Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) + ) + self.layers.append(nn.ModuleList(tran_modules)) + + self.final_norm = RMSNorm(dim) + + self.stft_kwargs = dict( + n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized + ) + + self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) + + freqs = torch.stft( + torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_win_length), return_complex=True + ).shape[1] + + assert len(freqs_per_bands) > 1 + assert sum(freqs_per_bands) == freqs, ( + f"the number of freqs in the bands must equal {freqs} based on the STFT settings, but got {sum(freqs_per_bands)}" + ) + + freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in freqs_per_bands) + + self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex) + + self.mask_estimators = nn.ModuleList([]) + + for _ in range(num_stems): + mask_estimator = MaskEstimator( + dim=dim, + dim_inputs=freqs_per_bands_with_complex, + depth=mask_estimator_depth, + mlp_expansion_factor=mlp_expansion_factor, + ) + + self.mask_estimators.append(mask_estimator) + + # for the multi-resolution stft loss + + self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight + self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes + self.multi_stft_n_fft = stft_n_fft + self.multi_stft_window_fn = 
multi_stft_window_fn + + self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized) + + def forward(self, raw_audio, target=None, return_loss_breakdown=False): + """ + einops + + b - batch + f - freq + t - time + s - audio channel (1 for mono, 2 for stereo) + n - number of 'stems' + c - complex (2) + d - feature dimension + """ + + device = raw_audio.device + + # defining whether model is loaded on MPS (MacOS GPU accelerator) + x_is_mps = True if device.type == "mps" else False + + if raw_audio.ndim == 2: + raw_audio = rearrange(raw_audio, "b t -> b 1 t") + + channels = raw_audio.shape[1] + assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), ( + "stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). also need to be False if mono (channel dimension of 1)" + ) + + # to stft + + raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t") + + stft_window = self.stft_window_fn(device=device) + + # RuntimeError: FFT operations are only supported on MacOS 14+ + # Since it's tedious to define whether we're on correct MacOS version - simple try-catch is used + try: + stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) + except: + stft_repr = torch.stft( + raw_audio.cpu() if x_is_mps else raw_audio, + **self.stft_kwargs, + window=stft_window.cpu() if x_is_mps else stft_window, + return_complex=True, + ).to(device) + + stft_repr = torch.view_as_real(stft_repr) + + stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c") + + # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting + stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c") + + x = rearrange(stft_repr, "b f t c -> b t (f c)") + + if self.use_torch_checkpoint: + x = checkpoint(self.band_split, x, use_reentrant=False) + else: + x = self.band_split(x) + + # axial / hierarchical attention + + store = [None] * len(self.layers) + for i, transformer_block in enumerate(self.layers): + if len(transformer_block) == 3: + linear_transformer, time_transformer, freq_transformer = transformer_block + + x, ft_ps = pack([x], "b * d") + if self.use_torch_checkpoint: + x = checkpoint(linear_transformer, x, use_reentrant=False) + else: + x = linear_transformer(x) + (x,) = unpack(x, ft_ps, "b * d") + else: + time_transformer, freq_transformer = transformer_block + + if self.skip_connection: + # Sum all previous + for j in range(i): + x = x + store[j] + + x = rearrange(x, "b t f d -> b f t d") + x, ps = pack([x], "* t d") + + if self.use_torch_checkpoint: + x = checkpoint(time_transformer, x, use_reentrant=False) + else: + x = time_transformer(x) + + (x,) = unpack(x, ps, "* t d") + x = rearrange(x, "b f t d -> b t f d") + x, ps = pack([x], "* f d") + + if self.use_torch_checkpoint: + x = checkpoint(freq_transformer, x, use_reentrant=False) + else: + x = freq_transformer(x) + + (x,) = unpack(x, ps, "* f d") + + if self.skip_connection: + store[i] = x + + x = self.final_norm(x) + + num_stems = len(self.mask_estimators) + + if self.use_torch_checkpoint: + mask = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1) + else: + mask = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) + mask = rearrange(mask, "b n t (f c) -> b n f t c", c=2) + + # modulate frequency representation + + stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c") + + # complex number multiplication + + stft_repr = 
torch.view_as_complex(stft_repr) + mask = torch.view_as_complex(mask) + + stft_repr = stft_repr * mask + + # istft + + stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels) + + # same as torch.stft() fix for MacOS MPS above + try: + recon_audio = torch.istft( + stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=raw_audio.shape[-1] + ) + except: + recon_audio = torch.istft( + stft_repr.cpu() if x_is_mps else stft_repr, + **self.stft_kwargs, + window=stft_window.cpu() if x_is_mps else stft_window, + return_complex=False, + length=raw_audio.shape[-1], + ).to(device) + + recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", s=self.audio_channels, n=num_stems) + + if num_stems == 1: + recon_audio = rearrange(recon_audio, "b 1 s t -> b s t") + + # if a target is passed in, calculate loss for learning + + if not exists(target): + return recon_audio + + if self.num_stems > 1: + assert target.ndim == 4 and target.shape[1] == self.num_stems + + if target.ndim == 2: + target = rearrange(target, "... t -> ... 1 t") + + target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft + + loss = F.l1_loss(recon_audio, target) + + multi_stft_resolution_loss = 0.0 + + for window_size in self.multi_stft_resolutions_window_sizes: + res_stft_kwargs = dict( + n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft + win_length=window_size, + return_complex=True, + window=self.multi_stft_window_fn(window_size, device=device), + **self.multi_stft_kwargs, + ) + + recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs) + target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs) + + multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) + + weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight + + total_loss = loss + weighted_multi_resolution_loss + + if not return_loss_breakdown: + return total_loss + + return total_loss, (loss, multi_stft_resolution_loss) diff --git a/tools/uvr5/bs_roformer/mel_band_roformer.py b/tools/uvr5/bs_roformer/mel_band_roformer.py new file mode 100644 index 0000000000000000000000000000000000000000..78a390400133307c2a41bf044abf39d165180370 --- /dev/null +++ b/tools/uvr5/bs_roformer/mel_band_roformer.py @@ -0,0 +1,606 @@ +from functools import partial + +import torch +from torch import nn +from torch.nn import Module, ModuleList +import torch.nn.functional as F + +from bs_roformer.attend import Attend +from torch.utils.checkpoint import checkpoint + +from typing import Tuple, Optional, Callable +# from beartype.typing import Tuple, Optional, List, Callable +# from beartype import beartype + +from rotary_embedding_torch import RotaryEmbedding + +from einops import rearrange, pack, unpack, reduce, repeat +from einops.layers.torch import Rearrange + +from librosa import filters + + +# helper functions + + +def exists(val): + return val is not None + + +def default(v, d): + return v if exists(v) else d + + +def pack_one(t, pattern): + return pack([t], pattern) + + +def unpack_one(t, ps, pattern): + return unpack(t, ps, pattern)[0] + + +def pad_at_dim(t, pad, dim=-1, value=0.0): + dims_from_right = (-dim - 1) if dim < 0 else (t.ndim - dim - 1) + zeros = (0, 0) * dims_from_right + return F.pad(t, (*zeros, *pad), value=value) + + +def l2norm(t): + return F.normalize(t, dim=-1, p=2) + + +# norm + + +class RMSNorm(Module): + def 
__init__(self, dim): + super().__init__() + self.scale = dim**0.5 + self.gamma = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + return F.normalize(x, dim=-1) * self.scale * self.gamma + + +# attention + + +class FeedForward(Module): + def __init__(self, dim, mult=4, dropout=0.0): + super().__init__() + dim_inner = int(dim * mult) + self.net = nn.Sequential( + RMSNorm(dim), + nn.Linear(dim, dim_inner), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(dim_inner, dim), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +class Attention(Module): + def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, rotary_embed=None, flash=True): + super().__init__() + self.heads = heads + self.scale = dim_head**-0.5 + dim_inner = heads * dim_head + + self.rotary_embed = rotary_embed + + self.attend = Attend(flash=flash, dropout=dropout) + + self.norm = RMSNorm(dim) + self.to_qkv = nn.Linear(dim, dim_inner * 3, bias=False) + + self.to_gates = nn.Linear(dim, heads) + + self.to_out = nn.Sequential(nn.Linear(dim_inner, dim, bias=False), nn.Dropout(dropout)) + + def forward(self, x): + x = self.norm(x) + + q, k, v = rearrange(self.to_qkv(x), "b n (qkv h d) -> qkv b h n d", qkv=3, h=self.heads) + + if exists(self.rotary_embed): + q = self.rotary_embed.rotate_queries_or_keys(q) + k = self.rotary_embed.rotate_queries_or_keys(k) + + out = self.attend(q, k, v) + + gates = self.to_gates(x) + out = out * rearrange(gates, "b n h -> b h n 1").sigmoid() + + out = rearrange(out, "b h n d -> b n (h d)") + return self.to_out(out) + + +class LinearAttention(Module): + """ + this flavor of linear attention proposed in https://arxiv.org/abs/2106.09681 by El-Nouby et al. + """ + + # @beartype + def __init__(self, *, dim, dim_head=32, heads=8, scale=8, flash=False, dropout=0.0): + super().__init__() + dim_inner = dim_head * heads + self.norm = RMSNorm(dim) + + self.to_qkv = nn.Sequential( + nn.Linear(dim, dim_inner * 3, bias=False), Rearrange("b n (qkv h d) -> qkv b h d n", qkv=3, h=heads) + ) + + self.temperature = nn.Parameter(torch.ones(heads, 1, 1)) + + self.attend = Attend(scale=scale, dropout=dropout, flash=flash) + + self.to_out = nn.Sequential(Rearrange("b h d n -> b n (h d)"), nn.Linear(dim_inner, dim, bias=False)) + + def forward(self, x): + x = self.norm(x) + + q, k, v = self.to_qkv(x) + + q, k = map(l2norm, (q, k)) + q = q * self.temperature.exp() + + out = self.attend(q, k, v) + + return self.to_out(out) + + +class Transformer(Module): + def __init__( + self, + *, + dim, + depth, + dim_head=64, + heads=8, + attn_dropout=0.0, + ff_dropout=0.0, + ff_mult=4, + norm_output=True, + rotary_embed=None, + flash_attn=True, + linear_attn=False, + ): + super().__init__() + self.layers = ModuleList([]) + + for _ in range(depth): + if linear_attn: + attn = LinearAttention(dim=dim, dim_head=dim_head, heads=heads, dropout=attn_dropout, flash=flash_attn) + else: + attn = Attention( + dim=dim, + dim_head=dim_head, + heads=heads, + dropout=attn_dropout, + rotary_embed=rotary_embed, + flash=flash_attn, + ) + + self.layers.append(ModuleList([attn, FeedForward(dim=dim, mult=ff_mult, dropout=ff_dropout)])) + + self.norm = RMSNorm(dim) if norm_output else nn.Identity() + + def forward(self, x): + for attn, ff in self.layers: + x = attn(x) + x + x = ff(x) + x + + return self.norm(x) + + +# bandsplit module + + +class BandSplit(Module): + # @beartype + def __init__(self, dim, dim_inputs: Tuple[int, ...]): + super().__init__() + self.dim_inputs = dim_inputs + self.to_features = ModuleList([]) + + for 
dim_in in dim_inputs: + net = nn.Sequential(RMSNorm(dim_in), nn.Linear(dim_in, dim)) + + self.to_features.append(net) + + def forward(self, x): + x = x.split(self.dim_inputs, dim=-1) + + outs = [] + for split_input, to_feature in zip(x, self.to_features): + split_output = to_feature(split_input) + outs.append(split_output) + + return torch.stack(outs, dim=-2) + + +def MLP(dim_in, dim_out, dim_hidden=None, depth=1, activation=nn.Tanh): + dim_hidden = default(dim_hidden, dim_in) + + net = [] + dims = (dim_in, *((dim_hidden,) * depth), dim_out) + + for ind, (layer_dim_in, layer_dim_out) in enumerate(zip(dims[:-1], dims[1:])): + is_last = ind == (len(dims) - 2) + + net.append(nn.Linear(layer_dim_in, layer_dim_out)) + + if is_last: + continue + + net.append(activation()) + + return nn.Sequential(*net) + + +class MaskEstimator(Module): + # @beartype + def __init__(self, dim, dim_inputs: Tuple[int, ...], depth, mlp_expansion_factor=4): + super().__init__() + self.dim_inputs = dim_inputs + self.to_freqs = ModuleList([]) + dim_hidden = dim * mlp_expansion_factor + + for dim_in in dim_inputs: + net = [] + + mlp = nn.Sequential(MLP(dim, dim_in * 2, dim_hidden=dim_hidden, depth=depth), nn.GLU(dim=-1)) + + self.to_freqs.append(mlp) + + def forward(self, x): + x = x.unbind(dim=-2) + + outs = [] + + for band_features, mlp in zip(x, self.to_freqs): + freq_out = mlp(band_features) + outs.append(freq_out) + + return torch.cat(outs, dim=-1) + + +# main class + + +class MelBandRoformer(Module): + # @beartype + def __init__( + self, + dim, + *, + depth, + stereo=False, + num_stems=1, + time_transformer_depth=2, + freq_transformer_depth=2, + linear_transformer_depth=0, + num_bands=60, + dim_head=64, + heads=8, + attn_dropout=0.1, + ff_dropout=0.1, + flash_attn=True, + dim_freqs_in=1025, + sample_rate=44100, # needed for mel filter bank from librosa + stft_n_fft=2048, + stft_hop_length=512, + # 10ms at 44100Hz, from sections 4.1, 4.4 in the paper - @faroit recommends // 2 or // 4 for better reconstruction + stft_win_length=2048, + stft_normalized=False, + stft_window_fn: Optional[Callable] = None, + mask_estimator_depth=1, + multi_stft_resolution_loss_weight=1.0, + multi_stft_resolutions_window_sizes: Tuple[int, ...] 
= (4096, 2048, 1024, 512, 256), + multi_stft_hop_size=147, + multi_stft_normalized=False, + multi_stft_window_fn: Callable = torch.hann_window, + match_input_audio_length=False, # if True, pad output tensor to match length of input tensor + mlp_expansion_factor=4, + use_torch_checkpoint=False, + skip_connection=False, + ): + super().__init__() + + self.stereo = stereo + self.audio_channels = 2 if stereo else 1 + self.num_stems = num_stems + self.use_torch_checkpoint = use_torch_checkpoint + self.skip_connection = skip_connection + + self.layers = ModuleList([]) + + transformer_kwargs = dict( + dim=dim, + heads=heads, + dim_head=dim_head, + attn_dropout=attn_dropout, + ff_dropout=ff_dropout, + flash_attn=flash_attn, + ) + + time_rotary_embed = RotaryEmbedding(dim=dim_head) + freq_rotary_embed = RotaryEmbedding(dim=dim_head) + + for _ in range(depth): + tran_modules = [] + if linear_transformer_depth > 0: + tran_modules.append(Transformer(depth=linear_transformer_depth, linear_attn=True, **transformer_kwargs)) + tran_modules.append( + Transformer(depth=time_transformer_depth, rotary_embed=time_rotary_embed, **transformer_kwargs) + ) + tran_modules.append( + Transformer(depth=freq_transformer_depth, rotary_embed=freq_rotary_embed, **transformer_kwargs) + ) + self.layers.append(nn.ModuleList(tran_modules)) + + self.stft_window_fn = partial(default(stft_window_fn, torch.hann_window), stft_win_length) + + self.stft_kwargs = dict( + n_fft=stft_n_fft, hop_length=stft_hop_length, win_length=stft_win_length, normalized=stft_normalized + ) + + freqs = torch.stft( + torch.randn(1, 4096), **self.stft_kwargs, window=torch.ones(stft_n_fft), return_complex=True + ).shape[1] + + # create mel filter bank + # with librosa.filters.mel as in section 2 of paper + + mel_filter_bank_numpy = filters.mel(sr=sample_rate, n_fft=stft_n_fft, n_mels=num_bands) + + mel_filter_bank = torch.from_numpy(mel_filter_bank_numpy) + + # for some reason, it doesn't include the first freq? 
just force a value for now + + mel_filter_bank[0][0] = 1.0 + + # In some systems/envs we get 0.0 instead of ~1.9e-18 in the last position, + # so let's force a positive value + + mel_filter_bank[-1, -1] = 1.0 + + # binary as in paper (then estimated masks are averaged for overlapping regions) + + freqs_per_band = mel_filter_bank > 0 + assert freqs_per_band.any(dim=0).all(), "all frequencies need to be covered by all bands for now" + + repeated_freq_indices = repeat(torch.arange(freqs), "f -> b f", b=num_bands) + freq_indices = repeated_freq_indices[freqs_per_band] + + if stereo: + freq_indices = repeat(freq_indices, "f -> f s", s=2) + freq_indices = freq_indices * 2 + torch.arange(2) + freq_indices = rearrange(freq_indices, "f s -> (f s)") + + self.register_buffer("freq_indices", freq_indices, persistent=False) + self.register_buffer("freqs_per_band", freqs_per_band, persistent=False) + + num_freqs_per_band = reduce(freqs_per_band, "b f -> b", "sum") + num_bands_per_freq = reduce(freqs_per_band, "b f -> f", "sum") + + self.register_buffer("num_freqs_per_band", num_freqs_per_band, persistent=False) + self.register_buffer("num_bands_per_freq", num_bands_per_freq, persistent=False) + + # band split and mask estimator + + freqs_per_bands_with_complex = tuple(2 * f * self.audio_channels for f in num_freqs_per_band.tolist()) + + self.band_split = BandSplit(dim=dim, dim_inputs=freqs_per_bands_with_complex) + + self.mask_estimators = nn.ModuleList([]) + + for _ in range(num_stems): + mask_estimator = MaskEstimator( + dim=dim, + dim_inputs=freqs_per_bands_with_complex, + depth=mask_estimator_depth, + mlp_expansion_factor=mlp_expansion_factor, + ) + + self.mask_estimators.append(mask_estimator) + + # for the multi-resolution stft loss + + self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight + self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes + self.multi_stft_n_fft = stft_n_fft + self.multi_stft_window_fn = multi_stft_window_fn + + self.multi_stft_kwargs = dict(hop_length=multi_stft_hop_size, normalized=multi_stft_normalized) + + self.match_input_audio_length = match_input_audio_length + + def forward(self, raw_audio, target=None, return_loss_breakdown=False): + """ + einops + + b - batch + f - freq + t - time + s - audio channel (1 for mono, 2 for stereo) + n - number of 'stems' + c - complex (2) + d - feature dimension + """ + + device = raw_audio.device + + if raw_audio.ndim == 2: + raw_audio = rearrange(raw_audio, "b t -> b 1 t") + + batch, channels, raw_audio_length = raw_audio.shape + + istft_length = raw_audio_length if self.match_input_audio_length else None + + assert (not self.stereo and channels == 1) or (self.stereo and channels == 2), ( + "stereo needs to be set to True if passing in audio signal that is stereo (channel dimension of 2). 
also need to be False if mono (channel dimension of 1)" + ) + + # to stft + + raw_audio, batch_audio_channel_packed_shape = pack_one(raw_audio, "* t") + + stft_window = self.stft_window_fn(device=device) + + stft_repr = torch.stft(raw_audio, **self.stft_kwargs, window=stft_window, return_complex=True) + stft_repr = torch.view_as_real(stft_repr) + + stft_repr = unpack_one(stft_repr, batch_audio_channel_packed_shape, "* f t c") + + # merge stereo / mono into the frequency, with frequency leading dimension, for band splitting + stft_repr = rearrange(stft_repr, "b s f t c -> b (f s) t c") + + # index out all frequencies for all frequency ranges across bands ascending in one go + + batch_arange = torch.arange(batch, device=device)[..., None] + + # account for stereo + + x = stft_repr[batch_arange, self.freq_indices] + + # fold the complex (real and imag) into the frequencies dimension + + x = rearrange(x, "b f t c -> b t (f c)") + + if self.use_torch_checkpoint: + x = checkpoint(self.band_split, x, use_reentrant=False) + else: + x = self.band_split(x) + + # axial / hierarchical attention + + store = [None] * len(self.layers) + for i, transformer_block in enumerate(self.layers): + if len(transformer_block) == 3: + linear_transformer, time_transformer, freq_transformer = transformer_block + + x, ft_ps = pack([x], "b * d") + if self.use_torch_checkpoint: + x = checkpoint(linear_transformer, x, use_reentrant=False) + else: + x = linear_transformer(x) + (x,) = unpack(x, ft_ps, "b * d") + else: + time_transformer, freq_transformer = transformer_block + + if self.skip_connection: + # Sum all previous + for j in range(i): + x = x + store[j] + + x = rearrange(x, "b t f d -> b f t d") + x, ps = pack([x], "* t d") + + if self.use_torch_checkpoint: + x = checkpoint(time_transformer, x, use_reentrant=False) + else: + x = time_transformer(x) + + (x,) = unpack(x, ps, "* t d") + x = rearrange(x, "b f t d -> b t f d") + x, ps = pack([x], "* f d") + + if self.use_torch_checkpoint: + x = checkpoint(freq_transformer, x, use_reentrant=False) + else: + x = freq_transformer(x) + + (x,) = unpack(x, ps, "* f d") + + if self.skip_connection: + store[i] = x + + num_stems = len(self.mask_estimators) + if self.use_torch_checkpoint: + masks = torch.stack([checkpoint(fn, x, use_reentrant=False) for fn in self.mask_estimators], dim=1) + else: + masks = torch.stack([fn(x) for fn in self.mask_estimators], dim=1) + masks = rearrange(masks, "b n t (f c) -> b n f t c", c=2) + + # modulate frequency representation + + stft_repr = rearrange(stft_repr, "b f t c -> b 1 f t c") + + # complex number multiplication + + stft_repr = torch.view_as_complex(stft_repr) + masks = torch.view_as_complex(masks) + + masks = masks.type(stft_repr.dtype) + + # need to average the estimated mask for the overlapped frequencies + + scatter_indices = repeat(self.freq_indices, "f -> b n f t", b=batch, n=num_stems, t=stft_repr.shape[-1]) + + stft_repr_expanded_stems = repeat(stft_repr, "b 1 ... 
-> b n ...", n=num_stems) + masks_summed = torch.zeros_like(stft_repr_expanded_stems).scatter_add_(2, scatter_indices, masks) + + denom = repeat(self.num_bands_per_freq, "f -> (f r) 1", r=channels) + + masks_averaged = masks_summed / denom.clamp(min=1e-8) + + # modulate stft repr with estimated mask + + stft_repr = stft_repr * masks_averaged + + # istft + + stft_repr = rearrange(stft_repr, "b n (f s) t -> (b n s) f t", s=self.audio_channels) + + recon_audio = torch.istft( + stft_repr, **self.stft_kwargs, window=stft_window, return_complex=False, length=istft_length + ) + + recon_audio = rearrange(recon_audio, "(b n s) t -> b n s t", b=batch, s=self.audio_channels, n=num_stems) + + if num_stems == 1: + recon_audio = rearrange(recon_audio, "b 1 s t -> b s t") + + # if a target is passed in, calculate loss for learning + + if not exists(target): + return recon_audio + + if self.num_stems > 1: + assert target.ndim == 4 and target.shape[1] == self.num_stems + + if target.ndim == 2: + target = rearrange(target, "... t -> ... 1 t") + + target = target[..., : recon_audio.shape[-1]] # protect against lost length on istft + + loss = F.l1_loss(recon_audio, target) + + multi_stft_resolution_loss = 0.0 + + for window_size in self.multi_stft_resolutions_window_sizes: + res_stft_kwargs = dict( + n_fft=max(window_size, self.multi_stft_n_fft), # not sure what n_fft is across multi resolution stft + win_length=window_size, + return_complex=True, + window=self.multi_stft_window_fn(window_size, device=device), + **self.multi_stft_kwargs, + ) + + recon_Y = torch.stft(rearrange(recon_audio, "... s t -> (... s) t"), **res_stft_kwargs) + target_Y = torch.stft(rearrange(target, "... s t -> (... s) t"), **res_stft_kwargs) + + multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y) + + weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight + + total_loss = loss + weighted_multi_resolution_loss + + if not return_loss_breakdown: + return total_loss + + return total_loss, (loss, multi_stft_resolution_loss) diff --git a/tools/uvr5/bsroformer.py b/tools/uvr5/bsroformer.py new file mode 100644 index 0000000000000000000000000000000000000000..ddcbfa749e663e1f546db448b81cf38be4018860 --- /dev/null +++ b/tools/uvr5/bsroformer.py @@ -0,0 +1,304 @@ +# This code is modified from https://github.com/ZFTurbo/ +import os +import warnings + +import librosa +import numpy as np +import soundfile as sf +import torch +import torch.nn as nn +import yaml +from tqdm import tqdm + +warnings.filterwarnings("ignore") + + +class Roformer_Loader: + def get_config(self, config_path): + with open(config_path, "r", encoding="utf-8") as f: + # use fullloader to load tag !!python/tuple, code can be improved + config = yaml.load(f, Loader=yaml.FullLoader) + return config + + def get_default_config(self): + default_config = None + if self.model_type == "bs_roformer": + # Use model_bs_roformer_ep_368_sdr_12.9628.yaml and model_bs_roformer_ep_317_sdr_12.9755.yaml as default configuration files + # Other BS_Roformer models may not be compatible + # fmt: off + default_config = { + "audio": {"chunk_size": 352800, "sample_rate": 44100}, + "model": { + "dim": 512, + "depth": 12, + "stereo": True, + "num_stems": 1, + "time_transformer_depth": 1, + "freq_transformer_depth": 1, + "linear_transformer_depth": 0, + "freqs_per_bands": (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12, 24, 24, 24, 24, 
24, 24, 24, 24, 48, 48, 48, 48, 48, 48, 48, 48, 128, 129), + "dim_head": 64, + "heads": 8, + "attn_dropout": 0.1, + "ff_dropout": 0.1, + "flash_attn": True, + "dim_freqs_in": 1025, + "stft_n_fft": 2048, + "stft_hop_length": 441, + "stft_win_length": 2048, + "stft_normalized": False, + "mask_estimator_depth": 2, + "multi_stft_resolution_loss_weight": 1.0, + "multi_stft_resolutions_window_sizes": (4096, 2048, 1024, 512, 256), + "multi_stft_hop_size": 147, + "multi_stft_normalized": False, + }, + "training": {"instruments": ["vocals", "other"], "target_instrument": "vocals"}, + "inference": {"batch_size": 2, "num_overlap": 2}, + } + # fmt: on + elif self.model_type == "mel_band_roformer": + # Use model_mel_band_roformer_ep_3005_sdr_11.4360.yaml as default configuration files + # Other Mel_Band_Roformer models may not be compatible + default_config = { + "audio": {"chunk_size": 352800, "sample_rate": 44100}, + "model": { + "dim": 384, + "depth": 12, + "stereo": True, + "num_stems": 1, + "time_transformer_depth": 1, + "freq_transformer_depth": 1, + "linear_transformer_depth": 0, + "num_bands": 60, + "dim_head": 64, + "heads": 8, + "attn_dropout": 0.1, + "ff_dropout": 0.1, + "flash_attn": True, + "dim_freqs_in": 1025, + "sample_rate": 44100, + "stft_n_fft": 2048, + "stft_hop_length": 441, + "stft_win_length": 2048, + "stft_normalized": False, + "mask_estimator_depth": 2, + "multi_stft_resolution_loss_weight": 1.0, + "multi_stft_resolutions_window_sizes": (4096, 2048, 1024, 512, 256), + "multi_stft_hop_size": 147, + "multi_stft_normalized": False, + }, + "training": {"instruments": ["vocals", "other"], "target_instrument": "vocals"}, + "inference": {"batch_size": 2, "num_overlap": 2}, + } + + return default_config + + def get_model_from_config(self): + if self.model_type == "bs_roformer": + from bs_roformer.bs_roformer import BSRoformer + + model = BSRoformer(**dict(self.config["model"])) + elif self.model_type == "mel_band_roformer": + from bs_roformer.mel_band_roformer import MelBandRoformer + + model = MelBandRoformer(**dict(self.config["model"])) + else: + print("Error: Unknown model: {}".format(self.model_type)) + model = None + return model + + def demix_track(self, model, mix, device): + C = self.config["audio"]["chunk_size"] # chunk_size + N = self.config["inference"]["num_overlap"] + fade_size = C // 10 + step = int(C // N) + border = C - step + batch_size = self.config["inference"]["batch_size"] + + length_init = mix.shape[-1] + progress_bar = tqdm(total=length_init // step + 1, desc="Processing", leave=False) + + # Do pad from the beginning and end to account floating window results better + if length_init > 2 * border and (border > 0): + mix = nn.functional.pad(mix, (border, border), mode="reflect") + + # Prepare windows arrays (do 1 time for speed up). 
This trick repairs click problems on the edges of segment + window_size = C + fadein = torch.linspace(0, 1, fade_size) + fadeout = torch.linspace(1, 0, fade_size) + window_start = torch.ones(window_size) + window_middle = torch.ones(window_size) + window_finish = torch.ones(window_size) + window_start[-fade_size:] *= fadeout # First audio chunk, no fadein + window_finish[:fade_size] *= fadein # Last audio chunk, no fadeout + window_middle[-fade_size:] *= fadeout + window_middle[:fade_size] *= fadein + + with torch.amp.autocast("cuda"): + with torch.inference_mode(): + if self.config["training"]["target_instrument"] is None: + req_shape = (len(self.config["training"]["instruments"]),) + tuple(mix.shape) + else: + req_shape = (1,) + tuple(mix.shape) + + result = torch.zeros(req_shape, dtype=torch.float32) + counter = torch.zeros(req_shape, dtype=torch.float32) + i = 0 + batch_data = [] + batch_locations = [] + while i < mix.shape[1]: + part = mix[:, i : i + C].to(device) + length = part.shape[-1] + if length < C: + if length > C // 2 + 1: + part = nn.functional.pad(input=part, pad=(0, C - length), mode="reflect") + else: + part = nn.functional.pad(input=part, pad=(0, C - length, 0, 0), mode="constant", value=0) + if self.is_half: + part = part.half() + batch_data.append(part) + batch_locations.append((i, length)) + i += step + progress_bar.update(1) + + if len(batch_data) >= batch_size or (i >= mix.shape[1]): + arr = torch.stack(batch_data, dim=0) + # print(23333333,arr.dtype) + x = model(arr) + + window = window_middle + if i - step == 0: # First audio chunk, no fadein + window = window_start + elif i >= mix.shape[1]: # Last audio chunk, no fadeout + window = window_finish + + for j in range(len(batch_locations)): + start, l = batch_locations[j] + result[..., start : start + l] += x[j][..., :l].cpu() * window[..., :l] + counter[..., start : start + l] += window[..., :l] + + batch_data = [] + batch_locations = [] + + estimated_sources = result / counter + estimated_sources = estimated_sources.cpu().numpy() + np.nan_to_num(estimated_sources, copy=False, nan=0.0) + + if length_init > 2 * border and (border > 0): + # Remove pad + estimated_sources = estimated_sources[..., border:-border] + + progress_bar.close() + + if self.config["training"]["target_instrument"] is None: + return {k: v for k, v in zip(self.config["training"]["instruments"], estimated_sources)} + else: + return {k: v for k, v in zip([self.config["training"]["target_instrument"]], estimated_sources)} + + def run_folder(self, input, vocal_root, others_root, format): + self.model.eval() + path = input + os.makedirs(vocal_root, exist_ok=True) + os.makedirs(others_root, exist_ok=True) + file_base_name = os.path.splitext(os.path.basename(path))[0] + + sample_rate = 44100 + if "sample_rate" in self.config["audio"]: + sample_rate = self.config["audio"]["sample_rate"] + + try: + mix, sr = librosa.load(path, sr=sample_rate, mono=False) + except Exception as e: + print("Can read track: {}".format(path)) + print("Error message: {}".format(str(e))) + return + + # in case if model only supports mono tracks + isstereo = self.config["model"].get("stereo", True) + if not isstereo and len(mix.shape) != 1: + mix = np.mean(mix, axis=0) # if more than 2 channels, take mean + print("Warning: Track has more than 1 channels, but model is mono, taking mean of all channels.") + + mix_orig = mix.copy() + + mixture = torch.tensor(mix, dtype=torch.float32) + res = self.demix_track(self.model, mixture, self.device) + + if 
self.config["training"]["target_instrument"] is not None: + # if target instrument is specified, save target instrument as vocal and other instruments as others + # other instruments are caculated by subtracting target instrument from mixture + target_instrument = self.config["training"]["target_instrument"] + other_instruments = [i for i in self.config["training"]["instruments"] if i != target_instrument] + other = mix_orig - res[target_instrument] # caculate other instruments + + path_vocal = "{}/{}_{}.wav".format(vocal_root, file_base_name, target_instrument) + path_other = "{}/{}_{}.wav".format(others_root, file_base_name, other_instruments[0]) + self.save_audio(path_vocal, res[target_instrument].T, sr, format) + self.save_audio(path_other, other.T, sr, format) + else: + # if target instrument is not specified, save the first instrument as vocal and the rest as others + vocal_inst = self.config["training"]["instruments"][0] + path_vocal = "{}/{}_{}.wav".format(vocal_root, file_base_name, vocal_inst) + self.save_audio(path_vocal, res[vocal_inst].T, sr, format) + for other in self.config["training"]["instruments"][1:]: # save other instruments + path_other = "{}/{}_{}.wav".format(others_root, file_base_name, other) + self.save_audio(path_other, res[other].T, sr, format) + + def save_audio(self, path, data, sr, format): + # input path should be endwith '.wav' + if format in ["wav", "flac"]: + if format == "flac": + path = path[:-3] + "flac" + sf.write(path, data, sr) + else: + sf.write(path, data, sr) + os.system('ffmpeg -i "{}" -vn "{}" -q:a 2 -y'.format(path, path[:-3] + format)) + try: + os.remove(path) + except: + pass + + def __init__(self, model_path, config_path, device, is_half): + self.device = device + self.is_half = is_half + self.model_type = None + self.config = None + + # get model_type, first try: + if "bs_roformer" in model_path.lower() or "bsroformer" in model_path.lower(): + self.model_type = "bs_roformer" + elif "mel_band_roformer" in model_path.lower() or "melbandroformer" in model_path.lower(): + self.model_type = "mel_band_roformer" + + if not os.path.exists(config_path): + if self.model_type is None: + # if model_type is still None, raise an error + raise ValueError( + "Error: Unknown model type. If you are using a model without a configuration file, Ensure that your model name includes 'bs_roformer', 'bsroformer', 'mel_band_roformer', or 'melbandroformer'. Otherwise, you can manually place the model configuration file into 'tools/uvr5/uvr5w_weights' and ensure that the configuration file is named as '.yaml' then try it again." 
+ ) + self.config = self.get_default_config() + else: + # if there is a configuration file + self.config = self.get_config(config_path) + if self.model_type is None: + # if model_type is still None, second try, get model_type from the configuration file + if "freqs_per_bands" in self.config["model"]: + # if freqs_per_bands in config, it's a bs_roformer model + self.model_type = "bs_roformer" + else: + # else it's a mel_band_roformer model + self.model_type = "mel_band_roformer" + + print("Detected model type: {}".format(self.model_type)) + model = self.get_model_from_config() + state_dict = torch.load(model_path, map_location="cpu") + model.load_state_dict(state_dict) + + if is_half == False: + self.model = model.to(device) + else: + self.model = model.half().to(device) + + def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False): + self.run_folder(input, vocal_root, others_root, format) diff --git a/tools/uvr5/mdxnet.py b/tools/uvr5/mdxnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e109827343cbcd6c3375e12bc02b15c5f38d2d11 --- /dev/null +++ b/tools/uvr5/mdxnet.py @@ -0,0 +1,223 @@ +import os +import logging + +logger = logging.getLogger(__name__) + +import librosa +import numpy as np +import soundfile as sf +import torch +from tqdm import tqdm + +cpu = torch.device("cpu") + + +class ConvTDFNetTrim: + def __init__(self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024): + super(ConvTDFNetTrim, self).__init__() + + self.dim_f = dim_f + self.dim_t = 2**dim_t + self.n_fft = n_fft + self.hop = hop + self.n_bins = self.n_fft // 2 + 1 + self.chunk_size = hop * (self.dim_t - 1) + self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(device) + self.target_name = target_name + self.blender = "blender" in model_name + + self.dim_c = 4 + out_c = self.dim_c * 4 if target_name == "*" else self.dim_c + self.freq_pad = torch.zeros([1, out_c, self.n_bins - self.dim_f, self.dim_t]).to(device) + + self.n = L // 2 + + def stft(self, x): + x = x.reshape([-1, self.chunk_size]) + x = torch.stft( + x, + n_fft=self.n_fft, + hop_length=self.hop, + window=self.window, + center=True, + return_complex=True, + ) + x = torch.view_as_real(x) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, self.dim_c, self.n_bins, self.dim_t]) + return x[:, :, : self.dim_f] + + def istft(self, x, freq_pad=None): + freq_pad = self.freq_pad.repeat([x.shape[0], 1, 1, 1]) if freq_pad is None else freq_pad + x = torch.cat([x, freq_pad], -2) + c = 4 * 2 if self.target_name == "*" else 2 + x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape([-1, 2, self.n_bins, self.dim_t]) + x = x.permute([0, 2, 3, 1]) + x = x.contiguous() + x = torch.view_as_complex(x) + x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True) + return x.reshape([-1, c, self.chunk_size]) + + +def get_models(device, dim_f, dim_t, n_fft): + return ConvTDFNetTrim( + device=device, + model_name="Conv-TDF", + target_name="vocals", + L=11, + dim_f=dim_f, + dim_t=dim_t, + n_fft=n_fft, + ) + + +class Predictor: + def __init__(self, args): + import onnxruntime as ort + + logger.info(ort.get_available_providers()) + self.args = args + self.model_ = get_models(device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft) + self.model = ort.InferenceSession( + os.path.join(args.onnx, self.model_.target_name + ".onnx"), + providers=[ + "CUDAExecutionProvider", + "DmlExecutionProvider", + "CPUExecutionProvider", 
+ ], + ) + logger.info("ONNX load done") + + def demix(self, mix): + samples = mix.shape[-1] + margin = self.args.margin + chunk_size = self.args.chunks * 44100 + assert not margin == 0, "margin cannot be zero!" + if margin > chunk_size: + margin = chunk_size + + segmented_mix = {} + + if self.args.chunks == 0 or samples < chunk_size: + chunk_size = samples + + counter = -1 + for skip in range(0, samples, chunk_size): + counter += 1 + + s_margin = 0 if counter == 0 else margin + end = min(skip + chunk_size + margin, samples) + + start = skip - s_margin + + segmented_mix[skip] = mix[:, start:end].copy() + if end == samples: + break + + sources = self.demix_base(segmented_mix, margin_size=margin) + """ + mix:(2,big_sample) + segmented_mix:offset->(2,small_sample) + sources:(1,2,big_sample) + """ + return sources + + def demix_base(self, mixes, margin_size): + chunked_sources = [] + progress_bar = tqdm(total=len(mixes)) + progress_bar.set_description("Processing") + for mix in mixes: + cmix = mixes[mix] + sources = [] + n_sample = cmix.shape[1] + model = self.model_ + trim = model.n_fft // 2 + gen_size = model.chunk_size - 2 * trim + pad = gen_size - n_sample % gen_size + mix_p = np.concatenate((np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1) + mix_waves = [] + i = 0 + while i < n_sample + pad: + waves = np.array(mix_p[:, i : i + model.chunk_size]) + mix_waves.append(waves) + i += gen_size + mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) + with torch.no_grad(): + _ort = self.model + spek = model.stft(mix_waves) + if self.args.denoise: + spec_pred = ( + -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 + + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 + ) + tar_waves = model.istft(torch.tensor(spec_pred)) + else: + tar_waves = model.istft(torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])) + tar_signal = tar_waves[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).numpy()[:, :-pad] + + start = 0 if mix == 0 else margin_size + end = None if mix == list(mixes.keys())[::-1][0] else -margin_size + if margin_size == 0: + end = None + sources.append(tar_signal[:, start:end]) + + progress_bar.update(1) + + chunked_sources.append(sources) + _sources = np.concatenate(chunked_sources, axis=-1) + # del self.model + progress_bar.close() + return _sources + + def prediction(self, m, vocal_root, others_root, format): + os.makedirs(vocal_root, exist_ok=True) + os.makedirs(others_root, exist_ok=True) + basename = os.path.basename(m) + mix, rate = librosa.load(m, mono=False, sr=44100) + if mix.ndim == 1: + mix = np.asfortranarray([mix, mix]) + mix = mix.T + sources = self.demix(mix.T) + opt = sources[0].T + if format in ["wav", "flac"]: + sf.write("%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate) + sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) + else: + path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) + path_other = "%s/%s_others.wav" % (others_root, basename) + sf.write(path_vocal, mix - opt, rate) + sf.write(path_other, opt, rate) + opt_path_vocal = path_vocal[:-4] + ".%s" % format + opt_path_other = path_other[:-4] + ".%s" % format + if os.path.exists(path_vocal): + os.system("ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_vocal, opt_path_vocal)) + if os.path.exists(opt_path_vocal): + try: + os.remove(path_vocal) + except: + pass + if os.path.exists(path_other): + os.system("ffmpeg -i '%s' -vn '%s' -q:a 2 -y" % (path_other, opt_path_other)) + if os.path.exists(opt_path_other): + try: 
+ os.remove(path_other) + except: + pass + + +class MDXNetDereverb: + def __init__(self, chunks): + self.onnx = "%s/uvr5_weights/onnx_dereverb_By_FoxJoy" % os.path.dirname(os.path.abspath(__file__)) + self.shifts = 10 # 'Predict with randomised equivariant stabilisation' + self.mixing = "min_mag" # ['default','min_mag','max_mag'] + self.chunks = chunks + self.margin = 44100 + self.dim_t = 9 + self.dim_f = 3072 + self.n_fft = 6144 + self.denoise = True + self.pred = Predictor(self) + self.device = cpu + + def _path_audio_(self, input, others_root, vocal_root, format, is_hp3=False): + self.pred.prediction(input, vocal_root, others_root, format) diff --git a/tools/uvr5/uvr5_weights/.gitignore b/tools/uvr5/uvr5_weights/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d6b7ef32c8478a48c3994dcadc86837f4371184d --- /dev/null +++ b/tools/uvr5/uvr5_weights/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/tools/uvr5/vr.py b/tools/uvr5/vr.py new file mode 100644 index 0000000000000000000000000000000000000000..4ca8a3b780c4870861166a475145e4b120c65d3d --- /dev/null +++ b/tools/uvr5/vr.py @@ -0,0 +1,342 @@ +import os + +parent_directory = os.path.dirname(os.path.abspath(__file__)) +import logging + +logger = logging.getLogger(__name__) + +import librosa +import numpy as np +import soundfile as sf +import torch +from lib.lib_v5 import nets_61968KB as Nets +from lib.lib_v5 import spec_utils +from lib.lib_v5.model_param_init import ModelParameters +from lib.lib_v5.nets_new import CascadedNet +from lib.utils import inference + + +class AudioPre: + def __init__(self, agg, model_path, device, is_half, tta=False): + self.model_path = model_path + self.device = device + self.data = { + # Processing Options + "postprocess": False, + "tta": tta, + # Constants + "window_size": 512, + "agg": agg, + "high_end_process": "mirroring", + } + mp = ModelParameters("%s/lib/lib_v5/modelparams/4band_v2.json" % parent_directory) + model = Nets.CascadedASPPNet(mp.param["bins"] * 2) + cpk = torch.load(model_path, map_location="cpu") + model.load_state_dict(cpk) + model.eval() + if is_half: + model = model.half().to(device) + else: + model = model.to(device) + + self.mp = mp + self.model = model + + def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac", is_hp3=False): + if ins_root is None and vocal_root is None: + return "No save root." 
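+ # What follows in this method, in brief:
+ #   1. the track is loaded once for the highest band defined in lib_v5/modelparams/4band_v2.json, then resampled downward for each lower band
+ #   2. each band gets its own STFT (wave_to_spectrogram_mt) and the bands are merged by combine_spectrograms
+ #   3. the cascaded net predicts the instrumental magnitude; vocals are taken as the residual X_spec_m - y_spec_m
+ #   4. "mirroring" restores the high frequencies the model never sees, then cmb_spectrogram_to_wave and an optional ffmpeg pass write both stems to ins_root / vocal_root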
+ name = os.path.basename(music_file) + if ins_root is not None: + os.makedirs(ins_root, exist_ok=True) + if vocal_root is not None: + os.makedirs(vocal_root, exist_ok=True) + X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} + bands_n = len(self.mp.param["band"]) + # print(bands_n) + for d in range(bands_n, 0, -1): + bp = self.mp.param["band"][d] + if d == bands_n: # high-end band + ( + X_wave[d], + _, + ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 + music_file, + sr=bp["sr"], + mono=False, + dtype=np.float32, + res_type=bp["res_type"], + ) + if X_wave[d].ndim == 1: + X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) + else: # lower bands + X_wave[d] = librosa.core.resample( + X_wave[d + 1], + orig_sr=self.mp.param["band"][d + 1]["sr"], + target_sr=bp["sr"], + res_type=bp["res_type"], + ) + # Stft of wave source + X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( + X_wave[d], + bp["hl"], + bp["n_fft"], + self.mp.param["mid_side"], + self.mp.param["mid_side_b2"], + self.mp.param["reverse"], + ) + # pdb.set_trace() + if d == bands_n and self.data["high_end_process"] != "none": + input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( + self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] + ) + input_high_end = X_spec_s[d][:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :] + + X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) + aggresive_set = float(self.data["agg"] / 100) + aggressiveness = { + "value": aggresive_set, + "split_bin": self.mp.param["band"][1]["crop_stop"], + } + with torch.no_grad(): + pred, X_mag, X_phase = inference(X_spec_m, self.device, self.model, aggressiveness, self.data) + # Postprocess + if self.data["postprocess"]: + pred_inv = np.clip(X_mag - pred, 0, np.inf) + pred = spec_utils.mask_silence(pred, pred_inv) + y_spec_m = pred * X_phase + v_spec_m = X_spec_m - y_spec_m + + if is_hp3 == True: + ins_root, vocal_root = vocal_root, ins_root + + if ins_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], y_spec_m, input_high_end, self.mp) + wav_instrument = spec_utils.cmb_spectrogram_to_wave( + y_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) + logger.info("%s instruments done" % name) + if is_hp3 == True: + head = "vocal_" + else: + head = "instrument_" + if format in ["wav", "flac"]: + sf.write( + os.path.join( + ins_root, + head + "{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) # + else: + path = os.path.join(ins_root, head + "{}_{}.wav".format(name, self.data["agg"])) + sf.write( + path, + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + opt_format_path = path[:-4] + ".%s" % format + os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path)) + if os.path.exists(opt_format_path): + try: + os.remove(path) + except: + pass + if vocal_root is not None: + if is_hp3 == True: + head = "instrument_" + else: + head = "vocal_" + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], v_spec_m, input_high_end, self.mp) + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp, input_high_end_h, input_high_end_) + else: + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) 
+ logger.info("%s vocals done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + vocal_root, + head + "{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + else: + path = os.path.join(vocal_root, head + "{}_{}.wav".format(name, self.data["agg"])) + sf.write( + path, + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + opt_format_path = path[:-4] + ".%s" % format + os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path)) + if os.path.exists(opt_format_path): + try: + os.remove(path) + except: + pass + + +class AudioPreDeEcho: + def __init__(self, agg, model_path, device, is_half, tta=False): + self.model_path = model_path + self.device = device + self.data = { + # Processing Options + "postprocess": False, + "tta": tta, + # Constants + "window_size": 512, + "agg": agg, + "high_end_process": "mirroring", + } + mp = ModelParameters("%s/lib/lib_v5/modelparams/4band_v3.json" % parent_directory) + nout = 64 if "DeReverb" in model_path else 48 + model = CascadedNet(mp.param["bins"] * 2, nout) + cpk = torch.load(model_path, map_location="cpu") + model.load_state_dict(cpk) + model.eval() + if is_half: + model = model.half().to(device) + else: + model = model.to(device) + + self.mp = mp + self.model = model + + def _path_audio_( + self, music_file, vocal_root=None, ins_root=None, format="flac", is_hp3=False + ): # 3个VR模型vocal和ins是反的 + if ins_root is None and vocal_root is None: + return "No save root." + name = os.path.basename(music_file) + if ins_root is not None: + os.makedirs(ins_root, exist_ok=True) + if vocal_root is not None: + os.makedirs(vocal_root, exist_ok=True) + X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} + bands_n = len(self.mp.param["band"]) + # print(bands_n) + for d in range(bands_n, 0, -1): + bp = self.mp.param["band"][d] + if d == bands_n: # high-end band + ( + X_wave[d], + _, + ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 + music_file, + sr=bp["sr"], + mono=False, + dtype=np.float32, + res_type=bp["res_type"], + ) + if X_wave[d].ndim == 1: + X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) + else: # lower bands + X_wave[d] = librosa.core.resample( + X_wave[d + 1], + orig_sr=self.mp.param["band"][d + 1]["sr"], + target_sr=bp["sr"], + res_type=bp["res_type"], + ) + # Stft of wave source + X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( + X_wave[d], + bp["hl"], + bp["n_fft"], + self.mp.param["mid_side"], + self.mp.param["mid_side_b2"], + self.mp.param["reverse"], + ) + # pdb.set_trace() + if d == bands_n and self.data["high_end_process"] != "none": + input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( + self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] + ) + input_high_end = X_spec_s[d][:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :] + + X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) + aggresive_set = float(self.data["agg"] / 100) + aggressiveness = { + "value": aggresive_set, + "split_bin": self.mp.param["band"][1]["crop_stop"], + } + with torch.no_grad(): + pred, X_mag, X_phase = inference(X_spec_m, self.device, self.model, aggressiveness, self.data) + # Postprocess + if self.data["postprocess"]: + pred_inv = np.clip(X_mag - pred, 0, np.inf) + pred = spec_utils.mask_silence(pred, pred_inv) + y_spec_m = pred * X_phase + v_spec_m = X_spec_m - y_spec_m + + if ins_root is not None: + if 
self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], y_spec_m, input_high_end, self.mp) + wav_instrument = spec_utils.cmb_spectrogram_to_wave( + y_spec_m, self.mp, input_high_end_h, input_high_end_ + ) + else: + wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) + logger.info("%s instruments done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + ins_root, + "vocal_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) # + else: + path = os.path.join(ins_root, "vocal_{}_{}.wav".format(name, self.data["agg"])) + sf.write( + path, + (np.array(wav_instrument) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + opt_format_path = path[:-4] + ".%s" % format + os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path)) + if os.path.exists(opt_format_path): + try: + os.remove(path) + except: + pass + if vocal_root is not None: + if self.data["high_end_process"].startswith("mirroring"): + input_high_end_ = spec_utils.mirroring(self.data["high_end_process"], v_spec_m, input_high_end, self.mp) + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp, input_high_end_h, input_high_end_) + else: + wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) + logger.info("%s vocals done" % name) + if format in ["wav", "flac"]: + sf.write( + os.path.join( + vocal_root, + "instrument_{}_{}.{}".format(name, self.data["agg"], format), + ), + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + else: + path = os.path.join(vocal_root, "instrument_{}_{}.wav".format(name, self.data["agg"])) + sf.write( + path, + (np.array(wav_vocals) * 32768).astype("int16"), + self.mp.param["sr"], + ) + if os.path.exists(path): + opt_format_path = path[:-4] + ".%s" % format + os.system("ffmpeg -i %s -vn %s -q:a 2 -y" % (path, opt_format_path)) + if os.path.exists(opt_format_path): + try: + os.remove(path) + except: + pass diff --git a/tools/uvr5/webui.py b/tools/uvr5/webui.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d7fe468d4dd6cfabd5852f8386ac58e265af01 --- /dev/null +++ b/tools/uvr5/webui.py @@ -0,0 +1,228 @@ +import os +import traceback +import gradio as gr +import logging +from tools.i18n.i18n import I18nAuto +from tools.my_utils import clean_path + +i18n = I18nAuto() + +logger = logging.getLogger(__name__) +import ffmpeg +import torch +import sys +from mdxnet import MDXNetDereverb +from vr import AudioPre, AudioPreDeEcho +from bsroformer import Roformer_Loader + +try: + import gradio.analytics as analytics + + analytics.version_check = lambda: None +except: + ... + +weight_uvr5_root = "tools/uvr5/uvr5_weights" +uvr5_names = [] +for name in os.listdir(weight_uvr5_root): + if name.endswith(".pth") or name.endswith(".ckpt") or "onnx" in name: + uvr5_names.append(name.replace(".pth", "").replace(".ckpt", "")) + +device = sys.argv[1] +is_half = eval(sys.argv[2]) +webui_port_uvr5 = int(sys.argv[3]) +is_share = eval(sys.argv[4]) + + +def html_left(text, label="p"): + return f"""
<div style="text-align: left; margin: 0; padding: 0;"> + <{label} style="margin: 0; padding: 0;">{text} + </{label}> + </div>
""" + + +def html_center(text, label="p"): + return f"""
<div style="text-align: center; margin: 0; padding: 0;"> + <{label} style="margin: 0; padding: 0;">{text} + </{label}> + </div>
""" + + +def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): + infos = [] + try: + inp_root = clean_path(inp_root) + save_root_vocal = clean_path(save_root_vocal) + save_root_ins = clean_path(save_root_ins) + is_hp3 = "HP3" in model_name + if model_name == "onnx_dereverb_By_FoxJoy": + pre_fun = MDXNetDereverb(15) + elif "roformer" in model_name.lower(): + func = Roformer_Loader + pre_fun = func( + model_path=os.path.join(weight_uvr5_root, model_name + ".ckpt"), + config_path=os.path.join(weight_uvr5_root, model_name + ".yaml"), + device=device, + is_half=is_half, + ) + if not os.path.exists(os.path.join(weight_uvr5_root, model_name + ".yaml")): + infos.append( + "Warning: You are using a model without a configuration file. The program will automatically use the default configuration file. However, the default configuration file cannot guarantee that all models will run successfully. You can manually place the model configuration file into 'tools/uvr5/uvr5w_weights' and ensure that the configuration file is named as '.yaml' then try it again. (For example, the configuration file corresponding to the model 'bs_roformer_ep_368_sdr_12.9628.ckpt' should be 'bs_roformer_ep_368_sdr_12.9628.yaml'.) Or you can just ignore this warning." + ) + yield "\n".join(infos) + else: + func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho + pre_fun = func( + agg=int(agg), + model_path=os.path.join(weight_uvr5_root, model_name + ".pth"), + device=device, + is_half=is_half, + ) + if inp_root != "": + paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] + else: + paths = [path.name for path in paths] + for path in paths: + inp_path = os.path.join(inp_root, path) + if os.path.isfile(inp_path) == False: + continue + need_reformat = 1 + done = 0 + try: + info = ffmpeg.probe(inp_path, cmd="ffprobe") + if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100": + need_reformat = 0 + pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0, is_hp3) + done = 1 + except: + need_reformat = 1 + traceback.print_exc() + if need_reformat == 1: + tmp_path = "%s/%s.reformatted.wav" % ( + os.path.join(os.environ["TEMP"]), + os.path.basename(inp_path), + ) + os.system(f'ffmpeg -i "{inp_path}" -vn -acodec pcm_s16le -ac 2 -ar 44100 "{tmp_path}" -y') + inp_path = tmp_path + try: + if done == 0: + pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0, is_hp3) + infos.append("%s->Success" % (os.path.basename(inp_path))) + yield "\n".join(infos) + except: + infos.append("%s->%s" % (os.path.basename(inp_path), traceback.format_exc())) + yield "\n".join(infos) + except: + infos.append(traceback.format_exc()) + yield "\n".join(infos) + finally: + try: + if model_name == "onnx_dereverb_By_FoxJoy": + del pre_fun.pred.model + del pre_fun.pred.model_ + else: + del pre_fun.model + del pre_fun + except: + traceback.print_exc() + print("clean_empty_cache") + if torch.cuda.is_available(): + torch.cuda.empty_cache() + yield "\n".join(infos) + + +with gr.Blocks(title="UVR5 WebUI") as app: + gr.Markdown( + value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.") + + "
" + + i18n("如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.") + ) + with gr.Group(): + gr.Markdown(html_center(i18n("伴奏人声分离&去混响&去回声"), "h2")) + with gr.Group(): + gr.Markdown( + value=html_left( + i18n("人声伴奏分离批量处理, 使用UVR5模型。") + + "
" + + i18n( + "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。" + ) + + "
" + + i18n("模型分为三类:") + + "
" + + i18n( + "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;" + ) + + "
" + + i18n("2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;") + + "
" + + i18n("3、去混响、去延迟模型(by FoxJoy):") + + "
  " + + i18n("(1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;") + + "
 " + + i18n( + "(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。" + ) + + "
" + + i18n("去混响/去延迟,附:") + + "
" + + i18n("1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;") + + "
" + + i18n("2、MDX-Net-Dereverb模型挺慢的;") + + "
" + + i18n("3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。"), + "h4", + ) + ) + with gr.Row(): + with gr.Column(): + model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) + dir_wav_input = gr.Textbox( + label=i18n("输入待处理音频文件夹路径"), + placeholder="C:\\Users\\Desktop\\todo-songs", + ) + wav_inputs = gr.File( + file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") + ) + with gr.Column(): + agg = gr.Slider( + minimum=0, + maximum=20, + step=1, + label=i18n("人声提取激进程度"), + value=10, + interactive=True, + visible=False, # 先不开放调整 + ) + opt_vocal_root = gr.Textbox(label=i18n("指定输出主人声文件夹"), value="output/uvr5_opt") + opt_ins_root = gr.Textbox(label=i18n("指定输出非主人声文件夹"), value="output/uvr5_opt") + format0 = gr.Radio( + label=i18n("导出文件格式"), + choices=["wav", "flac", "mp3", "m4a"], + value="flac", + interactive=True, + ) + with gr.Column(): + with gr.Row(): + but2 = gr.Button(i18n("转换"), variant="primary") + with gr.Row(): + vc_output4 = gr.Textbox(label=i18n("输出信息"), lines=3) + but2.click( + uvr, + [ + model_choose, + dir_wav_input, + opt_vocal_root, + wav_inputs, + opt_ins_root, + agg, + format0, + ], + [vc_output4], + api_name="uvr_convert", + ) +app.queue().launch( # concurrency_count=511, max_size=1022 + server_name="0.0.0.0", + inbrowser=True, + share=is_share, + server_port=webui_port_uvr5, + # quiet=True, +)