Plachta committed
Commit: ac28dc4
Parent: f32cd36

Update app.py

Files changed (1):
  app.py: +194, -194
app.py CHANGED
@@ -1,195 +1,195 @@
- import gradio as gr
- import torch
- import torchaudio
- import librosa
- from modules.commons import build_model, load_checkpoint, recursive_munch
- import yaml
- from hf_utils import load_custom_model_from_hf
-
- # Load model and configuration
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
-                                                                  "DiT_step_298000_seed_uvit_facodec_small_wavenet_pruned.pth",
-                                                                  "config_dit_mel_seed_facodec_small_wavenet.yml")
-
- config = yaml.safe_load(open(dit_config_path, 'r'))
- model_params = recursive_munch(config['model_params'])
- model = build_model(model_params, stage='DiT')
- hop_length = config['preprocess_params']['spect_params']['hop_length']
- sr = config['preprocess_params']['sr']
-
- # Load checkpoints
- model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path,
-                                  load_only_params=True, ignore_modules=[], is_distributed=False)
- for key in model:
-     model[key].eval()
-     model[key].to(device)
- model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
-
- # Load additional modules
- from modules.campplus.DTDNN import CAMPPlus
-
- campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
- campplus_model.load_state_dict(torch.load(config['model_params']['style_encoder']['campplus_path']))
- campplus_model.eval()
- campplus_model.to(device)
-
- from modules.hifigan.generator import HiFTGenerator
- from modules.hifigan.f0_predictor import ConvRNNF0Predictor
-
- hift_checkpoint_path, hift_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
-                                                                    "hift.pt",
-                                                                    "hifigan.yml")
- hift_config = yaml.safe_load(open(hift_config_path, 'r'))
- hift_gen = HiFTGenerator(**hift_config['hift'], f0_predictor=ConvRNNF0Predictor(**hift_config['f0_predictor']))
- hift_gen.load_state_dict(torch.load(hift_checkpoint_path, map_location='cpu'))
- hift_gen.eval()
- hift_gen.to(device)
-
- speech_tokenizer_type = config['model_params']['speech_tokenizer'].get('type', 'cosyvoice')
- if speech_tokenizer_type == 'cosyvoice':
-     from modules.cosyvoice_tokenizer.frontend import CosyVoiceFrontEnd
-     speech_tokenizer_path = load_custom_model_from_hf("Plachta/Seed-VC", "speech_tokenizer_v1.onnx", None)
-     cosyvoice_frontend = CosyVoiceFrontEnd(speech_tokenizer_model=speech_tokenizer_path,
-                                            device='cuda', device_id=0)
- elif speech_tokenizer_type == 'facodec':
-     ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
-
-     codec_config = yaml.safe_load(open(config_path))
-     codec_model_params = recursive_munch(codec_config['model_params'])
-     codec_encoder = build_model(codec_model_params, stage="codec")
-
-     ckpt_params = torch.load(ckpt_path, map_location="cpu")
-
-     for key in codec_encoder:
-         codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
-     _ = [codec_encoder[key].eval() for key in codec_encoder]
-     _ = [codec_encoder[key].to(device) for key in codec_encoder]
- # Generate mel spectrograms
- mel_fn_args = {
-     "n_fft": config['preprocess_params']['spect_params']['n_fft'],
-     "win_size": config['preprocess_params']['spect_params']['win_length'],
-     "hop_size": config['preprocess_params']['spect_params']['hop_length'],
-     "num_mels": config['preprocess_params']['spect_params']['n_mels'],
-     "sampling_rate": sr,
-     "fmin": 0,
-     "fmax": 8000,
-     "center": False
- }
- from modules.audio import mel_spectrogram
-
- to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
-
- @torch.no_grad()
- @torch.inference_mode()
- def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
-     # Load audio
-     source_audio = librosa.load(source, sr=sr)[0]
-     ref_audio = librosa.load(target, sr=sr)[0]
-     # source_sr, source_audio = source
-     # ref_sr, ref_audio = target
-     # # if any of the inputs has 2 channels, take the first only
-     # if source_audio.ndim == 2:
-     #     source_audio = source_audio[:, 0]
-     # if ref_audio.ndim == 2:
-     #     ref_audio = ref_audio[:, 0]
-     #
-     # source_audio, ref_audio = source_audio / 32768.0, ref_audio / 32768.0
-     #
-     # # if source or audio sr not equal to default sr, resample
-     # if source_sr != sr:
-     #     source_audio = librosa.resample(source_audio, source_sr, sr)
-     # if ref_sr != sr:
-     #     ref_audio = librosa.resample(ref_audio, ref_sr, sr)
-
-     # Process audio
-     source_audio = torch.tensor(source_audio[:sr * 30]).unsqueeze(0).float().to(device)
-     ref_audio = torch.tensor(ref_audio[:sr * 30]).unsqueeze(0).float().to(device)
-
-     # Resample
-     source_waves_16k = torchaudio.functional.resample(source_audio, sr, 16000)
-     ref_waves_16k = torchaudio.functional.resample(ref_audio, sr, 16000)
-
-     # Extract features
-     if speech_tokenizer_type == 'cosyvoice':
-         S_alt = cosyvoice_frontend.extract_speech_token(source_waves_16k)[0]
-         S_ori = cosyvoice_frontend.extract_speech_token(ref_waves_16k)[0]
-     elif speech_tokenizer_type == 'facodec':
-         converted_waves_24k = torchaudio.functional.resample(source_audio, sr, 24000)
-         wave_lengths_24k = torch.LongTensor([converted_waves_24k.size(1)]).to(converted_waves_24k.device)
-         waves_input = converted_waves_24k.unsqueeze(1)
-         z = codec_encoder.encoder(waves_input)
-         (
-             quantized,
-             codes
-         ) = codec_encoder.quantizer(
-             z,
-             waves_input,
-         )
-         S_alt = torch.cat([codes[1], codes[0]], dim=1)
-
-         # S_ori should be extracted in the same way
-         waves_24k = torchaudio.functional.resample(ref_audio, sr, 24000)
-         waves_input = waves_24k.unsqueeze(1)
-         z = codec_encoder.encoder(waves_input)
-         (
-             quantized,
-             codes
-         ) = codec_encoder.quantizer(
-             z,
-             waves_input,
-         )
-         S_ori = torch.cat([codes[1], codes[0]], dim=1)
-
-     mel = to_mel(source_audio.to(device).float())
-     mel2 = to_mel(ref_audio.to(device).float())
-
-     target_lengths = torch.LongTensor([int(mel.size(2) * length_adjust)]).to(mel.device)
-     target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
-
-     feat2 = torchaudio.compliance.kaldi.fbank(ref_waves_16k,
-                                               num_mel_bins=80,
-                                               dither=0,
-                                               sample_frequency=16000)
-     feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
-     style2 = campplus_model(feat2.unsqueeze(0))
-
-     # Length regulation
-     cond = model.length_regulator(S_alt, ylens=target_lengths, n_quantizers=int(n_quantizers))[0]
-     prompt_condition = model.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=int(n_quantizers))[0]
-     cat_condition = torch.cat([prompt_condition, cond], dim=1)
-
-     # Voice Conversion
-     vc_target = model.cfm.inference(cat_condition, torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
-                                     mel2, style2, None, diffusion_steps, inference_cfg_rate=inference_cfg_rate)
-     vc_target = vc_target[:, :, mel2.size(-1):]
-
-     # Convert to waveform
-     vc_wave = hift_gen.inference(vc_target)
-
-     return sr, vc_wave.squeeze(0).cpu().numpy()
-
-
- if __name__ == "__main__":
-     description = "Zero-shot voice conversion with in-context learning. Check out our [GitHub repository](https://github.com/Plachtaa/seed-vc) for details and updates."
-     inputs = [
-         gr.Audio(type="filepath", label="Source Audio"),
-         gr.Audio(type="filepath", label="Reference Audio"),
-         gr.Slider(minimum=1, maximum=200, value=10, step=1, label="Diffusion Steps", info="10 by default, 50~100 for best quality"),
-         gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Length Adjust", info="<1.0 for speed-up speech, >1.0 for slow-down speech"),
-         gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Inference CFG Rate", info="has subtle influence"),
-         gr.Slider(minimum=1, maximum=3, step=1, value=3, label="N Quantizers", info="the less quantizer used, the less prosody of source audio is preserved"),
-     ]
-
-     examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 50, 1.0, 0.7, 1]]
-
-     outputs = gr.Audio(label="Output Audio")
-
-     gr.Interface(fn=voice_conversion,
-                  description=description,
-                  inputs=inputs,
-                  outputs=outputs,
-                  title="Seed Voice Conversion",
-                  examples=examples,
                   ).launch()
 
+ import gradio as gr
+ import torch
+ import torchaudio
+ import librosa
+ from modules.commons import build_model, load_checkpoint, recursive_munch
+ import yaml
+ from hf_utils import load_custom_model_from_hf
+
+ # Load model and configuration
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
+                                                                  "DiT_step_298000_seed_uvit_facodec_small_wavenet_pruned.pth",
+                                                                  "config_dit_mel_seed_facodec_small_wavenet.yml")
+
+ config = yaml.safe_load(open(dit_config_path, 'r'))
+ model_params = recursive_munch(config['model_params'])
+ model = build_model(model_params, stage='DiT')
+ hop_length = config['preprocess_params']['spect_params']['hop_length']
+ sr = config['preprocess_params']['sr']
+
+ # Load checkpoints
+ model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path,
+                                  load_only_params=True, ignore_modules=[], is_distributed=False)
+ for key in model:
+     model[key].eval()
+     model[key].to(device)
+ model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
+
+ # Load additional modules
+ from modules.campplus.DTDNN import CAMPPlus
+
+ campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
+ campplus_model.load_state_dict(torch.load(config['model_params']['style_encoder']['campplus_path'], map_location='cpu'))
+ campplus_model.eval()
+ campplus_model.to(device)
+
+ from modules.hifigan.generator import HiFTGenerator
+ from modules.hifigan.f0_predictor import ConvRNNF0Predictor
+
+ hift_checkpoint_path, hift_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
+                                                                    "hift.pt",
+                                                                    "hifigan.yml")
+ hift_config = yaml.safe_load(open(hift_config_path, 'r'))
+ hift_gen = HiFTGenerator(**hift_config['hift'], f0_predictor=ConvRNNF0Predictor(**hift_config['f0_predictor']))
+ hift_gen.load_state_dict(torch.load(hift_checkpoint_path, map_location='cpu'))
+ hift_gen.eval()
+ hift_gen.to(device)
+
+ speech_tokenizer_type = config['model_params']['speech_tokenizer'].get('type', 'cosyvoice')
+ if speech_tokenizer_type == 'cosyvoice':
+     from modules.cosyvoice_tokenizer.frontend import CosyVoiceFrontEnd
+     speech_tokenizer_path = load_custom_model_from_hf("Plachta/Seed-VC", "speech_tokenizer_v1.onnx", None)
+     cosyvoice_frontend = CosyVoiceFrontEnd(speech_tokenizer_model=speech_tokenizer_path,
+                                            device='cuda', device_id=0)
+ elif speech_tokenizer_type == 'facodec':
+     ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
+
+     codec_config = yaml.safe_load(open(config_path))
+     codec_model_params = recursive_munch(codec_config['model_params'])
+     codec_encoder = build_model(codec_model_params, stage="codec")
+
+     ckpt_params = torch.load(ckpt_path, map_location="cpu")
+
+     for key in codec_encoder:
+         codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
+     _ = [codec_encoder[key].eval() for key in codec_encoder]
+     _ = [codec_encoder[key].to(device) for key in codec_encoder]
+ # Generate mel spectrograms
+ mel_fn_args = {
+     "n_fft": config['preprocess_params']['spect_params']['n_fft'],
+     "win_size": config['preprocess_params']['spect_params']['win_length'],
+     "hop_size": config['preprocess_params']['spect_params']['hop_length'],
+     "num_mels": config['preprocess_params']['spect_params']['n_mels'],
+     "sampling_rate": sr,
+     "fmin": 0,
+     "fmax": 8000,
+     "center": False
+ }
+ from modules.audio import mel_spectrogram
+
+ to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
+
+ @torch.no_grad()
+ @torch.inference_mode()
+ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
+     # Load audio
+     source_audio = librosa.load(source, sr=sr)[0]
+     ref_audio = librosa.load(target, sr=sr)[0]
+     # source_sr, source_audio = source
+     # ref_sr, ref_audio = target
+     # # if any of the inputs has 2 channels, take the first only
+     # if source_audio.ndim == 2:
+     #     source_audio = source_audio[:, 0]
+     # if ref_audio.ndim == 2:
+     #     ref_audio = ref_audio[:, 0]
+     #
+     # source_audio, ref_audio = source_audio / 32768.0, ref_audio / 32768.0
+     #
+     # # if source or audio sr not equal to default sr, resample
+     # if source_sr != sr:
+     #     source_audio = librosa.resample(source_audio, source_sr, sr)
+     # if ref_sr != sr:
+     #     ref_audio = librosa.resample(ref_audio, ref_sr, sr)
+
+     # Process audio
+     source_audio = torch.tensor(source_audio[:sr * 30]).unsqueeze(0).float().to(device)
+     ref_audio = torch.tensor(ref_audio[:sr * 30]).unsqueeze(0).float().to(device)
+
+     # Resample
+     source_waves_16k = torchaudio.functional.resample(source_audio, sr, 16000)
+     ref_waves_16k = torchaudio.functional.resample(ref_audio, sr, 16000)
+
+     # Extract features
+     if speech_tokenizer_type == 'cosyvoice':
+         S_alt = cosyvoice_frontend.extract_speech_token(source_waves_16k)[0]
+         S_ori = cosyvoice_frontend.extract_speech_token(ref_waves_16k)[0]
+     elif speech_tokenizer_type == 'facodec':
+         converted_waves_24k = torchaudio.functional.resample(source_audio, sr, 24000)
+         wave_lengths_24k = torch.LongTensor([converted_waves_24k.size(1)]).to(converted_waves_24k.device)
+         waves_input = converted_waves_24k.unsqueeze(1)
+         z = codec_encoder.encoder(waves_input)
+         (
+             quantized,
+             codes
+         ) = codec_encoder.quantizer(
+             z,
+             waves_input,
+         )
+         S_alt = torch.cat([codes[1], codes[0]], dim=1)
+
+         # S_ori should be extracted in the same way
+         waves_24k = torchaudio.functional.resample(ref_audio, sr, 24000)
+         waves_input = waves_24k.unsqueeze(1)
+         z = codec_encoder.encoder(waves_input)
+         (
+             quantized,
+             codes
+         ) = codec_encoder.quantizer(
+             z,
+             waves_input,
+         )
+         S_ori = torch.cat([codes[1], codes[0]], dim=1)
+
+     mel = to_mel(source_audio.to(device).float())
+     mel2 = to_mel(ref_audio.to(device).float())
+
+     target_lengths = torch.LongTensor([int(mel.size(2) * length_adjust)]).to(mel.device)
+     target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
+
+     feat2 = torchaudio.compliance.kaldi.fbank(ref_waves_16k,
+                                               num_mel_bins=80,
+                                               dither=0,
+                                               sample_frequency=16000)
+     feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
+     style2 = campplus_model(feat2.unsqueeze(0))
+
+     # Length regulation
+     cond = model.length_regulator(S_alt, ylens=target_lengths, n_quantizers=int(n_quantizers))[0]
+     prompt_condition = model.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=int(n_quantizers))[0]
+     cat_condition = torch.cat([prompt_condition, cond], dim=1)
+
+     # Voice Conversion
+     vc_target = model.cfm.inference(cat_condition, torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
+                                     mel2, style2, None, diffusion_steps, inference_cfg_rate=inference_cfg_rate)
+     vc_target = vc_target[:, :, mel2.size(-1):]
+
+     # Convert to waveform
+     vc_wave = hift_gen.inference(vc_target)
+
+     return sr, vc_wave.squeeze(0).cpu().numpy()
+
+
+ if __name__ == "__main__":
+     description = "Zero-shot voice conversion with in-context learning. Check out our [GitHub repository](https://github.com/Plachtaa/seed-vc) for details and updates."
+     inputs = [
+         gr.Audio(type="filepath", label="Source Audio"),
+         gr.Audio(type="filepath", label="Reference Audio"),
+         gr.Slider(minimum=1, maximum=200, value=10, step=1, label="Diffusion Steps", info="10 by default, 50~100 for best quality"),
+         gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Length Adjust", info="<1.0 for speed-up speech, >1.0 for slow-down speech"),
+         gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Inference CFG Rate", info="has subtle influence"),
+         gr.Slider(minimum=1, maximum=3, step=1, value=3, label="N Quantizers", info="the less quantizer used, the less prosody of source audio is preserved"),
+     ]
+
+     examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 50, 1.0, 0.7, 1]]
+
+     outputs = gr.Audio(label="Output Audio")
+
+     gr.Interface(fn=voice_conversion,
+                  description=description,
+                  inputs=inputs,
+                  outputs=outputs,
+                  title="Seed Voice Conversion",
+                  examples=examples,
                   ).launch()
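
Note: the substantive change in this diff is the CAMPPlus checkpoint load, which now passes map_location='cpu' to torch.load. For reference, a minimal, self-contained sketch of that loading pattern (a stand-in nn.Linear module and a hypothetical checkpoint path, not the Space's actual code):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                            # stand-in module, not CAMPPlus
torch.save(model.state_dict(), "ckpt.pt")          # hypothetical checkpoint path

# map_location is an argument of torch.load, not of load_state_dict: checkpoint
# tensors are remapped to CPU at deserialization time, even if saved from a GPU.
state_dict = torch.load("ckpt.pt", map_location="cpu")
model.load_state_dict(state_dict)
model.to("cuda" if torch.cuda.is_available() else "cpu")   # move the module afterwards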