Staticaliza committed
Commit b3480ec · verified · 1 Parent(s): 73db81a

Update app.py

Files changed (1):
  1. app.py +174 -374
app.py CHANGED
@@ -1,382 +1,182 @@
- import gradio as gr
- import numpy as np
- import soundfile as sf
  import spaces
- import torch
- import torchaudio
- import librosa
- import yaml
- import tempfile
  import os

- from huggingface_hub import hf_hub_download
- from transformers import AutoFeatureExtractor, WhisperModel
- from torch.nn.utils import parametrizations
-
- from modules.commons import build_model, load_checkpoint, recursive_munch
- from modules.campplus.DTDNN import CAMPPlus
- from modules.bigvgan import bigvgan
- from modules.rmvpe import RMVPE
- from modules.audio import mel_spectrogram
-
- # ----------------------------
- # Optimization Settings
- # ----------------------------
-
- # Set the number of threads to the number of CPU cores
- torch.set_num_threads(os.cpu_count())
- torch.set_num_interop_threads(os.cpu_count())
-
- # Enable optimized backends
- torch.backends.openmp.enabled = True
- torch.backends.mkldnn.enabled = True
- torch.backends.cudnn.enabled = False
- torch.backends.cuda.enabled = False
-
- torch.set_grad_enabled(False)
-
- # Force CPU usage
- device = torch.device("cpu")
- print(f"[DEVICE] | Using device: {device}")
-
- # ----------------------------
- # Load Models and Configuration
- # ----------------------------
-
- def load_custom_model_from_hf(repo_id, model_filename="pytorch_model.bin", config_filename="config.yml"):
-     os.makedirs("./checkpoints", exist_ok=True)
-     model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, cache_dir="./checkpoints")
-     if config_filename is None:
-         return model_path
-     config_path = hf_hub_download(repo_id=repo_id, filename=config_filename, cache_dir="./checkpoints")
-
-     return model_path, config_path
-
- # Load DiT model
- dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC", "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth", "config_dit_mel_seed_uvit_whisper_small_wavenet.yml")
- config = yaml.safe_load(open(dit_config_path, 'r'))
- model_params = recursive_munch(config['model_params'])
- model = build_model(model_params, stage='DiT')
-
- # Debug: Print model keys to identify correct key
- print(f"[INFO] | Model keys: {model.keys()}")
-
- hop_length = config['preprocess_params']['spect_params']['hop_length']
- sr = config['preprocess_params']['sr']
-
- # Load DiT checkpoints
- model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path, load_only_params=True, ignore_modules=[], is_distributed=False)
- for key in model:
-     model[key].eval()
-     model[key].to(device)
- print("[INFO] | DiT model loaded and set to eval mode.")
-
- model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
-
- # Ensure 'CAMPPlus' is correctly imported and defined
- try:
-     campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
-     print("[INFO] | CAMPPlus model instantiated.")
- except NameError:
-     print("[ERROR] | CAMPPlus is not defined. Please check the import path and ensure CAMPPlus is correctly defined.")
-     raise
-
- # Set weights_only=True for security
- campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
- campplus_state = torch.load(campplus_ckpt_path, map_location="cpu", weights_only=True)
- campplus_model.load_state_dict(campplus_state)
- campplus_model.eval()
- campplus_model.to(device)
- print("[INFO] | CAMPPlus model loaded, set to eval mode, and moved to CPU.")
-
- # Load BigVGAN model
- bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
- bigvgan_model.remove_weight_norm()
- bigvgan_model = bigvgan_model.eval().to(device)
- print("[INFO] | BigVGAN model loaded, weight norm removed, set to eval mode, and moved to CPU.")
-
- # Load FAcodec model
- ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
- codec_config = yaml.safe_load(open(config_path))
- codec_model_params = recursive_munch(codec_config['model_params'])
- codec_encoder = build_model(codec_model_params, stage="codec")
- ckpt_params = torch.load(ckpt_path, map_location="cpu", weights_only=True)
- for key in codec_encoder:
-     codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
- codec_encoder = {k: v.eval().to(device) for k, v in codec_encoder.items()}
- print("[INFO] | FAcodec model loaded, set to eval mode, and moved to CPU.")
-
- # Load Whisper model with float32 and compatible size
- whisper_name = model_params.speech_tokenizer.whisper_name if hasattr(model_params.speech_tokenizer, 'whisper_name') else "openai/whisper-small"
- whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float32).to(device)
- del whisper_model.decoder  # Remove decoder as it's not used
- whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)
- print(f"[INFO] | Whisper model '{whisper_name}' loaded with dtype {whisper_model.dtype} and moved to CPU.")
-
- # Generate mel spectrograms with optimized parameters
- mel_fn_args = {
-     "n_fft": 1024,
-     "win_size": 1024,
-     "hop_size": 256,
-     "num_mels": 80,
-     "sampling_rate": sr,
-     "fmin": 0,
-     "fmax": None,
-     "center": False
- }
- to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
-
- # Load F0 conditioned model
- dit_checkpoint_path_f0, dit_config_path_f0 = load_custom_model_from_hf("Plachta/Seed-VC", "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth", "config_dit_mel_seed_uvit_whisper_base_f0_44k.yml")
- config_f0 = yaml.safe_load(open(dit_config_path_f0, 'r'))
- model_params_f0 = recursive_munch(config_f0['model_params'])
- model_f0 = build_model(model_params_f0, stage='DiT')
-
- hop_length_f0 = config_f0['preprocess_params']['spect_params']['hop_length']
- sr_f0 = config_f0['preprocess_params']['sr']
-
- # Load F0 model checkpoints
- model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path_f0, load_only_params=True, ignore_modules=[], is_distributed=False)
- for key in model_f0:
-     model_f0[key].eval()
-     model_f0[key].to(device)
- print("[INFO] | F0 conditioned DiT model loaded and set to eval mode.")
-
- model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
-
- # Load F0 extractor
- model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
- rmvpe = RMVPE(model_path, is_half=False, device=device)
- print("[INFO] | RMVPE model loaded and moved to CPU.")
-
- mel_fn_args_f0 = {
-     "n_fft": config_f0['preprocess_params']['spect_params']['n_fft'],
-     "win_size": config_f0['preprocess_params']['spect_params']['win_length'],
-     "hop_size": config_f0['preprocess_params']['spect_params']['hop_length'],
-     "num_mels": 80,  # Ensure this matches the primary model
-     "sampling_rate": sr_f0,
-     "fmin": 0,
-     "fmax": None,
-     "center": False
- }
- to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)
-
- # Load BigVGAN 44kHz model
- bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
- bigvgan_44k_model.remove_weight_norm()
- bigvgan_44k_model = bigvgan_44k_model.eval().to(device)
- print("[INFO] | BigVGAN 44kHz model loaded, weight norm removed, set to eval mode, and moved to CPU.")
-
- # CSS Styling
- css = '''
- .gradio-container{max-width: 560px !important}
- h1{text-align:center}
- footer {
-     visibility: hidden
- }
- '''
-
- # ----------------------------
- # Functions
- # ----------------------------
-
- @torch.no_grad()
- @torch.inference_mode()
- def voice_conversion(input, reference, steps, guidance, pitch, speed):
-     print("[INFO] | Voice conversion started.")
-
-     inference_module, mel_fn, bigvgan_fn = model, to_mel, bigvgan_model
-     bitrate, sampling_rate, sr_current, hop_length_current = "320k", 16000, 22050, 256
-     max_context_window, overlap_wave_len = sr_current // hop_length_current * 30, 16 * hop_length_current
-
-     # Load audio using librosa
-     print("[INFO] | Loading source and reference audio.")
-     source_audio, _ = librosa.load(input, sr=sr_current)
-     ref_audio, _ = librosa.load(reference, sr=sr_current)
-
-     # Clip reference audio to 25 seconds
-     ref_audio = ref_audio[:sr_current * 25]
-     print(f"[INFO] | Source audio length: {len(source_audio)/sr_current:.2f}s, Reference audio length: {len(ref_audio)/sr_current:.2f}s")
-
-     # Convert audio to tensors
-     source_audio_tensor = torch.tensor(source_audio).unsqueeze(0).float().to(device)
-     ref_audio_tensor = torch.tensor(ref_audio).unsqueeze(0).float().to(device)
-
-     # Resample to 16kHz
-     ref_waves_16k = torchaudio.functional.resample(ref_audio_tensor, sr_current, sampling_rate)
-     converted_waves_16k = torchaudio.functional.resample(source_audio_tensor, sr_current, sampling_rate)
-
-     # Generate Whisper features
-     print("[INFO] | Generating Whisper features for source audio.")
-     if converted_waves_16k.size(-1) <= sampling_rate * 30:
-         alt_inputs = whisper_feature_extractor([converted_waves_16k.squeeze(0).cpu().numpy()], return_tensors="pt", return_attention_mask=True, sampling_rate=sampling_rate)
-         alt_input_features = whisper_model._mask_input_features(alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
-         alt_outputs = whisper_model.encoder(alt_input_features.to(torch.float32), head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True)
-         S_alt = alt_outputs.last_hidden_state.to(torch.float32)
-         S_alt = S_alt[:, :converted_waves_16k.size(-1) // 320 + 1]
-         print(f"[INFO] | S_alt shape: {S_alt.shape}")
-     else:
-         # Process in chunks
-         print("[INFO] | Processing source audio in chunks.")
-         overlapping_time = 5  # seconds
-         chunk_size = sampling_rate * 30  # 30 seconds
-         overlap_size = sampling_rate * overlapping_time
-         S_alt_list = []
-         buffer = None
-         traversed_time = 0
-         total_length = converted_waves_16k.size(-1)
-
-         while traversed_time < total_length:
-             if buffer is None:
-                 chunk = converted_waves_16k[:, traversed_time:traversed_time + chunk_size]
              else:
-                 chunk = torch.cat([buffer, converted_waves_16k[:, traversed_time:traversed_time + chunk_size - overlap_size]], dim=-1)
-             alt_inputs = whisper_feature_extractor([chunk.squeeze(0).cpu().numpy()], return_tensors="pt", return_attention_mask=True, sampling_rate=sampling_rate)
-             alt_input_features = whisper_model._mask_input_features(alt_inputs.input_features, attention_mask=alt_inputs.attention_mask).to(device)
-             alt_outputs = whisper_model.encoder(alt_input_features.to(torch.float32), head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True)
-             S_chunk = alt_outputs.last_hidden_state.to(torch.float32)
-             S_chunk = S_chunk[:, :chunk.size(-1) // 320 + 1]
-             print(f"[INFO] | Processed chunk with S_chunk shape: {S_chunk.shape}")
-
-             if traversed_time == 0:
-                 S_alt_list.append(S_chunk)
              else:
-                 skip_frames = 50 * overlapping_time
-                 S_alt_list.append(S_chunk[:, skip_frames:])
-
-             buffer = chunk[:, -overlap_size:]
-             traversed_time += chunk_size - overlap_size
-
-         S_alt = torch.cat(S_alt_list, dim=1)
-         print(f"[INFO] | Final S_alt shape after chunk processing: {S_alt.shape}")
-
-     # Original Whisper features
-     print("[INFO] | Generating Whisper features for reference audio.")
-     ori_waves_16k = torchaudio.functional.resample(ref_audio_tensor, sr_current, sampling_rate)
-     ori_inputs = whisper_feature_extractor([ori_waves_16k.squeeze(0).cpu().numpy()], return_tensors="pt", return_attention_mask=True, sampling_rate=sampling_rate)
-     ori_input_features = whisper_model._mask_input_features(ori_inputs.input_features, attention_mask=ori_inputs.attention_mask).to(device)
-     ori_outputs = whisper_model.encoder(ori_input_features.to(torch.float32), head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True)
-     S_ori = ori_outputs.last_hidden_state.to(torch.float32)
-     S_ori = S_ori[:, :ori_waves_16k.size(-1) // 320 + 1]
-     print(f"[INFO] | S_ori shape: {S_ori.shape}")
-
-     # Generate mel spectrograms
-     print("[INFO] | Generating mel spectrograms.")
-     mel = mel_fn(source_audio_tensor.float())
-     mel2 = mel_fn(ref_audio_tensor.float())
-     print(f"[INFO] | Mel spectrogram shapes: mel={mel.shape}, mel2={mel2.shape}")
-
-     # Length adjustment
-     target_lengths = torch.LongTensor([int(mel.size(2) / speed)]).to(mel.device)
-     target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
-     print(f"[INFO] | Target lengths: {target_lengths.item()}, {target2_lengths.item()}")
-
-     # Extract style features
-     print("[INFO] | Extracting style features from reference audio.")
-     feat2 = torchaudio.compliance.kaldi.fbank(ref_waves_16k, num_mel_bins=80, dither=0, sample_frequency=sampling_rate)
-     feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
-     style2 = campplus_model(feat2.unsqueeze(0))
-     print(f"[INFO] | Style2 shape: {style2.shape}")
-
-     # Length Regulation
-     print("[INFO] | Applying length regulation.")
-     cond, _, _, _, _ = inference_module.length_regulator(S_alt, ylens=target_lengths, n_quantizers=3, f0=None)
-     prompt_condition, _, _, _, _ = inference_module.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=3, f0=None)
-     print(f"[INFO] | Cond shape: {cond.shape}, Prompt condition shape: {prompt_condition.shape}")
-
-     # Initialize variables for audio generation
-     max_source_window = max_context_window - mel2.size(2)
-     processed_frames = 0
-     generated_wave_chunks = []
-
-     print("[INFO] | Starting inference and audio generation.")
-
-     while processed_frames < cond.size(1):
-         chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
-         is_last_chunk = processed_frames + max_source_window >= cond.size(1)
-         cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1)
-
-         # Perform inference
-         vc_target = inference_module.cfm.inference(cat_condition, torch.LongTensor([cat_condition.size(1)]).to(mel2.device), mel2, style2, None, steps, inference_cfg_rate=guidance)
-         vc_target = vc_target[:, :, mel2.size(2):]
-         print(f"[INFO] | vc_target shape: {vc_target.shape}")
-
-         # Generate waveform using BigVGAN
-         vc_wave = bigvgan_fn(vc_target.float())[0]
-         print(f"[INFO] | vc_wave shape: {vc_wave.shape}")
-
-         # Handle the generated waveform
-         output_wave = vc_wave[0].cpu().numpy()
-         generated_wave_chunks.append(output_wave)
-
-         # Ensure processed_frames increments correctly to avoid infinite loop
-         processed_frames += vc_target.size(2)
-
-         print(f"[INFO] | Processed frames updated to: {processed_frames}")
-
-     # Concatenate all generated wave chunks
-     final_audio = np.concatenate(generated_wave_chunks).astype(np.float32)
-
-     # Pitch Shifting using librosa
-     print("[INFO] | Applying pitch shifting.")
-     try:
-         if pitch != 0:
-             final_audio = librosa.effects.pitch_shift(final_audio, sr=sr_current, n_steps=pitch)
-             print(f"[INFO] | Pitch shifted by {pitch} semitones.")
-         else:
-             print("[INFO] | No pitch shift applied.")
-     except Exception as e:
-         print(f"[ERROR] | Pitch shifting failed: {e}")
-
-     # Normalize the audio to ensure it's within [-1.0, 1.0]
-     max_val = np.max(np.abs(final_audio))
-     if max_val > 1.0:
-         final_audio = final_audio / max_val
-         print("[INFO] | Final audio normalized.")
-
-     # Save the audio to a temporary WAV file
-     print("[INFO] | Saving final audio to a temporary WAV file.")
-     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
-         sf.write(tmp_file.name, final_audio, sr_current, format='WAV')
-         temp_file_path = tmp_file.name
-
-     print(f"[INFO] | Final audio saved to {temp_file_path}")
-
-     return temp_file_path
-
- def cloud():
-     print("[CLOUD] | Space maintained.")
-
- @spaces.GPU(duration=15)
- def gpu():
-     return
-
- # ----------------------------
- # Gradio Interface
- # ----------------------------
-
- with gr.Blocks(css=css) as main:
-     with gr.Column():
-         gr.Markdown("🪄 Add tone to audio.")
-
-     with gr.Column():
-         input = gr.Audio(label="Input Audio", type="filepath")
-         reference_input = gr.Audio(label="Reference Audio", type="filepath")
-
-     with gr.Column():
-         steps = gr.Slider(label="Steps", value=4, minimum=1, maximum=100, step=1)
-         guidance = gr.Slider(label="Guidance", value=0.7, minimum=0.0, maximum=1.0, step=0.1)
-         pitch = gr.Slider(label="Pitch", value=0.0, minimum=-10.0, maximum=10.0, step=0.1)
-         speed = gr.Slider(label="Speed", value=1.0, minimum=0.1, maximum=10.0, step=0.1)

-     with gr.Column():
-         submit = gr.Button("▶")
-         maintain = gr.Button("☁️")
-
-     with gr.Column():
-         output = gr.Audio(label="Output", type="filepath")

-     submit.click(voice_conversion, inputs=[input, reference_input, steps, guidance, pitch, speed], outputs=output, queue=False)
-     maintain.click(cloud, inputs=[], outputs=[], queue=False)

- main.launch(show_api=True)
  import spaces
+ from kokoro import KModel, KPipeline
+ import gradio as gr
  import os
+ import random
+ import torch

+ IS_DUPLICATE = not os.getenv('SPACE_ID', '').startswith('hexgrad/')
+ CHAR_LIMIT = None if IS_DUPLICATE else 5000
+
+ CUDA_AVAILABLE = torch.cuda.is_available()
+ models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
+ pipelines = {lang_code: KPipeline(lang_code=lang_code, model=False) for lang_code in 'ab'}
+ pipelines['a'].g2p.lexicon.golds['kokoro'] = 'kˈOkəɹO'
+ pipelines['b'].g2p.lexicon.golds['kokoro'] = 'kˈQkəɹQ'
+
+ @spaces.GPU(duration=10)
+ def forward_gpu(ps, ref_s, speed):
+     return models[True](ps, ref_s, speed)
+
+ def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
+     text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
+     pipeline = pipelines[voice[0]]
+     pack = pipeline.load_voice(voice)
+     use_gpu = use_gpu and CUDA_AVAILABLE
+     for _, ps, _ in pipeline(text, voice, speed):
+         ref_s = pack[len(ps)-1]
+         try:
+             if use_gpu:
+                 audio = forward_gpu(ps, ref_s, speed)
              else:
+                 audio = models[False](ps, ref_s, speed)
+         except gr.exceptions.Error as e:
+             if use_gpu:
+                 gr.Warning(str(e))
+                 gr.Info('Retrying with CPU. To avoid this error, change Hardware to CPU.')
+                 audio = models[False](ps, ref_s, speed)
              else:
+                 raise gr.Error(e)
+         return (24000, audio.numpy()), ps
+     return None, ''
+
+ # Arena API
+ def predict(text, voice='af_heart', speed=1):
+     return generate_first(text, voice, speed, use_gpu=False)[0]
+
+ def tokenize_first(text, voice='af_heart'):
+     pipeline = pipelines[voice[0]]
+     for _, ps, _ in pipeline(text, voice):
+         return ps
+     return ''
+
+ def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
+     text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
+     pipeline = pipelines[voice[0]]
+     pack = pipeline.load_voice(voice)
+     use_gpu = use_gpu and CUDA_AVAILABLE
+     for _, ps, _ in pipeline(text, voice, speed):
+         ref_s = pack[len(ps)-1]
+         try:
+             if use_gpu:
+                 audio = forward_gpu(ps, ref_s, speed)
+             else:
+                 audio = models[False](ps, ref_s, speed)
+         except gr.exceptions.Error as e:
+             if use_gpu:
+                 gr.Warning(str(e))
+                 gr.Info('Switching to CPU')
+                 audio = models[False](ps, ref_s, speed)
+             else:
+                 raise gr.Error(e)
+         yield 24000, audio.numpy()
+
+ random_texts = {}
+ for lang in ['en']:
+     with open(f'{lang}.txt', 'r') as r:
+         random_texts[lang] = [line.strip() for line in r]
+
+ def get_random_text(voice):
+     lang = dict(a='en', b='en')[voice[0]]
+     return random.choice(random_texts[lang])
+
+ CHOICES = {
+     '🇺🇸 🚺 Heart ❤️': 'af_heart',
+     '🇺🇸 🚺 Bella 🔥': 'af_bella',
+     '🇺🇸 🚺 Nicole 🎧': 'af_nicole',
+     '🇺🇸 🚺 Aoede': 'af_aoede',
+     '🇺🇸 🚺 Kore': 'af_kore',
+     '🇺🇸 🚺 Sarah': 'af_sarah',
+     '🇺🇸 🚺 Nova': 'af_nova',
+     '🇺🇸 🚺 Sky': 'af_sky',
+     '🇺🇸 🚺 Alloy': 'af_alloy',
+     '🇺🇸 🚺 Jessica': 'af_jessica',
+     '🇺🇸 🚺 River': 'af_river',
+     '🇺🇸 🚹 Michael': 'am_michael',
+     '🇺🇸 🚹 Fenrir': 'am_fenrir',
+     '🇺🇸 🚹 Puck': 'am_puck',
+     '🇺🇸 🚹 Echo': 'am_echo',
+     '🇺🇸 🚹 Eric': 'am_eric',
+     '🇺🇸 🚹 Liam': 'am_liam',
+     '🇺🇸 🚹 Onyx': 'am_onyx',
+     '🇺🇸 🚹 Santa': 'am_santa',
+     '🇺🇸 🚹 Adam': 'am_adam',
+     '🇬🇧 🚺 Emma': 'bf_emma',
+     '🇬🇧 🚺 Isabella': 'bf_isabella',
+     '🇬🇧 🚺 Alice': 'bf_alice',
+     '🇬🇧 🚺 Lily': 'bf_lily',
+     '🇬🇧 🚹 George': 'bm_george',
+     '🇬🇧 🚹 Fable': 'bm_fable',
+     '🇬🇧 🚹 Lewis': 'bm_lewis',
+     '🇬🇧 🚹 Daniel': 'bm_daniel',
+ }

+ for v in CHOICES.values():
+     pipelines[v[0]].load_voice(v)

+ TOKEN_NOTE = '''
+ 💡 Customize pronunciation with Markdown link syntax and /slashes/ like `[Kokoro](/kˈOkəɹO/)`
+ 💬 To adjust intonation, try punctuation `;:,.!?—…"()“”` or stress `ˈ` and `ˌ`
+ ⬇️ Lower stress `[1 level](-1)` or `[2 levels](-2)`
+ ⬆️ Raise stress 1 level `[or](+2)` 2 levels (only works on less stressed, usually short words)
+ '''

+ with gr.Blocks() as generate_tab:
+     out_audio = gr.Audio(label='Output Audio', interactive=False, streaming=False, autoplay=True)
+     generate_btn = gr.Button('Generate', variant='primary')
+     with gr.Accordion('Output Tokens', open=True):
+         out_ps = gr.Textbox(interactive=False, show_label=False, info='Tokens used to generate the audio, up to 510 context length.')
+         tokenize_btn = gr.Button('Tokenize', variant='secondary')
+         gr.Markdown(TOKEN_NOTE)
+         predict_btn = gr.Button('Predict', variant='secondary', visible=False)
+
+ STREAM_NOTE = ['⚠️ There is an unknown Gradio bug that might yield no audio the first time you click `Stream`.']
+ if CHAR_LIMIT is not None:
+     STREAM_NOTE.append(f'✂️ Each stream is capped at {CHAR_LIMIT} characters.')
+     STREAM_NOTE.append('🚀 Want more characters? You can [use Kokoro directly](https://huggingface.co/hexgrad/Kokoro-82M#usage) or duplicate this space:')
+ STREAM_NOTE = '\n\n'.join(STREAM_NOTE)
+
+ with gr.Blocks() as stream_tab:
+     out_stream = gr.Audio(label='Output Audio Stream', interactive=False, streaming=True, autoplay=True)
+     with gr.Row():
+         stream_btn = gr.Button('Stream', variant='primary')
+         stop_btn = gr.Button('Stop', variant='stop')
+     with gr.Accordion('Note', open=True):
+         gr.Markdown(STREAM_NOTE)
+         gr.DuplicateButton()
+
+ BANNER_TEXT = '''
+ [***Kokoro*** **is an open-weight TTS model with 82 million parameters.**](https://huggingface.co/hexgrad/Kokoro-82M)
+ As of January 31st, 2025, Kokoro was the most-liked [**TTS model**](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=likes) and the most-liked [**TTS space**](https://huggingface.co/spaces?sort=likes&search=tts) on Hugging Face.
+ This demo only showcases English, but you can directly use the model to access other languages.
+ '''
+ API_OPEN = os.getenv('SPACE_ID') != 'hexgrad/Kokoro-TTS'
+ API_NAME = None if API_OPEN else False
+ with gr.Blocks() as app:
+     with gr.Row():
+         gr.Markdown(BANNER_TEXT, container=True)
+     with gr.Row():
+         with gr.Column():
+             text = gr.Textbox(label='Input Text', info=f"Up to ~500 characters per Generate, or {'∞' if CHAR_LIMIT is None else CHAR_LIMIT} characters per Stream")
+             with gr.Row():
+                 voice = gr.Dropdown(list(CHOICES.items()), value='af_heart', label='Voice', info='Quality and availability vary by language')
+                 use_gpu = gr.Dropdown(
+                     [('ZeroGPU 🚀', True), ('CPU 🐌', False)],
+                     value=CUDA_AVAILABLE,
+                     label='Hardware',
+                     info='GPU is usually faster, but has a usage quota',
+                     interactive=CUDA_AVAILABLE
+                 )
+             speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
+             random_btn = gr.Button('Random Text', variant='secondary')
+         with gr.Column():
+             gr.TabbedInterface([generate_tab, stream_tab], ['Generate', 'Stream'])
+     random_btn.click(fn=get_random_text, inputs=[voice], outputs=[text], api_name=API_NAME)
+     generate_btn.click(fn=generate_first, inputs=[text, voice, speed, use_gpu], outputs=[out_audio, out_ps], api_name=API_NAME)
+     tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
+     stream_event = stream_btn.click(fn=generate_all, inputs=[text, voice, speed, use_gpu], outputs=[out_stream], api_name=API_NAME)
+     stop_btn.click(fn=None, cancels=stream_event)
+     predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)
+
+ if __name__ == '__main__':
+     app.queue(api_open=API_OPEN).launch(show_api=API_OPEN, ssr_mode=True)
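
Stripped of the Gradio wiring, the added app.py reduces to a short CPU generation path: pick a pipeline by the voice's first letter, load the voice pack, phonemize the text, index a reference style by token count, and run the model. The sketch below mirrors that path under stated assumptions: it reuses the `kokoro` calls (`KModel`, `KPipeline`, `load_voice`) and the 24 kHz output rate shown in the diff, plus `soundfile` from the removed file; the output filename and sample text are illustrative, not part of the commit.

```python
import soundfile as sf
from kokoro import KModel, KPipeline

# Mirror the Space's CPU setup: one model, one American-English ('a') pipeline.
model = KModel().to('cpu').eval()
pipeline = KPipeline(lang_code='a', model=False)
pack = pipeline.load_voice('af_heart')

text = 'Kokoro is an open-weight TTS model with 82 million parameters.'  # illustrative input
for _, ps, _ in pipeline(text, 'af_heart', 1):
    ref_s = pack[len(ps) - 1]   # reference style indexed by token count, as in generate_first
    audio = model(ps, ref_s, 1)  # the same CPU call the app makes: models[False](ps, ref_s, speed)
    sf.write('kokoro_out.wav', audio.numpy(), 24000)  # 24 kHz, matching the app's output rate
    break  # first segment only, mirroring generate_first's early return
```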