Staticaliza committed fcf7ece
Parent(s): db0ac26
Upload 12 files
- DS_Store +0 -0
- Dockerfile +25 -0
- app.py +794 -0
- finetune-cli.py +108 -0
- finetune_gradio.py +734 -0
- gradio_app.py +824 -0
- inference-cli.py +428 -0
- inference-cli.toml +10 -0
- requirements.txt +23 -0
- requirements_eval.txt +5 -0
- speech_edit.py +183 -0
- train.py +94 -0
DS_Store
ADDED
Binary file (6.15 kB)
Dockerfile
ADDED
@@ -0,0 +1,25 @@
+FROM pytorch/pytorch:2.4.0-cuda12.4-cudnn9-devel
+
+USER root
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+LABEL github_repo="https://github.com/SWivid/F5-TTS"
+
+RUN set -x \
+    && apt-get update \
+    && apt-get -y install wget curl man git less openssl libssl-dev unzip unar build-essential aria2 tmux vim \
+    && apt-get install -y openssh-server sox libsox-fmt-all libsox-fmt-mp3 libsndfile1-dev ffmpeg \
+    && rm -rf /var/lib/apt/lists/* \
+    && apt-get clean
+
+WORKDIR /workspace
+
+RUN git clone https://github.com/SWivid/F5-TTS.git \
+    && cd F5-TTS \
+    && pip install --no-cache-dir -r requirements.txt \
+    && pip install --no-cache-dir -r requirements_eval.txt
+
+ENV SHELL=/bin/bash
+
+WORKDIR /workspace/F5-TTS
app.py
ADDED
@@ -0,0 +1,794 @@
+import re
+import torch
+import torchaudio
+import gradio as gr
+import numpy as np
+import tempfile
+from einops import rearrange
+from vocos import Vocos
+from pydub import AudioSegment, silence
+from model import CFM, UNetT, DiT, MMDiT
+from cached_path import cached_path
+from model.utils import (
+    load_checkpoint,
+    get_tokenizer,
+    convert_char_to_pinyin,
+    save_spectrogram,
+)
+from transformers import pipeline
+import click
+import soundfile as sf
+
+try:
+    import spaces
+    USING_SPACES = True
+except ImportError:
+    USING_SPACES = False
+
+def gpu_decorator(func):
+    if USING_SPACES:
+        return spaces.GPU(func)
+    else:
+        return func
+
+device = (
+    "cuda"
+    if torch.cuda.is_available()
+    else "mps" if torch.backends.mps.is_available() else "cpu"
+)
+
+print(f"Using {device} device")
+
+pipe = pipeline(
+    "automatic-speech-recognition",
+    model="openai/whisper-large-v3-turbo",
+    torch_dtype=torch.float16,
+    device=device,
+)
+vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")
+
+# --------------------- Settings -------------------- #
+
+target_sample_rate = 24000
+n_mel_channels = 100
+hop_length = 256
+target_rms = 0.1
+nfe_step = 32  # 16, 32
+cfg_strength = 2.0
+ode_method = "euler"
+sway_sampling_coef = -1.0
+speed = 1.0
+fix_duration = None
+
+
+def load_model(repo_name, exp_name, model_cls, model_cfg, ckpt_step):
+    ckpt_path = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
+    # ckpt_path = f"ckpts/{exp_name}/model_{ckpt_step}.pt"  # .pt | .safetensors
+    vocab_char_map, vocab_size = get_tokenizer("Emilia_ZH_EN", "pinyin")
+    model = CFM(
+        transformer=model_cls(
+            **model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels
+        ),
+        mel_spec_kwargs=dict(
+            target_sample_rate=target_sample_rate,
+            n_mel_channels=n_mel_channels,
+            hop_length=hop_length,
+        ),
+        odeint_kwargs=dict(
+            method=ode_method,
+        ),
+        vocab_char_map=vocab_char_map,
+    ).to(device)
+
+    model = load_checkpoint(model, ckpt_path, device, use_ema=True)
+
+    return model
+
+
+# load models
+F5TTS_model_cfg = dict(
+    dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4
+)
+E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
+
+F5TTS_ema_model = load_model(
+    "F5-TTS", "F5TTS_Base", DiT, F5TTS_model_cfg, 1200000
+)
+E2TTS_ema_model = load_model(
+    "E2-TTS", "E2TTS_Base", UNetT, E2TTS_model_cfg, 1200000
+)
+
+def chunk_text(text, max_chars=135):
+    """
+    Splits the input text into chunks, each with a maximum number of characters.
+
+    Args:
+        text (str): The text to be split.
+        max_chars (int): The maximum number of characters per chunk.
+
+    Returns:
+        List[str]: A list of text chunks.
+    """
+    chunks = []
+    current_chunk = ""
+    # Split the text into sentences based on punctuation followed by whitespace
+    sentences = re.split(r'(?<=[;:,.!?])\s+|(?<=[;:,。!?])', text)
+
+    for sentence in sentences:
+        if len(current_chunk.encode('utf-8')) + len(sentence.encode('utf-8')) <= max_chars:
+            current_chunk += sentence + " " if sentence and len(sentence[-1].encode('utf-8')) == 1 else sentence
+        else:
+            if current_chunk:
+                chunks.append(current_chunk.strip())
+            current_chunk = sentence + " " if sentence and len(sentence[-1].encode('utf-8')) == 1 else sentence
+
+    if current_chunk:
+        chunks.append(current_chunk.strip())
+
+    return chunks
+
+@gpu_decorator
+def infer_batch(ref_audio, ref_text, gen_text_batches, exp_name, remove_silence, cross_fade_duration=0.15, progress=gr.Progress()):
+    if exp_name == "F5-TTS":
+        ema_model = F5TTS_ema_model
+    elif exp_name == "E2-TTS":
+        ema_model = E2TTS_ema_model
+
+    audio, sr = ref_audio
+    if audio.shape[0] > 1:
+        audio = torch.mean(audio, dim=0, keepdim=True)
+
+    rms = torch.sqrt(torch.mean(torch.square(audio)))
+    if rms < target_rms:
+        audio = audio * target_rms / rms
+    if sr != target_sample_rate:
+        resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
+        audio = resampler(audio)
+    audio = audio.to(device)
+
+    generated_waves = []
+    spectrograms = []
+
+    if len(ref_text[-1].encode('utf-8')) == 1:
+        ref_text = ref_text + " "
+    for i, gen_text in enumerate(progress.tqdm(gen_text_batches)):
+        # Prepare the text
+        text_list = [ref_text + gen_text]
+        final_text_list = convert_char_to_pinyin(text_list)
+
+        # Calculate duration
+        ref_audio_len = audio.shape[-1] // hop_length
+        zh_pause_punc = r"。,、;:?!"
+        ref_text_len = len(ref_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, ref_text))
+        gen_text_len = len(gen_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, gen_text))
+        duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)
+
+        # inference
+        with torch.inference_mode():
+            generated, _ = ema_model.sample(
+                cond=audio,
+                text=final_text_list,
+                duration=duration,
+                steps=nfe_step,
+                cfg_strength=cfg_strength,
+                sway_sampling_coef=sway_sampling_coef,
+            )
+
+        generated = generated[:, ref_audio_len:, :]
+        generated_mel_spec = rearrange(generated, "1 n d -> 1 d n")
+        generated_wave = vocos.decode(generated_mel_spec.cpu())
+        if rms < target_rms:
+            generated_wave = generated_wave * rms / target_rms
+
+        # wav -> numpy
+        generated_wave = generated_wave.squeeze().cpu().numpy()
+
+        generated_waves.append(generated_wave)
+        spectrograms.append(generated_mel_spec[0].cpu().numpy())
+
+    # Combine all generated waves with cross-fading
+    if cross_fade_duration <= 0:
+        # Simply concatenate
+        final_wave = np.concatenate(generated_waves)
+    else:
+        final_wave = generated_waves[0]
+        for i in range(1, len(generated_waves)):
+            prev_wave = final_wave
+            next_wave = generated_waves[i]
+
+            # Calculate cross-fade samples, ensuring it does not exceed wave lengths
+            cross_fade_samples = int(cross_fade_duration * target_sample_rate)
+            cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))
+
+            if cross_fade_samples <= 0:
+                # No overlap possible, concatenate
+                final_wave = np.concatenate([prev_wave, next_wave])
+                continue
+
+            # Overlapping parts
+            prev_overlap = prev_wave[-cross_fade_samples:]
+            next_overlap = next_wave[:cross_fade_samples]
+
+            # Fade out and fade in
+            fade_out = np.linspace(1, 0, cross_fade_samples)
+            fade_in = np.linspace(0, 1, cross_fade_samples)
+
+            # Cross-faded overlap
+            cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in
+
+            # Combine
+            new_wave = np.concatenate([
+                prev_wave[:-cross_fade_samples],
+                cross_faded_overlap,
+                next_wave[cross_fade_samples:]
+            ])
+
+            final_wave = new_wave
+
+    # Remove silence
+    if remove_silence:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
+            sf.write(f.name, final_wave, target_sample_rate)
+            aseg = AudioSegment.from_file(f.name)
+            non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
+            non_silent_wave = AudioSegment.silent(duration=0)
+            for non_silent_seg in non_silent_segs:
+                non_silent_wave += non_silent_seg
+            aseg = non_silent_wave
+            aseg.export(f.name, format="wav")
+            final_wave, _ = torchaudio.load(f.name)
+        final_wave = final_wave.squeeze().cpu().numpy()
+
+    # Create a combined spectrogram
+    combined_spectrogram = np.concatenate(spectrograms, axis=1)
+
+    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram:
+        spectrogram_path = tmp_spectrogram.name
+        save_spectrogram(combined_spectrogram, spectrogram_path)
+
+    return (target_sample_rate, final_wave), spectrogram_path
+
+@gpu_decorator
+def infer(ref_audio_orig, ref_text, gen_text, exp_name, remove_silence, cross_fade_duration=0.15):
+
+    print(gen_text)
+
+    gr.Info("Converting audio...")
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
+        aseg = AudioSegment.from_file(ref_audio_orig)
+
+        non_silent_segs = silence.split_on_silence(
+            aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000
+        )
+        non_silent_wave = AudioSegment.silent(duration=0)
+        for non_silent_seg in non_silent_segs:
+            non_silent_wave += non_silent_seg
+        aseg = non_silent_wave
+
+        audio_duration = len(aseg)
+        if audio_duration > 15000:
+            gr.Warning("Audio is over 15s, clipping to only first 15s.")
+            aseg = aseg[:15000]
+        aseg.export(f.name, format="wav")
+        ref_audio = f.name
+
+    if not ref_text.strip():
+        gr.Info("No reference text provided, transcribing reference audio...")
+        ref_text = pipe(
+            ref_audio,
+            chunk_length_s=30,
+            batch_size=128,
+            generate_kwargs={"task": "transcribe"},
+            return_timestamps=False,
+        )["text"].strip()
+        gr.Info("Finished transcription")
+    else:
+        gr.Info("Using custom reference text...")
+
+    # Add the functionality to ensure it ends with ". "
+    if not ref_text.endswith(". "):
+        if ref_text.endswith("."):
+            ref_text += " "
+        else:
+            ref_text += ". "
+
+    audio, sr = torchaudio.load(ref_audio)
+
+    # Use the new chunk_text function to split gen_text
+    max_chars = int(len(ref_text.encode('utf-8')) / (audio.shape[-1] / sr) * (25 - audio.shape[-1] / sr))
+    gen_text_batches = chunk_text(gen_text, max_chars=max_chars)
+    print('ref_text', ref_text)
+    for i, batch_text in enumerate(gen_text_batches):
+        print(f'gen_text {i}', batch_text)
+
+    gr.Info(f"Generating audio using {exp_name} in {len(gen_text_batches)} batches")
+    return infer_batch((audio, sr), ref_text, gen_text_batches, exp_name, remove_silence, cross_fade_duration)
+
+
+@gpu_decorator
+def generate_podcast(script, speaker1_name, ref_audio1, ref_text1, speaker2_name, ref_audio2, ref_text2, exp_name, remove_silence):
+    # Split the script into speaker blocks
+    speaker_pattern = re.compile(f"^({re.escape(speaker1_name)}|{re.escape(speaker2_name)}):", re.MULTILINE)
+    speaker_blocks = speaker_pattern.split(script)[1:]  # Skip the first empty element
+
+    generated_audio_segments = []
+
+    for i in range(0, len(speaker_blocks), 2):
+        speaker = speaker_blocks[i]
+        text = speaker_blocks[i+1].strip()
+
+        # Determine which speaker is talking
+        if speaker == speaker1_name:
+            ref_audio = ref_audio1
+            ref_text = ref_text1
+        elif speaker == speaker2_name:
+            ref_audio = ref_audio2
+            ref_text = ref_text2
+        else:
+            continue  # Skip if the speaker is neither speaker1 nor speaker2
+
+        # Generate audio for this block
+        audio, _ = infer(ref_audio, ref_text, text, exp_name, remove_silence)
+
+        # Convert the generated audio to a numpy array
+        sr, audio_data = audio
+
+        # Save the audio data as a WAV file
+        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
+            sf.write(temp_file.name, audio_data, sr)
+            audio_segment = AudioSegment.from_wav(temp_file.name)
+
+        generated_audio_segments.append(audio_segment)
+
+        # Add a short pause between speakers
+        pause = AudioSegment.silent(duration=500)  # 500ms pause
+        generated_audio_segments.append(pause)
+
+    # Concatenate all audio segments
+    final_podcast = sum(generated_audio_segments)
+
+    # Export the final podcast
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
+        podcast_path = temp_file.name
+        final_podcast.export(podcast_path, format="wav")
+
+    return podcast_path
+
+def parse_speechtypes_text(gen_text):
+    # Pattern to find (Emotion)
+    pattern = r'\((.*?)\)'
+
+    # Split the text by the pattern
+    tokens = re.split(pattern, gen_text)
+
+    segments = []
+
+    current_emotion = 'Regular'
+
+    for i in range(len(tokens)):
+        if i % 2 == 0:
+            # This is text
+            text = tokens[i].strip()
+            if text:
+                segments.append({'emotion': current_emotion, 'text': text})
+        else:
+            # This is emotion
+            emotion = tokens[i].strip()
+            current_emotion = emotion
+
+    return segments
+
+def update_speed(new_speed):
+    global speed
+    speed = new_speed
+    return f"Speed set to: {speed}"
+
+with gr.Blocks() as app_credits:
+    gr.Markdown("""
+# Credits
+
+* [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
+* [RootingInLoad](https://github.com/RootingInLoad) for the podcast generation
+* [jpgallegoar](https://github.com/jpgallegoar) for multiple speech-type generation
+""")
+with gr.Blocks() as app_tts:
+    gr.Markdown("# Batched TTS")
+    ref_audio_input = gr.Audio(label="Reference Audio", type="filepath")
+    gen_text_input = gr.Textbox(label="Text to Generate", lines=10)
+    model_choice = gr.Radio(
+        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
+    )
+    generate_btn = gr.Button("Synthesize", variant="primary")
+    with gr.Accordion("Advanced Settings", open=False):
+        ref_text_input = gr.Textbox(
+            label="Reference Text",
+            info="Leave blank to automatically transcribe the reference audio. If you enter text it will override automatic transcription.",
+            lines=2,
+        )
+        remove_silence = gr.Checkbox(
+            label="Remove Silences",
+            info="The model tends to produce silences, especially on longer audio. We can manually remove silences if needed. Note that this is an experimental feature and may produce strange results. This will also increase generation time.",
+            value=False,
+        )
+        speed_slider = gr.Slider(
+            label="Speed",
+            minimum=0.3,
+            maximum=2.0,
+            value=speed,
+            step=0.1,
+            info="Adjust the speed of the audio.",
+        )
+        cross_fade_duration_slider = gr.Slider(
+            label="Cross-Fade Duration (s)",
+            minimum=0.0,
+            maximum=1.0,
+            value=0.15,
+            step=0.01,
+            info="Set the duration of the cross-fade between audio clips.",
+        )
+        speed_slider.change(update_speed, inputs=speed_slider)
+
+    audio_output = gr.Audio(label="Synthesized Audio")
+    spectrogram_output = gr.Image(label="Spectrogram")
+
+    generate_btn.click(
+        infer,
+        inputs=[
+            ref_audio_input,
+            ref_text_input,
+            gen_text_input,
+            model_choice,
+            remove_silence,
+            cross_fade_duration_slider,
+        ],
+        outputs=[audio_output, spectrogram_output],
+    )
+
+with gr.Blocks() as app_podcast:
+    gr.Markdown("# Podcast Generation")
+    speaker1_name = gr.Textbox(label="Speaker 1 Name")
+    ref_audio_input1 = gr.Audio(label="Reference Audio (Speaker 1)", type="filepath")
+    ref_text_input1 = gr.Textbox(label="Reference Text (Speaker 1)", lines=2)
+
+    speaker2_name = gr.Textbox(label="Speaker 2 Name")
+    ref_audio_input2 = gr.Audio(label="Reference Audio (Speaker 2)", type="filepath")
+    ref_text_input2 = gr.Textbox(label="Reference Text (Speaker 2)", lines=2)
+
+    script_input = gr.Textbox(label="Podcast Script", lines=10,
+                              placeholder="Enter the script with speaker names at the start of each block, e.g.:\nSean: How did you start studying...\n\nMeghan: I came to my interest in technology...\nIt was a long journey...\n\nSean: That's fascinating. Can you elaborate...")
+
+    podcast_model_choice = gr.Radio(
+        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
+    )
+    podcast_remove_silence = gr.Checkbox(
+        label="Remove Silences",
+        value=True,
+    )
+    generate_podcast_btn = gr.Button("Generate Podcast", variant="primary")
+    podcast_output = gr.Audio(label="Generated Podcast")
+
+    def podcast_generation(script, speaker1, ref_audio1, ref_text1, speaker2, ref_audio2, ref_text2, model, remove_silence):
+        return generate_podcast(script, speaker1, ref_audio1, ref_text1, speaker2, ref_audio2, ref_text2, model, remove_silence)
+
+    generate_podcast_btn.click(
+        podcast_generation,
+        inputs=[
+            script_input,
+            speaker1_name,
+            ref_audio_input1,
+            ref_text_input1,
+            speaker2_name,
+            ref_audio_input2,
+            ref_text_input2,
+            podcast_model_choice,
+            podcast_remove_silence,
+        ],
+        outputs=podcast_output,
+    )
+
+def parse_emotional_text(gen_text):
+    # Pattern to find (Emotion)
+    pattern = r'\((.*?)\)'
+
+    # Split the text by the pattern
+    tokens = re.split(pattern, gen_text)
+
+    segments = []
+
+    current_emotion = 'Regular'
+
+    for i in range(len(tokens)):
+        if i % 2 == 0:
+            # This is text
+            text = tokens[i].strip()
+            if text:
+                segments.append({'emotion': current_emotion, 'text': text})
+        else:
+            # This is emotion
+            emotion = tokens[i].strip()
+            current_emotion = emotion
+
+    return segments
+
+with gr.Blocks() as app_emotional:
+    # New section for emotional generation
+    gr.Markdown(
+        """
+# Multiple Speech-Type Generation
+
+This section allows you to upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the "Add Speech Type" button. Enter your text in the format shown below, and the system will generate speech using the appropriate emotions. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified.
+
+**Example Input:**
+
+(Regular) Hello, I'd like to order a sandwich please. (Surprised) What do you mean you're out of bread? (Sad) I really wanted a sandwich though... (Angry) You know what, darn you and your little shop, you suck! (Whisper) I'll just go back home and cry now. (Shouting) Why me?!
+"""
+    )
+
+    gr.Markdown("Upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button.")
+
+    # Regular speech type (mandatory)
+    with gr.Row():
+        regular_name = gr.Textbox(value='Regular', label='Speech Type Name', interactive=False)
+        regular_audio = gr.Audio(label='Regular Reference Audio', type='filepath')
+        regular_ref_text = gr.Textbox(label='Reference Text (Regular)', lines=2)
+
+    # Additional speech types (up to 99 more)
+    max_speech_types = 100
+    speech_type_names = []
+    speech_type_audios = []
+    speech_type_ref_texts = []
+    speech_type_delete_btns = []
+
+    for i in range(max_speech_types - 1):
+        with gr.Row():
+            name_input = gr.Textbox(label='Speech Type Name', visible=False)
+            audio_input = gr.Audio(label='Reference Audio', type='filepath', visible=False)
+            ref_text_input = gr.Textbox(label='Reference Text', lines=2, visible=False)
+            delete_btn = gr.Button("Delete", variant="secondary", visible=False)
+        speech_type_names.append(name_input)
+        speech_type_audios.append(audio_input)
+        speech_type_ref_texts.append(ref_text_input)
+        speech_type_delete_btns.append(delete_btn)
+
+    # Button to add speech type
+    add_speech_type_btn = gr.Button("Add Speech Type")
+
+    # Keep track of current number of speech types
+    speech_type_count = gr.State(value=0)
+
+    # Function to add a speech type
+    def add_speech_type_fn(speech_type_count):
+        if speech_type_count < max_speech_types - 1:
+            speech_type_count += 1
+            # Prepare updates for the components
+            name_updates = []
+            audio_updates = []
+            ref_text_updates = []
+            delete_btn_updates = []
+            for i in range(max_speech_types - 1):
+                if i < speech_type_count:
+                    name_updates.append(gr.update(visible=True))
+                    audio_updates.append(gr.update(visible=True))
+                    ref_text_updates.append(gr.update(visible=True))
+                    delete_btn_updates.append(gr.update(visible=True))
+                else:
+                    name_updates.append(gr.update())
+                    audio_updates.append(gr.update())
+                    ref_text_updates.append(gr.update())
+                    delete_btn_updates.append(gr.update())
+        else:
+            # Optionally, show a warning
+            # gr.Warning("Maximum number of speech types reached.")
+            name_updates = [gr.update() for _ in range(max_speech_types - 1)]
+            audio_updates = [gr.update() for _ in range(max_speech_types - 1)]
+            ref_text_updates = [gr.update() for _ in range(max_speech_types - 1)]
+            delete_btn_updates = [gr.update() for _ in range(max_speech_types - 1)]
+        return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates
+
+    add_speech_type_btn.click(
+        add_speech_type_fn,
+        inputs=speech_type_count,
+        outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
+    )
+
+    # Function to delete a speech type
+    def make_delete_speech_type_fn(index):
+        def delete_speech_type_fn(speech_type_count):
+            # Prepare updates
+            name_updates = []
+            audio_updates = []
+            ref_text_updates = []
+            delete_btn_updates = []
+
+            for i in range(max_speech_types - 1):
+                if i == index:
+                    name_updates.append(gr.update(visible=False, value=''))
+                    audio_updates.append(gr.update(visible=False, value=None))
+                    ref_text_updates.append(gr.update(visible=False, value=''))
+                    delete_btn_updates.append(gr.update(visible=False))
+                else:
+                    name_updates.append(gr.update())
+                    audio_updates.append(gr.update())
+                    ref_text_updates.append(gr.update())
+                    delete_btn_updates.append(gr.update())
+
+            speech_type_count = max(0, speech_type_count - 1)
+
+            return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates
+
+        return delete_speech_type_fn
+
+    for i, delete_btn in enumerate(speech_type_delete_btns):
+        delete_fn = make_delete_speech_type_fn(i)
+        delete_btn.click(
+            delete_fn,
+            inputs=speech_type_count,
+            outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
+        )
+
+    # Text input for the prompt
+    gen_text_input_emotional = gr.Textbox(label="Text to Generate", lines=10)
+
+    # Model choice
+    model_choice_emotional = gr.Radio(
+        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
+    )
+
+    with gr.Accordion("Advanced Settings", open=False):
+        remove_silence_emotional = gr.Checkbox(
+            label="Remove Silences",
+            value=True,
+        )
+
+    # Generate button
+    generate_emotional_btn = gr.Button("Generate Emotional Speech", variant="primary")
+
+    # Output audio
+    audio_output_emotional = gr.Audio(label="Synthesized Audio")
+    @gpu_decorator
+    def generate_emotional_speech(
+        regular_audio,
+        regular_ref_text,
+        gen_text,
+        *args,
+    ):
+        num_additional_speech_types = max_speech_types - 1
+        speech_type_names_list = args[:num_additional_speech_types]
+        speech_type_audios_list = args[num_additional_speech_types:2 * num_additional_speech_types]
+        speech_type_ref_texts_list = args[2 * num_additional_speech_types:3 * num_additional_speech_types]
+        model_choice = args[3 * num_additional_speech_types]
+        remove_silence = args[3 * num_additional_speech_types + 1]
+
+        # Collect the speech types and their audios into a dict
+        speech_types = {'Regular': {'audio': regular_audio, 'ref_text': regular_ref_text}}
+
+        for name_input, audio_input, ref_text_input in zip(speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list):
+            if name_input and audio_input:
+                speech_types[name_input] = {'audio': audio_input, 'ref_text': ref_text_input}
+
+        # Parse the gen_text into segments
+        segments = parse_speechtypes_text(gen_text)
+
+        # For each segment, generate speech
+        generated_audio_segments = []
+        current_emotion = 'Regular'
+
+        for segment in segments:
+            emotion = segment['emotion']
+            text = segment['text']
+
+            if emotion in speech_types:
+                current_emotion = emotion
+            else:
+                # If emotion not available, default to Regular
+                current_emotion = 'Regular'
+
+            ref_audio = speech_types[current_emotion]['audio']
+            ref_text = speech_types[current_emotion].get('ref_text', '')
+
+            # Generate speech for this segment
+            audio, _ = infer(ref_audio, ref_text, text, model_choice, remove_silence, 0)
+            sr, audio_data = audio
+
+            generated_audio_segments.append(audio_data)
+
+        # Concatenate all audio segments
+        if generated_audio_segments:
+            final_audio_data = np.concatenate(generated_audio_segments)
+            return (sr, final_audio_data)
+        else:
+            gr.Warning("No audio generated.")
+            return None
+
+    generate_emotional_btn.click(
+        generate_emotional_speech,
+        inputs=[
+            regular_audio,
+            regular_ref_text,
+            gen_text_input_emotional,
+        ] + speech_type_names + speech_type_audios + speech_type_ref_texts + [
+            model_choice_emotional,
+            remove_silence_emotional,
+        ],
+        outputs=audio_output_emotional,
+    )
+
+    # Validation function to disable Generate button if speech types are missing
+    def validate_speech_types(
+        gen_text,
+        regular_name,
+        *args
+    ):
+        num_additional_speech_types = max_speech_types - 1
+        speech_type_names_list = args[:num_additional_speech_types]
+
+        # Collect the speech types names
+        speech_types_available = set()
+        if regular_name:
+            speech_types_available.add(regular_name)
+        for name_input in speech_type_names_list:
+            if name_input:
+                speech_types_available.add(name_input)
+
+        # Parse the gen_text to get the speech types used
+        segments = parse_emotional_text(gen_text)
+        speech_types_in_text = set(segment['emotion'] for segment in segments)
+
+        # Check if all speech types in text are available
+        missing_speech_types = speech_types_in_text - speech_types_available
+
+        if missing_speech_types:
+            # Disable the generate button
+            return gr.update(interactive=False)
+        else:
+            # Enable the generate button
+            return gr.update(interactive=True)
+
+    gen_text_input_emotional.change(
+        validate_speech_types,
+        inputs=[gen_text_input_emotional, regular_name] + speech_type_names,
+        outputs=generate_emotional_btn
+    )
+with gr.Blocks() as app:
+    gr.Markdown(
+        """
+# E2/F5 TTS
+
+This is a local web UI for F5 TTS with advanced batch processing support. This app supports the following TTS models:
+
+* [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching)
+* [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS)
+
+The checkpoints support English and Chinese.
+
+If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 15s, and shortening your prompt.
+
+**NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<15s). Ensure the audio is fully uploaded before generating.**
+"""
+    )
+    gr.TabbedInterface([app_tts, app_podcast, app_emotional, app_credits], ["TTS", "Podcast", "Multi-Style", "Credits"])
+
+@click.command()
+@click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
+@click.option("--host", "-H", default=None, help="Host to run the app on")
+@click.option(
+    "--share",
+    "-s",
+    default=False,
+    is_flag=True,
+    help="Share the app via Gradio share link",
+)
+@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access")
+def main(port, host, share, api):
+    global app
+    print(f"Starting app...")
+    app.queue(api_open=api).launch(
+        server_name=host, server_port=port, share=share, show_api=api
+    )
+
+
+if __name__ == "__main__":
+    if not USING_SPACES:
+        main()
+    else:
+        app.queue().launch()
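For reference, the cross-fade that infer_batch uses to join per-batch waveforms can be exercised in isolation. A minimal standalone numpy sketch follows (not part of this commit; the function name and test signals are illustrative):

import numpy as np

def cross_fade(prev_wave: np.ndarray, next_wave: np.ndarray,
               duration_s: float, sample_rate: int) -> np.ndarray:
    """Linearly cross-fade two mono waveforms, mirroring infer_batch's logic."""
    n = int(duration_s * sample_rate)
    # Never fade over more samples than either waveform has
    n = min(n, len(prev_wave), len(next_wave))
    if n <= 0:
        return np.concatenate([prev_wave, next_wave])
    fade_out = np.linspace(1.0, 0.0, n)
    fade_in = np.linspace(0.0, 1.0, n)
    overlap = prev_wave[-n:] * fade_out + next_wave[:n] * fade_in
    return np.concatenate([prev_wave[:-n], overlap, next_wave[n:]])

# Example: two 1 s sine bursts at 24 kHz joined with a 0.15 s cross-fade
sr = 24000
t = np.linspace(0, 1, sr, endpoint=False)
merged = cross_fade(np.sin(2 * np.pi * 220 * t),
                    np.sin(2 * np.pi * 330 * t), 0.15, sr)

The two linear ramps sum to one at every sample of the overlap, so the joined region keeps roughly constant amplitude instead of clicking at the seam.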
finetune-cli.py
ADDED
@@ -0,0 +1,108 @@
+import argparse
+from model import CFM, UNetT, DiT, MMDiT, Trainer
+from model.utils import get_tokenizer
+from model.dataset import load_dataset
+from cached_path import cached_path
+import shutil, os
+# -------------------------- Dataset Settings --------------------------- #
+target_sample_rate = 24000
+n_mel_channels = 100
+hop_length = 256
+
+tokenizer = "pinyin"  # 'pinyin', 'char', or 'custom'
+tokenizer_path = None  # if tokenizer = 'custom', define the path to the tokenizer you want to use (should be vocab.txt)
+
+# -------------------------- Argument Parsing --------------------------- #
+def parse_args():
+    parser = argparse.ArgumentParser(description='Train CFM Model')
+
+    parser.add_argument('--exp_name', type=str, default="F5TTS_Base", choices=["F5TTS_Base", "E2TTS_Base"], help='Experiment name')
+    parser.add_argument('--dataset_name', type=str, default="Emilia_ZH_EN", help='Name of the dataset to use')
+    parser.add_argument('--learning_rate', type=float, default=1e-4, help='Learning rate for training')
+    parser.add_argument('--batch_size_per_gpu', type=int, default=256, help='Batch size per GPU')
+    parser.add_argument('--batch_size_type', type=str, default="frame", choices=["frame", "sample"], help='Batch size type')
+    parser.add_argument('--max_samples', type=int, default=16, help='Max sequences per batch')
+    parser.add_argument('--grad_accumulation_steps', type=int, default=1, help='Gradient accumulation steps')
+    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Max gradient norm for clipping')
+    parser.add_argument('--epochs', type=int, default=10, help='Number of training epochs')
+    parser.add_argument('--num_warmup_updates', type=int, default=5, help='Warmup steps')
+    parser.add_argument('--save_per_updates', type=int, default=10, help='Save checkpoint every X steps')
+    parser.add_argument('--last_per_steps', type=int, default=10, help='Save last checkpoint every X steps')
+    parser.add_argument('--finetune', type=bool, default=True, help='Use Finetune')
+
+    return parser.parse_args()
+
+# -------------------------- Training Settings -------------------------- #
+
+def main():
+    args = parse_args()
+
+
+    # Model parameters based on experiment name
+    if args.exp_name == "F5TTS_Base":
+        wandb_resume_id = None
+        model_cls = DiT
+        model_cfg = dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4)
+        if args.finetune:
+            ckpt_path = str(cached_path(f"hf://SWivid/F5-TTS/F5TTS_Base/model_1200000.pt"))
+    elif args.exp_name == "E2TTS_Base":
+        wandb_resume_id = None
+        model_cls = UNetT
+        model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)
+        if args.finetune:
+            ckpt_path = str(cached_path(f"hf://SWivid/E2-TTS/E2TTS_Base/model_1200000.pt"))
+
+    if args.finetune:
+        path_ckpt = os.path.join("ckpts", args.dataset_name)
+        if os.path.isdir(path_ckpt) == False:
+            os.makedirs(path_ckpt, exist_ok=True)
+            shutil.copy2(ckpt_path, os.path.join(path_ckpt, os.path.basename(ckpt_path)))
+
+    checkpoint_path = os.path.join("ckpts", args.dataset_name)
+
+    # Use the dataset_name provided in the command line
+    tokenizer_path = args.dataset_name if tokenizer != "custom" else tokenizer_path
+    vocab_char_map, vocab_size = get_tokenizer(tokenizer_path, tokenizer)
+
+    mel_spec_kwargs = dict(
+        target_sample_rate=target_sample_rate,
+        n_mel_channels=n_mel_channels,
+        hop_length=hop_length,
+    )
+
+    e2tts = CFM(
+        transformer=model_cls(
+            **model_cfg,
+            text_num_embeds=vocab_size,
+            mel_dim=n_mel_channels
+        ),
+        mel_spec_kwargs=mel_spec_kwargs,
+        vocab_char_map=vocab_char_map,
+    )
+
+    trainer = Trainer(
+        e2tts,
+        args.epochs,
+        args.learning_rate,
+        num_warmup_updates=args.num_warmup_updates,
+        save_per_updates=args.save_per_updates,
+        checkpoint_path=checkpoint_path,
+        batch_size=args.batch_size_per_gpu,
+        batch_size_type=args.batch_size_type,
+        max_samples=args.max_samples,
+        grad_accumulation_steps=args.grad_accumulation_steps,
+        max_grad_norm=args.max_grad_norm,
+        wandb_project="CFM-TTS",
+        wandb_run_name=args.exp_name,
+        wandb_resume_id=wandb_resume_id,
+        last_per_steps=args.last_per_steps,
+    )
+
+    train_dataset = load_dataset(args.dataset_name, tokenizer, mel_spec_kwargs=mel_spec_kwargs)
+    trainer.train(train_dataset,
+                  resumable_with_seed=666  # seed for shuffling dataset
+                  )
+
+
+if __name__ == '__main__':
+    main()
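For context, the companion finetune_gradio.py below launches this script through `accelerate launch`. A minimal programmatic sketch of an equivalent launch (not part of this commit; the dataset name "my_dataset" is hypothetical, and accelerate is assumed to be installed):

import subprocess

# Mirrors the command finetune_gradio.py builds, using the defaults above
cmd = [
    "accelerate", "launch", "finetune-cli.py",
    "--exp_name", "F5TTS_Base",
    "--dataset_name", "my_dataset",
    "--learning_rate", "1e-4",
    "--batch_size_per_gpu", "256",
    "--epochs", "10",
]
subprocess.run(cmd, check=True)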
finetune_gradio.py
ADDED
@@ -0,0 +1,734 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os,sys
|
2 |
+
|
3 |
+
from transformers import pipeline
|
4 |
+
import gradio as gr
|
5 |
+
import torch
|
6 |
+
import click
|
7 |
+
import torchaudio
|
8 |
+
from glob import glob
|
9 |
+
import librosa
|
10 |
+
import numpy as np
|
11 |
+
from scipy.io import wavfile
|
12 |
+
import shutil
|
13 |
+
import time
|
14 |
+
|
15 |
+
import json
|
16 |
+
from model.utils import convert_char_to_pinyin
|
17 |
+
import signal
|
18 |
+
import psutil
|
19 |
+
import platform
|
20 |
+
import subprocess
|
21 |
+
from datasets.arrow_writer import ArrowWriter
|
22 |
+
|
23 |
+
import json
|
24 |
+
|
25 |
+
training_process = None
|
26 |
+
system = platform.system()
|
27 |
+
python_executable = sys.executable or "python"
|
28 |
+
|
29 |
+
path_data="data"
|
30 |
+
|
31 |
+
device = (
|
32 |
+
"cuda"
|
33 |
+
if torch.cuda.is_available()
|
34 |
+
else "mps" if torch.backends.mps.is_available() else "cpu"
|
35 |
+
)
|
36 |
+
|
37 |
+
pipe = None
|
38 |
+
|
39 |
+
# Load metadata
|
40 |
+
def get_audio_duration(audio_path):
|
41 |
+
"""Calculate the duration of an audio file."""
|
42 |
+
audio, sample_rate = torchaudio.load(audio_path)
|
43 |
+
num_channels = audio.shape[0]
|
44 |
+
return audio.shape[1] / (sample_rate * num_channels)
|
45 |
+
|
46 |
+
def clear_text(text):
|
47 |
+
"""Clean and prepare text by lowering the case and stripping whitespace."""
|
48 |
+
return text.lower().strip()
|
49 |
+
|
50 |
+
def get_rms(y,frame_length=2048,hop_length=512,pad_mode="constant",): # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
|
51 |
+
padding = (int(frame_length // 2), int(frame_length // 2))
|
52 |
+
y = np.pad(y, padding, mode=pad_mode)
|
53 |
+
|
54 |
+
axis = -1
|
55 |
+
# put our new within-frame axis at the end for now
|
56 |
+
out_strides = y.strides + tuple([y.strides[axis]])
|
57 |
+
# Reduce the shape on the framing axis
|
58 |
+
x_shape_trimmed = list(y.shape)
|
59 |
+
x_shape_trimmed[axis] -= frame_length - 1
|
60 |
+
out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
|
61 |
+
xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
|
62 |
+
if axis < 0:
|
63 |
+
target_axis = axis - 1
|
64 |
+
else:
|
65 |
+
target_axis = axis + 1
|
66 |
+
xw = np.moveaxis(xw, -1, target_axis)
|
67 |
+
# Downsample along the target axis
|
68 |
+
slices = [slice(None)] * xw.ndim
|
69 |
+
slices[axis] = slice(0, None, hop_length)
|
70 |
+
x = xw[tuple(slices)]
|
71 |
+
|
72 |
+
# Calculate power
|
73 |
+
power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
|
74 |
+
|
75 |
+
return np.sqrt(power)
|
76 |
+
|
77 |
+
class Slicer: # https://github.com/RVC-Boss/GPT-SoVITS/blob/main/tools/slicer2.py
|
78 |
+
def __init__(
|
79 |
+
self,
|
80 |
+
sr: int,
|
81 |
+
threshold: float = -40.0,
|
82 |
+
min_length: int = 2000,
|
83 |
+
min_interval: int = 300,
|
84 |
+
hop_size: int = 20,
|
85 |
+
max_sil_kept: int = 2000,
|
86 |
+
):
|
87 |
+
if not min_length >= min_interval >= hop_size:
|
88 |
+
raise ValueError(
|
89 |
+
"The following condition must be satisfied: min_length >= min_interval >= hop_size"
|
90 |
+
)
|
91 |
+
if not max_sil_kept >= hop_size:
|
92 |
+
raise ValueError(
|
93 |
+
"The following condition must be satisfied: max_sil_kept >= hop_size"
|
94 |
+
)
|
95 |
+
min_interval = sr * min_interval / 1000
|
96 |
+
self.threshold = 10 ** (threshold / 20.0)
|
97 |
+
self.hop_size = round(sr * hop_size / 1000)
|
98 |
+
self.win_size = min(round(min_interval), 4 * self.hop_size)
|
99 |
+
self.min_length = round(sr * min_length / 1000 / self.hop_size)
|
100 |
+
self.min_interval = round(min_interval / self.hop_size)
|
101 |
+
self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
|
102 |
+
|
103 |
+
def _apply_slice(self, waveform, begin, end):
|
104 |
+
if len(waveform.shape) > 1:
|
105 |
+
return waveform[
|
106 |
+
:, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
|
107 |
+
]
|
108 |
+
else:
|
109 |
+
return waveform[
|
110 |
+
begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
|
111 |
+
]
|
112 |
+
|
113 |
+
# @timeit
|
114 |
+
def slice(self, waveform):
|
115 |
+
if len(waveform.shape) > 1:
|
116 |
+
samples = waveform.mean(axis=0)
|
117 |
+
else:
|
118 |
+
samples = waveform
|
119 |
+
if samples.shape[0] <= self.min_length:
|
120 |
+
return [waveform]
|
121 |
+
rms_list = get_rms(
|
122 |
+
y=samples, frame_length=self.win_size, hop_length=self.hop_size
|
123 |
+
).squeeze(0)
|
124 |
+
sil_tags = []
|
125 |
+
silence_start = None
|
126 |
+
clip_start = 0
|
127 |
+
for i, rms in enumerate(rms_list):
|
128 |
+
# Keep looping while frame is silent.
|
129 |
+
if rms < self.threshold:
|
130 |
+
# Record start of silent frames.
|
131 |
+
if silence_start is None:
|
132 |
+
silence_start = i
|
133 |
+
continue
|
134 |
+
# Keep looping while frame is not silent and silence start has not been recorded.
|
135 |
+
if silence_start is None:
|
136 |
+
continue
|
137 |
+
# Clear recorded silence start if interval is not enough or clip is too short
|
138 |
+
is_leading_silence = silence_start == 0 and i > self.max_sil_kept
|
139 |
+
need_slice_middle = (
|
140 |
+
i - silence_start >= self.min_interval
|
141 |
+
and i - clip_start >= self.min_length
|
142 |
+
)
|
143 |
+
if not is_leading_silence and not need_slice_middle:
|
144 |
+
silence_start = None
|
145 |
+
continue
|
146 |
+
# Need slicing. Record the range of silent frames to be removed.
|
147 |
+
if i - silence_start <= self.max_sil_kept:
|
148 |
+
pos = rms_list[silence_start : i + 1].argmin() + silence_start
|
149 |
+
if silence_start == 0:
|
150 |
+
sil_tags.append((0, pos))
|
151 |
+
else:
|
152 |
+
sil_tags.append((pos, pos))
|
153 |
+
clip_start = pos
|
154 |
+
elif i - silence_start <= self.max_sil_kept * 2:
|
155 |
+
pos = rms_list[
|
156 |
+
i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
|
157 |
+
].argmin()
|
158 |
+
pos += i - self.max_sil_kept
|
159 |
+
pos_l = (
|
160 |
+
rms_list[
|
161 |
+
silence_start : silence_start + self.max_sil_kept + 1
|
162 |
+
].argmin()
|
163 |
+
+ silence_start
|
164 |
+
)
|
165 |
+
pos_r = (
|
166 |
+
rms_list[i - self.max_sil_kept : i + 1].argmin()
|
167 |
+
+ i
|
168 |
+
- self.max_sil_kept
|
169 |
+
)
|
170 |
+
if silence_start == 0:
|
171 |
+
sil_tags.append((0, pos_r))
|
172 |
+
clip_start = pos_r
|
173 |
+
else:
|
174 |
+
sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
|
175 |
+
clip_start = max(pos_r, pos)
|
176 |
+
else:
|
177 |
+
pos_l = (
|
178 |
+
rms_list[
|
179 |
+
silence_start : silence_start + self.max_sil_kept + 1
|
180 |
+
].argmin()
|
181 |
+
+ silence_start
|
182 |
+
)
|
183 |
+
pos_r = (
|
184 |
+
rms_list[i - self.max_sil_kept : i + 1].argmin()
|
185 |
+
+ i
|
186 |
+
- self.max_sil_kept
|
187 |
+
)
|
188 |
+
if silence_start == 0:
|
189 |
+
sil_tags.append((0, pos_r))
|
190 |
+
else:
|
191 |
+
sil_tags.append((pos_l, pos_r))
|
192 |
+
clip_start = pos_r
|
193 |
+
silence_start = None
|
194 |
+
# Deal with trailing silence.
|
195 |
+
total_frames = rms_list.shape[0]
|
196 |
+
if (
|
197 |
+
silence_start is not None
|
198 |
+
and total_frames - silence_start >= self.min_interval
|
199 |
+
):
|
200 |
+
silence_end = min(total_frames, silence_start + self.max_sil_kept)
|
201 |
+
pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
|
202 |
+
sil_tags.append((pos, total_frames + 1))
|
203 |
+
# Apply and return slices.
|
204 |
+
####音频+起始时间+终止时间
|
205 |
+
if len(sil_tags) == 0:
|
206 |
+
return [[waveform,0,int(total_frames*self.hop_size)]]
|
207 |
+
else:
|
208 |
+
chunks = []
|
209 |
+
if sil_tags[0][0] > 0:
|
210 |
+
chunks.append([self._apply_slice(waveform, 0, sil_tags[0][0]),0,int(sil_tags[0][0]*self.hop_size)])
|
211 |
+
for i in range(len(sil_tags) - 1):
|
212 |
+
chunks.append(
|
213 |
+
[self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]),int(sil_tags[i][1]*self.hop_size),int(sil_tags[i + 1][0]*self.hop_size)]
|
214 |
+
)
|
215 |
+
if sil_tags[-1][1] < total_frames:
|
216 |
+
chunks.append(
|
217 |
+
[self._apply_slice(waveform, sil_tags[-1][1], total_frames),int(sil_tags[-1][1]*self.hop_size),int(total_frames*self.hop_size)]
|
218 |
+
)
|
219 |
+
return chunks
|
220 |
+
|
221 |
+
#terminal
|
222 |
+
def terminate_process_tree(pid, including_parent=True):
|
223 |
+
try:
|
224 |
+
parent = psutil.Process(pid)
|
225 |
+
except psutil.NoSuchProcess:
|
226 |
+
# Process already terminated
|
227 |
+
return
|
228 |
+
|
229 |
+
children = parent.children(recursive=True)
|
230 |
+
for child in children:
|
231 |
+
try:
|
232 |
+
os.kill(child.pid, signal.SIGTERM) # or signal.SIGKILL
|
233 |
+
except OSError:
|
234 |
+
pass
|
235 |
+
if including_parent:
|
236 |
+
try:
|
237 |
+
os.kill(parent.pid, signal.SIGTERM) # or signal.SIGKILL
|
238 |
+
except OSError:
|
239 |
+
pass
|
240 |
+
|
241 |
+
def terminate_process(pid):
|
242 |
+
if system == "Windows":
|
243 |
+
cmd = f"taskkill /t /f /pid {pid}"
|
244 |
+
os.system(cmd)
|
245 |
+
else:
|
246 |
+
terminate_process_tree(pid)
|
247 |
+
|
248 |
+
def start_training(
    dataset_name="",
    exp_name="F5TTS_Base",
    learning_rate=1e-4,
    batch_size_per_gpu=400,
    batch_size_type="frame",
    max_samples=64,
    grad_accumulation_steps=1,
    max_grad_norm=1.0,
    epochs=11,
    num_warmup_updates=200,
    save_per_updates=400,
    last_per_steps=800,
    finetune=True,
):
    global training_process

    path_project = os.path.join(path_data, dataset_name + "_pinyin")

    if not os.path.isdir(path_project):
        yield f"There is no project named {dataset_name}", gr.update(interactive=True), gr.update(interactive=False)
        return

    file_raw = os.path.join(path_project, "raw.arrow")
    if not os.path.isfile(file_raw):
        yield f"There is no file {file_raw}", gr.update(interactive=True), gr.update(interactive=False)
        return

    # Check if a training process is already running
    if training_process is not None:
        return "Training is already running!", gr.update(interactive=False), gr.update(interactive=True)

    yield "start train", gr.update(interactive=False), gr.update(interactive=False)

    # Command to run the training script with the specified arguments
    cmd = (
        f"accelerate launch finetune-cli.py --exp_name {exp_name} "
        f"--learning_rate {learning_rate} "
        f"--batch_size_per_gpu {batch_size_per_gpu} "
        f"--batch_size_type {batch_size_type} "
        f"--max_samples {max_samples} "
        f"--grad_accumulation_steps {grad_accumulation_steps} "
        f"--max_grad_norm {max_grad_norm} "
        f"--epochs {epochs} "
        f"--num_warmup_updates {num_warmup_updates} "
        f"--save_per_updates {save_per_updates} "
        f"--last_per_steps {last_per_steps} "
        f"--dataset_name {dataset_name}"
    )
    if finetune:
        cmd += f" --finetune {finetune}"

    print(cmd)

    try:
        # Start the training process
        training_process = subprocess.Popen(cmd, shell=True)

        time.sleep(5)
        yield "check terminal for wandb", gr.update(interactive=False), gr.update(interactive=True)

        # Wait for the training process to finish
        training_process.wait()
        time.sleep(1)

        if training_process is None:
            text_info = "train stop"
        else:
            text_info = "train complete !"

    except Exception as e:  # Catch all exceptions
        # Ensure that we reset the training process variable in case of an error
        text_info = f"An error occurred: {str(e)}"
        training_process = None

    yield text_info, gr.update(interactive=True), gr.update(interactive=False)

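# --- Example of the command start_training builds (illustrative values, not
# output captured from a real run):
#
#   accelerate launch finetune-cli.py --exp_name F5TTS_Base --learning_rate 0.0001 \
#     --batch_size_per_gpu 400 --batch_size_type frame --max_samples 64 \
#     --grad_accumulation_steps 1 --max_grad_norm 1.0 --epochs 11 \
#     --num_warmup_updates 200 --save_per_updates 400 --last_per_steps 800 \
#     --dataset_name my_speak --finetune True
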
def stop_training():
    global training_process
    if training_process is None:
        return "Train not running!", gr.update(interactive=True), gr.update(interactive=False)
    terminate_process_tree(training_process.pid)
    training_process = None
    return "train stop", gr.update(interactive=True), gr.update(interactive=False)

def create_data_project(name):
    name += "_pinyin"
    os.makedirs(os.path.join(path_data, name), exist_ok=True)
    os.makedirs(os.path.join(path_data, name, "dataset"), exist_ok=True)

def transcribe(file_audio, language="english"):
    global pipe

    if pipe is None:
        pipe = pipeline(
            "automatic-speech-recognition",
            model="openai/whisper-large-v3-turbo",
            torch_dtype=torch.float16,
            device=device,
        )

    text_transcribe = pipe(
        file_audio,
        chunk_length_s=30,
        batch_size=128,
        generate_kwargs={"task": "transcribe", "language": language},
        return_timestamps=False,
    )["text"].strip()
    return text_transcribe

def transcribe_all(name_project, audio_files, language, user=False, progress=gr.Progress()):
    name_project += "_pinyin"
    path_project = os.path.join(path_data, name_project)
    path_dataset = os.path.join(path_project, "dataset")
    path_project_wavs = os.path.join(path_project, "wavs")
    file_metadata = os.path.join(path_project, "metadata.csv")

    if audio_files is None:
        return "You need to load an audio file."

    if os.path.isdir(path_project_wavs):
        shutil.rmtree(path_project_wavs)

    if os.path.isfile(file_metadata):
        os.remove(file_metadata)

    os.makedirs(path_project_wavs, exist_ok=True)

    if user:
        file_audios = [file for format in ("*.wav", "*.ogg", "*.opus", "*.mp3", "*.flac") for file in glob(os.path.join(path_dataset, format))]
        if file_audios == []:
            return "No audio file was found in the dataset."
    else:
        file_audios = audio_files

    alpha = 0.5
    _max = 1.0
    slicer = Slicer(24000)

    num = 0
    error_num = 0
    data = ""
    for file_audio in progress.tqdm(file_audios, desc="transcribe files", total=len(file_audios)):

        audio, _ = librosa.load(file_audio, sr=24000, mono=True)

        list_slicer = slicer.slice(audio)
        for chunk, start, end in progress.tqdm(list_slicer, total=len(list_slicer), desc="slicer files"):

            name_segment = f"segment_{num}"
            file_segment = os.path.join(path_project_wavs, f"{name_segment}.wav")

            tmp_max = np.abs(chunk).max()
            if tmp_max > 1:
                chunk /= tmp_max
            # mix a peak-normalized copy (alpha) with the raw chunk (1 - alpha)
            chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk
            wavfile.write(file_segment, 24000, (chunk * 32767).astype(np.int16))

            try:
                text = transcribe(file_segment, language)
                text = text.lower().strip().replace('"', "")

                data += f"{name_segment}|{text}\n"

                num += 1
            except:
                error_num += 1

    with open(file_metadata, "w", encoding="utf-8") as f:
        f.write(data)

    if error_num != 0:  # error_num is a count, not a list
        error_text = f"\nerror files : {error_num}"
    else:
        error_text = ""

    return f"transcribe complete samples : {num}\npath : {path_project_wavs}{error_text}"

def format_seconds_to_hms(seconds):
    hours = int(seconds / 3600)
    minutes = int((seconds % 3600) / 60)
    seconds = seconds % 60
    return "{:02d}:{:02d}:{:02d}".format(hours, minutes, int(seconds))

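# --- Sanity-check examples for format_seconds_to_hms (illustrative):
#   format_seconds_to_hms(59)   -> "00:00:59"
#   format_seconds_to_hms(61)   -> "00:01:01"
#   format_seconds_to_hms(3661) -> "01:01:01"
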
def create_metadata(name_project, progress=gr.Progress()):
    name_project += "_pinyin"
    path_project = os.path.join(path_data, name_project)
    path_project_wavs = os.path.join(path_project, "wavs")
    file_metadata = os.path.join(path_project, "metadata.csv")
    file_raw = os.path.join(path_project, "raw.arrow")
    file_duration = os.path.join(path_project, "duration.json")
    file_vocab = os.path.join(path_project, "vocab.txt")

    if not os.path.isfile(file_metadata):
        return f"The file {file_metadata} was not found."

    with open(file_metadata, "r", encoding="utf-8") as f:
        data = f.read()

    audio_path_list = []
    text_list = []
    duration_list = []

    count = data.split("\n")
    length = 0
    result = []
    error_files = []
    for line in progress.tqdm(count, total=len(count)):  # total must be an int
        sp_line = line.split("|")
        if len(sp_line) != 2:
            continue
        name_audio, text = sp_line[:2]

        file_audio = os.path.join(path_project_wavs, name_audio + ".wav")

        if not os.path.isfile(file_audio):
            error_files.append(file_audio)
            continue

        duration = get_audio_duration(file_audio)
        if duration < 2 or duration > 15:  # keep clips between 2 s and 15 s
            continue
        if len(text) < 4:
            continue

        text = clear_text(text)
        text = convert_char_to_pinyin([text], polyphone=True)[0]

        audio_path_list.append(file_audio)
        duration_list.append(duration)
        text_list.append(text)

        result.append({"audio_path": file_audio, "text": text, "duration": duration})

        length += duration

    if duration_list == []:
        error_files_text = "\n".join(error_files)
        return f"Error: No audio files found in the specified path : \n{error_files_text}"

    min_second = round(min(duration_list), 2)
    max_second = round(max(duration_list), 2)

    with ArrowWriter(path=file_raw, writer_batch_size=1) as writer:
        for line in progress.tqdm(result, total=len(result), desc="prepare data"):
            writer.write(line)

    with open(file_duration, "w", encoding="utf-8") as f:
        json.dump({"duration": duration_list}, f, ensure_ascii=False)

    file_vocab_finetune = "data/Emilia_ZH_EN_pinyin/vocab.txt"
    if not os.path.isfile(file_vocab_finetune):
        return "Error: Vocabulary file 'Emilia_ZH_EN_pinyin' not found!"
    shutil.copy2(file_vocab_finetune, file_vocab)

    if error_files != []:
        error_text = "error files\n" + "\n".join(error_files)
    else:
        error_text = ""

    return f"prepare complete \nsamples : {len(text_list)}\ntime data : {format_seconds_to_hms(length)}\nmin sec : {min_second}\nmax sec : {max_second}\nfile_arrow : {file_raw}\n{error_text}"

def check_user(value):
    return gr.update(visible=not value), gr.update(visible=value)

def calculate_train(name_project, batch_size_type, max_samples, learning_rate, num_warmup_updates, save_per_updates, last_per_steps, finetune):
    name_project += "_pinyin"
    path_project = os.path.join(path_data, name_project)
    file_duration = os.path.join(path_project, "duration.json")

    with open(file_duration, "r") as file:
        data = json.load(file)

    duration_list = data["duration"]

    samples = len(duration_list)

    if torch.cuda.is_available():
        gpu_properties = torch.cuda.get_device_properties(0)
        total_memory = gpu_properties.total_memory / (1024 ** 3)
    elif torch.backends.mps.is_available():
        total_memory = psutil.virtual_memory().available / (1024 ** 3)
    else:
        # fallback so CPU-only hosts don't hit an unbound total_memory below
        total_memory = psutil.virtual_memory().available / (1024 ** 3)

    if batch_size_type == "frame":
        batch = int(total_memory * 0.5)
        batch = (lambda num: num + 1 if num % 2 != 0 else num)(batch)
        batch_size_per_gpu = int(38400 / batch)
    else:
        batch_size_per_gpu = int(total_memory / 8)
        batch_size_per_gpu = (lambda num: num + 1 if num % 2 != 0 else num)(batch_size_per_gpu)
        batch = batch_size_per_gpu

    if batch_size_per_gpu <= 0:
        batch_size_per_gpu = 1

    if samples < 64:
        max_samples = int(samples * 0.25)
    else:
        max_samples = 64

    num_warmup_updates = int(samples * 0.10)
    save_per_updates = int(samples * 0.25)
    last_per_steps = int(save_per_updates * 5)

    # round each value up to the nearest even number
    max_samples = (lambda num: num + 1 if num % 2 != 0 else num)(max_samples)
    num_warmup_updates = (lambda num: num + 1 if num % 2 != 0 else num)(num_warmup_updates)
    save_per_updates = (lambda num: num + 1 if num % 2 != 0 else num)(save_per_updates)
    last_per_steps = (lambda num: num + 1 if num % 2 != 0 else num)(last_per_steps)

    if finetune:
        learning_rate = 1e-4
    else:
        learning_rate = 7.5e-5

    return batch_size_per_gpu, max_samples, num_warmup_updates, save_per_updates, last_per_steps, samples, learning_rate

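# --- Worked example of the heuristic above (illustrative numbers): with a 24 GB
# GPU and batch_size_type="frame": batch = int(24 * 0.5) = 12 (already even), so
# batch_size_per_gpu = int(38400 / 12) = 3200. For a project with 1000 samples:
# max_samples = 64, num_warmup_updates = 100, save_per_updates = 250,
# last_per_steps = 1250 -> rounded up to the even 1250? No: 1250 is even already;
# odd values would be bumped by one.
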
def extract_and_save_ema_model(checkpoint_path: str, new_checkpoint_path: str) -> None:
    try:
        checkpoint = torch.load(checkpoint_path)
        print("Original Checkpoint Keys:", checkpoint.keys())

        ema_model_state_dict = checkpoint.get('ema_model_state_dict', None)

        if ema_model_state_dict is not None:
            new_checkpoint = {'ema_model_state_dict': ema_model_state_dict}
            torch.save(new_checkpoint, new_checkpoint_path)
            return f"New checkpoint saved at: {new_checkpoint_path}"
        else:
            return "No 'ema_model_state_dict' found in the checkpoint."

    except Exception as e:
        return f"An error occurred: {e}"

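# --- Illustrative usage (paths are placeholders, not from the original):
#   extract_and_save_ema_model("ckpts/my_speak/model_1200.pt",
#                              "ckpts/my_speak/model_1200_ema_only.pt")
# The reduced file keeps only 'ema_model_state_dict', the part that inference
# with use_ema=True reads, which is why it is much smaller than a full checkpoint.
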
def vocab_check(project_name):
    name_project = project_name + "_pinyin"
    path_project = os.path.join(path_data, name_project)

    file_metadata = os.path.join(path_project, "metadata.csv")

    file_vocab = "data/Emilia_ZH_EN_pinyin/vocab.txt"
    if not os.path.isfile(file_vocab):
        return f"The file {file_vocab} was not found!"

    with open(file_vocab, "r", encoding="utf-8") as f:
        data = f.read()

    vocab = data.split("\n")

    if not os.path.isfile(file_metadata):
        return f"The file {file_metadata} was not found!"

    with open(file_metadata, "r", encoding="utf-8") as f:
        data = f.read()

    miss_symbols = []
    miss_symbols_keep = {}
    for item in data.split("\n"):
        sp = item.split("|")
        if len(sp) != 2:
            continue
        text = sp[1].lower().strip()

        for t in text:
            if t not in vocab and t not in miss_symbols_keep:
                miss_symbols.append(t)
                miss_symbols_keep[t] = t

    if miss_symbols == []:
        info = "You can train using your language!"
    else:
        info = f"The following symbols are missing in your language : {len(miss_symbols)}\n\n" + "\n".join(miss_symbols)

    return info

with gr.Blocks() as app:

    with gr.Row():
        project_name = gr.Textbox(label="project name", value="my_speak")
        bt_create = gr.Button("create new project")

    bt_create.click(fn=create_data_project, inputs=[project_name])

    with gr.Tabs():

        with gr.TabItem("transcribe Data"):

            ch_manual = gr.Checkbox(label="user", value=False)

            mark_info_transcribe = gr.Markdown(
                """```plaintext
Place your 'wavs' folder and 'metadata.csv' file in the '{your_project_name}' directory.

my_speak/
│
└── dataset/
    ├── audio1.wav
    └── audio2.wav
    ...
```""",
                visible=False,
            )

            audio_speaker = gr.File(label="voice", type="filepath", file_count="multiple")
            txt_lang = gr.Text(label="Language", value="english")
            bt_transcribe = gr.Button("transcribe")
            txt_info_transcribe = gr.Text(label="info", value="")
            bt_transcribe.click(fn=transcribe_all, inputs=[project_name, audio_speaker, txt_lang, ch_manual], outputs=[txt_info_transcribe])
            ch_manual.change(fn=check_user, inputs=[ch_manual], outputs=[audio_speaker, mark_info_transcribe])

        with gr.TabItem("prepare Data"):
            gr.Markdown(
                """```plaintext
Place your 'wavs' folder and your 'metadata.csv' file in {your_project_name}.

my_speak/
│
├── wavs/
│   ├── audio1.wav
│   └── audio2.wav
│   ...
│
└── metadata.csv

metadata.csv file format:

audio1|text1
audio2|text2
...

```"""
            )

            bt_prepare = gr.Button("prepare")
            txt_info_prepare = gr.Text(label="info", value="")
            bt_prepare.click(fn=create_metadata, inputs=[project_name], outputs=[txt_info_prepare])

        with gr.TabItem("train Data"):

            with gr.Row():
                bt_calculate = gr.Button("Auto Settings")
                ch_finetune = gr.Checkbox(label="finetune", value=True)
                lb_samples = gr.Label(label="samples")
                batch_size_type = gr.Radio(label="Batch Size Type", choices=["frame", "sample"], value="frame")

            with gr.Row():
                exp_name = gr.Radio(label="Model", choices=["F5TTS_Base", "E2TTS_Base"], value="F5TTS_Base")
                learning_rate = gr.Number(label="Learning Rate", value=1e-4, step=1e-4)

            with gr.Row():
                batch_size_per_gpu = gr.Number(label="Batch Size per GPU", value=1000)
                max_samples = gr.Number(label="Max Samples", value=16)

            with gr.Row():
                grad_accumulation_steps = gr.Number(label="Gradient Accumulation Steps", value=1)
                max_grad_norm = gr.Number(label="Max Gradient Norm", value=1.0)

            with gr.Row():
                epochs = gr.Number(label="Epochs", value=10)
                num_warmup_updates = gr.Number(label="Warmup Updates", value=5)

            with gr.Row():
                save_per_updates = gr.Number(label="Save per Updates", value=10)
                last_per_steps = gr.Number(label="Last per Steps", value=50)

            with gr.Row():
                start_button = gr.Button("Start Training")
                stop_button = gr.Button("Stop Training", interactive=False)

            txt_info_train = gr.Text(label="info", value="")
            start_button.click(fn=start_training, inputs=[project_name, exp_name, learning_rate, batch_size_per_gpu, batch_size_type, max_samples, grad_accumulation_steps, max_grad_norm, epochs, num_warmup_updates, save_per_updates, last_per_steps, ch_finetune], outputs=[txt_info_train, start_button, stop_button])
            stop_button.click(fn=stop_training, outputs=[txt_info_train, start_button, stop_button])
            bt_calculate.click(fn=calculate_train, inputs=[project_name, batch_size_type, max_samples, learning_rate, num_warmup_updates, save_per_updates, last_per_steps, ch_finetune], outputs=[batch_size_per_gpu, max_samples, num_warmup_updates, save_per_updates, last_per_steps, lb_samples, learning_rate])

        with gr.TabItem("reduce checkpoint"):
            txt_path_checkpoint = gr.Text(label="path checkpoint :")
            txt_path_checkpoint_small = gr.Text(label="path output :")
            txt_info_reduce = gr.Text(label="info", value="")
            reduce_button = gr.Button("reduce")
            reduce_button.click(fn=extract_and_save_ema_model, inputs=[txt_path_checkpoint, txt_path_checkpoint_small], outputs=[txt_info_reduce])

        with gr.TabItem("vocab check experiment"):
            check_button = gr.Button("check vocab")
            txt_info_check = gr.Text(label="info", value="")
            check_button.click(fn=vocab_check, inputs=[project_name], outputs=[txt_info_check])

@click.command()
@click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
@click.option("--host", "-H", default=None, help="Host to run the app on")
@click.option(
    "--share",
    "-s",
    default=False,
    is_flag=True,
    help="Share the app via Gradio share link",
)
@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access")
def main(port, host, share, api):
    global app
    print("Starting app...")
    app.queue(api_open=api).launch(
        server_name=host, server_port=port, share=share, show_api=api
    )

if __name__ == "__main__":
    main()
gradio_app.py
ADDED
@@ -0,0 +1,824 @@
import os
import re
import torch
import torchaudio
import gradio as gr
import numpy as np
import tempfile
from einops import rearrange
from vocos import Vocos
from pydub import AudioSegment, silence
from model import CFM, UNetT, DiT, MMDiT
from cached_path import cached_path
from model.utils import (
    load_checkpoint,
    get_tokenizer,
    convert_char_to_pinyin,
    save_spectrogram,
)
from transformers import pipeline
import librosa
import click
import soundfile as sf

try:
    import spaces
    USING_SPACES = True
except ImportError:
    USING_SPACES = False

def gpu_decorator(func):
    if USING_SPACES:
        return spaces.GPU(func)
    else:
        return func

SPLIT_WORDS = [
    "but", "however", "nevertheless", "yet", "still",
    "therefore", "thus", "hence", "consequently",
    "moreover", "furthermore", "additionally",
    "meanwhile", "alternatively", "otherwise",
    "namely", "specifically", "for example", "such as",
    "in fact", "indeed", "notably",
    "in contrast", "on the other hand", "conversely",
    "in conclusion", "to summarize", "finally"
]

device = (
    "cuda"
    if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available() else "cpu"
)

print(f"Using {device} device")

pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-large-v3-turbo",
    torch_dtype=torch.float16,
    device=device,
)
vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")

# --------------------- Settings -------------------- #

target_sample_rate = 24000
n_mel_channels = 100
hop_length = 256
target_rms = 0.1
nfe_step = 32  # 16, 32
cfg_strength = 2.0
ode_method = "euler"
sway_sampling_coef = -1.0
speed = 1.0
# fix_duration = 27  # None or float (duration in seconds)
fix_duration = None

def load_model(repo_name, exp_name, model_cls, model_cfg, ckpt_step):
    ckpt_path = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))
    # ckpt_path = f"ckpts/{exp_name}/model_{ckpt_step}.pt"  # .pt | .safetensors
    vocab_char_map, vocab_size = get_tokenizer("Emilia_ZH_EN", "pinyin")
    model = CFM(
        transformer=model_cls(
            **model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels
        ),
        mel_spec_kwargs=dict(
            target_sample_rate=target_sample_rate,
            n_mel_channels=n_mel_channels,
            hop_length=hop_length,
        ),
        odeint_kwargs=dict(
            method=ode_method,
        ),
        vocab_char_map=vocab_char_map,
    ).to(device)

    model = load_checkpoint(model, ckpt_path, device, use_ema=True)

    return model

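# --- Illustrative variant (not in the original): to load a locally fine-tuned
# checkpoint instead of the hub file, swap the cached_path line for e.g.
#   ckpt_path = "ckpts/my_speak/model_1200.pt"   # hypothetical local path
# and keep the rest of load_model unchanged.
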
# load models
F5TTS_model_cfg = dict(
    dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4
)
E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)

F5TTS_ema_model = load_model(
    "F5-TTS", "F5TTS_Base", DiT, F5TTS_model_cfg, 1200000
)
E2TTS_ema_model = load_model(
    "E2-TTS", "E2TTS_Base", UNetT, E2TTS_model_cfg, 1200000
)

def split_text_into_batches(text, max_chars=200, split_words=SPLIT_WORDS):
    if len(text.encode('utf-8')) <= max_chars:
        return [text]
    if text[-1] not in ['。', '.', '!', '!', '?', '?']:
        text += '.'

    sentences = re.split('([。.!?!?])', text)
    sentences = [''.join(i) for i in zip(sentences[0::2], sentences[1::2])]

    batches = []
    current_batch = ""

    def split_by_words(text):
        words = text.split()
        current_word_part = ""
        word_batches = []
        for word in words:
            if len(current_word_part.encode('utf-8')) + len(word.encode('utf-8')) + 1 <= max_chars:
                current_word_part += word + ' '
            else:
                if current_word_part:
                    # Try to find a suitable split word
                    for split_word in split_words:
                        split_index = current_word_part.rfind(' ' + split_word + ' ')
                        if split_index != -1:
                            word_batches.append(current_word_part[:split_index].strip())
                            current_word_part = current_word_part[split_index:].strip() + ' '
                            break
                    else:
                        # If no suitable split word found, just append the current part
                        word_batches.append(current_word_part.strip())
                        current_word_part = ""
                current_word_part += word + ' '
        if current_word_part:
            word_batches.append(current_word_part.strip())
        return word_batches

    for sentence in sentences:
        if len(current_batch.encode('utf-8')) + len(sentence.encode('utf-8')) <= max_chars:
            current_batch += sentence
        else:
            # If adding this sentence would exceed the limit
            if current_batch:
                batches.append(current_batch)
                current_batch = ""

            # If the sentence itself is longer than max_chars, split it
            if len(sentence.encode('utf-8')) > max_chars:
                # First, try to split by colon
                colon_parts = sentence.split(':')
                if len(colon_parts) > 1:
                    for part in colon_parts:
                        if len(part.encode('utf-8')) <= max_chars:
                            batches.append(part)
                        else:
                            # If a colon part is still too long, split by comma
                            comma_parts = re.split('[,,]', part)
                            if len(comma_parts) > 1:
                                current_comma_part = ""
                                for comma_part in comma_parts:
                                    if len(current_comma_part.encode('utf-8')) + len(comma_part.encode('utf-8')) <= max_chars:
                                        current_comma_part += comma_part + ','
                                    else:
                                        if current_comma_part:
                                            batches.append(current_comma_part.rstrip(','))
                                        current_comma_part = comma_part + ','
                                if current_comma_part:
                                    batches.append(current_comma_part.rstrip(','))
                            else:
                                # If no comma, split by words
                                batches.extend(split_by_words(part))
                else:
                    # If no colon, split by comma
                    comma_parts = re.split('[,,]', sentence)
                    if len(comma_parts) > 1:
                        current_comma_part = ""
                        for comma_part in comma_parts:
                            if len(current_comma_part.encode('utf-8')) + len(comma_part.encode('utf-8')) <= max_chars:
                                current_comma_part += comma_part + ','
                            else:
                                if current_comma_part:
                                    batches.append(current_comma_part.rstrip(','))
                                current_comma_part = comma_part + ','
                        if current_comma_part:
                            batches.append(current_comma_part.rstrip(','))
                    else:
                        # If no comma, split by words
                        batches.extend(split_by_words(sentence))
            else:
                current_batch = sentence

    if current_batch:
        batches.append(current_batch)

    return batches

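# --- Illustrative behavior (not in the original): text is first split on
# sentence-final punctuation; any sentence still over max_chars is then split on
# colons, then commas, and finally on the SPLIT_WORDS connectives. So, roughly:
#   split_text_into_batches("Short one. A much longer sentence, with a clause.", max_chars=30)
# would yield something like ["Short one.", "A much longer sentence,", "with a clause."]
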
def infer_batch(ref_audio, ref_text, gen_text_batches, exp_name, remove_silence, progress=gr.Progress()):
    if exp_name == "F5-TTS":
        ema_model = F5TTS_ema_model
    elif exp_name == "E2-TTS":
        ema_model = E2TTS_ema_model
    else:
        raise ValueError(f"Unknown model: {exp_name}")  # avoid an unbound ema_model below

    audio, sr = ref_audio
    if audio.shape[0] > 1:
        audio = torch.mean(audio, dim=0, keepdim=True)

    rms = torch.sqrt(torch.mean(torch.square(audio)))
    if rms < target_rms:
        audio = audio * target_rms / rms
    if sr != target_sample_rate:
        resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
        audio = resampler(audio)
    audio = audio.to(device)

    generated_waves = []
    spectrograms = []

    for i, gen_text in enumerate(progress.tqdm(gen_text_batches)):
        # Prepare the text
        if len(ref_text[-1].encode('utf-8')) == 1:
            ref_text = ref_text + " "
        text_list = [ref_text + gen_text]
        final_text_list = convert_char_to_pinyin(text_list)

        # Calculate duration
        ref_audio_len = audio.shape[-1] // hop_length
        zh_pause_punc = r"。,、;:?!"
        ref_text_len = len(ref_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, ref_text))
        gen_text_len = len(gen_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, gen_text))
        duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)

        # inference
        with torch.inference_mode():
            generated, _ = ema_model.sample(
                cond=audio,
                text=final_text_list,
                duration=duration,
                steps=nfe_step,
                cfg_strength=cfg_strength,
                sway_sampling_coef=sway_sampling_coef,
            )

        generated = generated[:, ref_audio_len:, :]
        generated_mel_spec = rearrange(generated, "1 n d -> 1 d n")
        generated_wave = vocos.decode(generated_mel_spec.cpu())
        if rms < target_rms:
            generated_wave = generated_wave * rms / target_rms

        # wav -> numpy
        generated_wave = generated_wave.squeeze().cpu().numpy()

        generated_waves.append(generated_wave)
        spectrograms.append(generated_mel_spec[0].cpu().numpy())

    # Combine all generated waves
    final_wave = np.concatenate(generated_waves)

    # Remove silence
    if remove_silence:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
            sf.write(f.name, final_wave, target_sample_rate)
            aseg = AudioSegment.from_file(f.name)
            non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
            non_silent_wave = AudioSegment.silent(duration=0)
            for non_silent_seg in non_silent_segs:
                non_silent_wave += non_silent_seg
            aseg = non_silent_wave
            aseg.export(f.name, format="wav")
            final_wave, _ = torchaudio.load(f.name)
        final_wave = final_wave.squeeze().cpu().numpy()

    # Create a combined spectrogram
    combined_spectrogram = np.concatenate(spectrograms, axis=1)

    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram:
        spectrogram_path = tmp_spectrogram.name
        save_spectrogram(combined_spectrogram, spectrogram_path)

    return (target_sample_rate, final_wave), spectrogram_path

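# --- Worked example of the duration estimate above (illustrative numbers): with
# a 6 s reference clip, ref_audio_len = 6 * 24000 // 256 = 562 mel frames; with
# ref_text of 40 bytes and gen_text of 80 bytes (no Chinese pause punctuation)
# and speed = 1.0:
#   duration = 562 + int(562 / 40 * 80 / 1.0) = 562 + 1124 = 1686 mel frames.
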
def infer(ref_audio_orig, ref_text, gen_text, exp_name, remove_silence, custom_split_words=''):
    global SPLIT_WORDS
    if custom_split_words.strip():  # fixed: only override when custom words are actually given
        SPLIT_WORDS = [word.strip() for word in custom_split_words.split(',')]

    print(gen_text)

    gr.Info("Converting audio...")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
        aseg = AudioSegment.from_file(ref_audio_orig)

        non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
        non_silent_wave = AudioSegment.silent(duration=0)
        for non_silent_seg in non_silent_segs:
            non_silent_wave += non_silent_seg
        aseg = non_silent_wave

        audio_duration = len(aseg)
        if audio_duration > 15000:
            gr.Warning("Audio is over 15s, clipping to only first 15s.")
            aseg = aseg[:15000]
        aseg.export(f.name, format="wav")
        ref_audio = f.name

    if not ref_text.strip():
        gr.Info("No reference text provided, transcribing reference audio...")
        ref_text = pipe(
            ref_audio,
            chunk_length_s=30,
            batch_size=128,
            generate_kwargs={"task": "transcribe"},
            return_timestamps=False,
        )["text"].strip()
        gr.Info("Finished transcription")
    else:
        gr.Info("Using custom reference text...")

    # Split the input text into batches
    audio, sr = torchaudio.load(ref_audio)
    max_chars = int(len(ref_text.encode('utf-8')) / (audio.shape[-1] / sr) * (30 - audio.shape[-1] / sr))
    gen_text_batches = split_text_into_batches(gen_text, max_chars=max_chars)
    print('ref_text', ref_text)
    for i, gen_text in enumerate(gen_text_batches):
        print(f'gen_text {i}', gen_text)

    gr.Info(f"Generating audio using {exp_name} in {len(gen_text_batches)} batches")
    return infer_batch((audio, sr), ref_text, gen_text_batches, exp_name, remove_silence)

def generate_podcast(script, speaker1_name, ref_audio1, ref_text1, speaker2_name, ref_audio2, ref_text2, exp_name, remove_silence):
    # Split the script into speaker blocks
    speaker_pattern = re.compile(f"^({re.escape(speaker1_name)}|{re.escape(speaker2_name)}):", re.MULTILINE)
    speaker_blocks = speaker_pattern.split(script)[1:]  # Skip the first empty element

    generated_audio_segments = []

    for i in range(0, len(speaker_blocks), 2):
        speaker = speaker_blocks[i]
        text = speaker_blocks[i + 1].strip()

        # Determine which speaker is talking
        if speaker == speaker1_name:
            ref_audio = ref_audio1
            ref_text = ref_text1
        elif speaker == speaker2_name:
            ref_audio = ref_audio2
            ref_text = ref_text2
        else:
            continue  # Skip if the speaker is neither speaker1 nor speaker2

        # Generate audio for this block
        audio, _ = infer(ref_audio, ref_text, text, exp_name, remove_silence)

        # Convert the generated audio to a numpy array
        sr, audio_data = audio

        # Save the audio data as a WAV file
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
            sf.write(temp_file.name, audio_data, sr)
            audio_segment = AudioSegment.from_wav(temp_file.name)

        generated_audio_segments.append(audio_segment)

        # Add a short pause between speakers
        pause = AudioSegment.silent(duration=500)  # 500 ms pause
        generated_audio_segments.append(pause)

    # Concatenate all audio segments
    final_podcast = sum(generated_audio_segments)

    # Export the final podcast
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
        podcast_path = temp_file.name
        final_podcast.export(podcast_path, format="wav")

    return podcast_path

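# --- Illustrative script format expected by generate_podcast (speaker names are
# placeholders): each block starts with "<speaker name>:" at the beginning of a line.
#
#   Sean: Welcome back to the show.
#   Meghan: Thanks, glad to be here.
#   Sean: Let's dive right in.
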
def parse_speechtypes_text(gen_text):
    # Pattern to find (Emotion)
    pattern = r'\((.*?)\)'

    # Split the text by the pattern
    tokens = re.split(pattern, gen_text)

    segments = []

    current_emotion = 'Regular'

    for i in range(len(tokens)):
        if i % 2 == 0:
            # This is text
            text = tokens[i].strip()
            if text:
                segments.append({'emotion': current_emotion, 'text': text})
        else:
            # This is an emotion label
            emotion = tokens[i].strip()
            current_emotion = emotion

    return segments

def update_speed(new_speed):
    global speed
    speed = new_speed
    return f"Speed set to: {speed}"

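# --- Illustrative input/output for parse_speechtypes_text (not in the original):
#   parse_speechtypes_text("(Regular) Hello there. (Sad) I miss you.")
# returns
#   [{'emotion': 'Regular', 'text': 'Hello there.'},
#    {'emotion': 'Sad', 'text': 'I miss you.'}]
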
with gr.Blocks() as app_credits:
    gr.Markdown("""
# Credits

* [mrfakename](https://github.com/fakerybakery) for the original [online demo](https://huggingface.co/spaces/mrfakename/E2-F5-TTS)
* [RootingInLoad](https://github.com/RootingInLoad) for the podcast generation
""")
with gr.Blocks() as app_tts:
    gr.Markdown("# Batched TTS")
    ref_audio_input = gr.Audio(label="Reference Audio", type="filepath")
    gen_text_input = gr.Textbox(label="Text to Generate", lines=10)
    model_choice = gr.Radio(
        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
    )
    generate_btn = gr.Button("Synthesize", variant="primary")
    with gr.Accordion("Advanced Settings", open=False):
        ref_text_input = gr.Textbox(
            label="Reference Text",
            info="Leave blank to automatically transcribe the reference audio. If you enter text, it will override automatic transcription.",
            lines=2,
        )
        remove_silence = gr.Checkbox(
            label="Remove Silences",
            info="The model tends to produce silences, especially on longer audio. We can manually remove silences if needed. Note that this is an experimental feature and may produce strange results. This will also increase generation time.",
            value=True,
        )
        split_words_input = gr.Textbox(
            label="Custom Split Words",
            info="Enter custom words to split on, separated by commas. Leave blank to use the default list.",
            lines=2,
        )
        speed_slider = gr.Slider(
            label="Speed",
            minimum=0.3,
            maximum=2.0,
            value=speed,
            step=0.1,
            info="Adjust the speed of the audio.",
        )
        speed_slider.change(update_speed, inputs=speed_slider)

    audio_output = gr.Audio(label="Synthesized Audio")
    spectrogram_output = gr.Image(label="Spectrogram")

    generate_btn.click(
        infer,
        inputs=[
            ref_audio_input,
            ref_text_input,
            gen_text_input,
            model_choice,
            remove_silence,
            split_words_input,
        ],
        outputs=[audio_output, spectrogram_output],
    )

with gr.Blocks() as app_podcast:
    gr.Markdown("# Podcast Generation")
    speaker1_name = gr.Textbox(label="Speaker 1 Name")
    ref_audio_input1 = gr.Audio(label="Reference Audio (Speaker 1)", type="filepath")
    ref_text_input1 = gr.Textbox(label="Reference Text (Speaker 1)", lines=2)

    speaker2_name = gr.Textbox(label="Speaker 2 Name")
    ref_audio_input2 = gr.Audio(label="Reference Audio (Speaker 2)", type="filepath")
    ref_text_input2 = gr.Textbox(label="Reference Text (Speaker 2)", lines=2)

    script_input = gr.Textbox(label="Podcast Script", lines=10,
                              placeholder="Enter the script with speaker names at the start of each block, e.g.:\nSean: How did you start studying...\n\nMeghan: I came to my interest in technology...\nIt was a long journey...\n\nSean: That's fascinating. Can you elaborate...")

    podcast_model_choice = gr.Radio(
        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
    )
    podcast_remove_silence = gr.Checkbox(
        label="Remove Silences",
        value=True,
    )
    generate_podcast_btn = gr.Button("Generate Podcast", variant="primary")
    podcast_output = gr.Audio(label="Generated Podcast")

    def podcast_generation(script, speaker1, ref_audio1, ref_text1, speaker2, ref_audio2, ref_text2, model, remove_silence):
        return generate_podcast(script, speaker1, ref_audio1, ref_text1, speaker2, ref_audio2, ref_text2, model, remove_silence)

    generate_podcast_btn.click(
        podcast_generation,
        inputs=[
            script_input,
            speaker1_name,
            ref_audio_input1,
            ref_text_input1,
            speaker2_name,
            ref_audio_input2,
            ref_text_input2,
            podcast_model_choice,
            podcast_remove_silence,
        ],
        outputs=podcast_output,
    )

def parse_emotional_text(gen_text):
    # NOTE: identical to parse_speechtypes_text above; kept as in the original upload.
    # Pattern to find (Emotion)
    pattern = r'\((.*?)\)'

    # Split the text by the pattern
    tokens = re.split(pattern, gen_text)

    segments = []

    current_emotion = 'Regular'

    for i in range(len(tokens)):
        if i % 2 == 0:
            # This is text
            text = tokens[i].strip()
            if text:
                segments.append({'emotion': current_emotion, 'text': text})
        else:
            # This is an emotion label
            emotion = tokens[i].strip()
            current_emotion = emotion

    return segments

with gr.Blocks() as app_emotional:
    # New section for emotional generation
    gr.Markdown(
        """
# Multiple Speech-Type Generation

This section allows you to upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the "Add Speech Type" button. Enter your text in the format shown below, and the system will generate speech using the appropriate emotions. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified.

**Example Input:**

(Regular) Hello, I'd like to order a sandwich please. (Surprised) What do you mean you're out of bread? (Sad) I really wanted a sandwich though... (Angry) You know what, darn you and your little shop, you suck! (Whisper) I'll just go back home and cry now. (Shouting) Why me?!
"""
    )

    gr.Markdown("Upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button.")

    # Regular speech type (mandatory)
    with gr.Row():
        regular_name = gr.Textbox(value='Regular', label='Speech Type Name', interactive=False)
        regular_audio = gr.Audio(label='Regular Reference Audio', type='filepath')
        regular_ref_text = gr.Textbox(label='Reference Text (Regular)', lines=2)

    # Additional speech types (up to 9 more)
    max_speech_types = 10
    speech_type_names = []
    speech_type_audios = []
    speech_type_ref_texts = []
    speech_type_delete_btns = []

    for i in range(max_speech_types - 1):
        with gr.Row():
            name_input = gr.Textbox(label='Speech Type Name', visible=False)
            audio_input = gr.Audio(label='Reference Audio', type='filepath', visible=False)
            ref_text_input = gr.Textbox(label='Reference Text', lines=2, visible=False)
            delete_btn = gr.Button("Delete", variant="secondary", visible=False)
        speech_type_names.append(name_input)
        speech_type_audios.append(audio_input)
        speech_type_ref_texts.append(ref_text_input)
        speech_type_delete_btns.append(delete_btn)

    # Button to add speech type
    add_speech_type_btn = gr.Button("Add Speech Type")

    # Keep track of current number of speech types
    speech_type_count = gr.State(value=0)

    # Function to add a speech type
    def add_speech_type_fn(speech_type_count):
        if speech_type_count < max_speech_types - 1:
            speech_type_count += 1
            # Prepare updates for the components
            name_updates = []
            audio_updates = []
            ref_text_updates = []
            delete_btn_updates = []
            for i in range(max_speech_types - 1):
                if i < speech_type_count:
                    name_updates.append(gr.update(visible=True))
                    audio_updates.append(gr.update(visible=True))
                    ref_text_updates.append(gr.update(visible=True))
                    delete_btn_updates.append(gr.update(visible=True))
                else:
                    name_updates.append(gr.update())
                    audio_updates.append(gr.update())
                    ref_text_updates.append(gr.update())
                    delete_btn_updates.append(gr.update())
        else:
            # Optionally, show a warning
            # gr.Warning("Maximum number of speech types reached.")
            name_updates = [gr.update() for _ in range(max_speech_types - 1)]
            audio_updates = [gr.update() for _ in range(max_speech_types - 1)]
            ref_text_updates = [gr.update() for _ in range(max_speech_types - 1)]
            delete_btn_updates = [gr.update() for _ in range(max_speech_types - 1)]
        return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates

    add_speech_type_btn.click(
        add_speech_type_fn,
        inputs=speech_type_count,
        outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
    )

    # Function to delete a speech type
    def make_delete_speech_type_fn(index):
        def delete_speech_type_fn(speech_type_count):
            # Prepare updates
            name_updates = []
            audio_updates = []
            ref_text_updates = []
            delete_btn_updates = []

            for i in range(max_speech_types - 1):
                if i == index:
                    name_updates.append(gr.update(visible=False, value=''))
                    audio_updates.append(gr.update(visible=False, value=None))
                    ref_text_updates.append(gr.update(visible=False, value=''))
                    delete_btn_updates.append(gr.update(visible=False))
                else:
                    name_updates.append(gr.update())
                    audio_updates.append(gr.update())
                    ref_text_updates.append(gr.update())
                    delete_btn_updates.append(gr.update())

            speech_type_count = max(0, speech_type_count - 1)

            return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates

        return delete_speech_type_fn

    for i, delete_btn in enumerate(speech_type_delete_btns):
        delete_fn = make_delete_speech_type_fn(i)
        delete_btn.click(
            delete_fn,
            inputs=speech_type_count,
            outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
        )

    # Text input for the prompt
    gen_text_input_emotional = gr.Textbox(label="Text to Generate", lines=10)

    # Model choice
    model_choice_emotional = gr.Radio(
        choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
    )

    with gr.Accordion("Advanced Settings", open=False):
        remove_silence_emotional = gr.Checkbox(
            label="Remove Silences",
            value=True,
        )

    # Generate button
    generate_emotional_btn = gr.Button("Generate Emotional Speech", variant="primary")

    # Output audio
    audio_output_emotional = gr.Audio(label="Synthesized Audio")

    def generate_emotional_speech(
        regular_audio,
        regular_ref_text,
        gen_text,
        *args,
    ):
        num_additional_speech_types = max_speech_types - 1
        speech_type_names_list = args[:num_additional_speech_types]
        speech_type_audios_list = args[num_additional_speech_types:2 * num_additional_speech_types]
        speech_type_ref_texts_list = args[2 * num_additional_speech_types:3 * num_additional_speech_types]
        model_choice = args[3 * num_additional_speech_types]
        remove_silence = args[3 * num_additional_speech_types + 1]

        # Collect the speech types and their audios into a dict
        speech_types = {'Regular': {'audio': regular_audio, 'ref_text': regular_ref_text}}

        for name_input, audio_input, ref_text_input in zip(speech_type_names_list, speech_type_audios_list, speech_type_ref_texts_list):
            if name_input and audio_input:
                speech_types[name_input] = {'audio': audio_input, 'ref_text': ref_text_input}

        # Parse the gen_text into segments
        segments = parse_speechtypes_text(gen_text)

        # For each segment, generate speech
        generated_audio_segments = []
        current_emotion = 'Regular'

        for segment in segments:
            emotion = segment['emotion']
            text = segment['text']

            if emotion in speech_types:
                current_emotion = emotion
            else:
                # If emotion not available, default to Regular
                current_emotion = 'Regular'

            ref_audio = speech_types[current_emotion]['audio']
            ref_text = speech_types[current_emotion].get('ref_text', '')

            # Generate speech for this segment
            audio, _ = infer(ref_audio, ref_text, text, model_choice, remove_silence, "")
            sr, audio_data = audio

            generated_audio_segments.append(audio_data)

        # Concatenate all audio segments
        if generated_audio_segments:
            final_audio_data = np.concatenate(generated_audio_segments)
            return (sr, final_audio_data)
        else:
            gr.Warning("No audio generated.")
            return None

    generate_emotional_btn.click(
        generate_emotional_speech,
        inputs=[
            regular_audio,
            regular_ref_text,
            gen_text_input_emotional,
        ] + speech_type_names + speech_type_audios + speech_type_ref_texts + [
            model_choice_emotional,
            remove_silence_emotional,
        ],
        outputs=audio_output_emotional,
    )

    # Validation function to disable Generate button if speech types are missing
    def validate_speech_types(
        gen_text,
        regular_name,
        *args
    ):
        num_additional_speech_types = max_speech_types - 1
        speech_type_names_list = args[:num_additional_speech_types]

        # Collect the speech type names
        speech_types_available = set()
        if regular_name:
            speech_types_available.add(regular_name)
        for name_input in speech_type_names_list:
            if name_input:
                speech_types_available.add(name_input)

        # Parse the gen_text to get the speech types used
        segments = parse_emotional_text(gen_text)
        speech_types_in_text = set(segment['emotion'] for segment in segments)

        # Check if all speech types in text are available
        missing_speech_types = speech_types_in_text - speech_types_available

        if missing_speech_types:
            # Disable the generate button
            return gr.update(interactive=False)
        else:
            # Enable the generate button
            return gr.update(interactive=True)

    gen_text_input_emotional.change(
        validate_speech_types,
        inputs=[gen_text_input_emotional, regular_name] + speech_type_names,
        outputs=generate_emotional_btn
    )
with gr.Blocks() as app:
    gr.Markdown(
        """
# E2/F5 TTS

This is a local web UI for F5 TTS with advanced batch processing support. This app supports the following TTS models:

* [F5-TTS](https://arxiv.org/abs/2410.06885) (A Fairytaler that Fakes Fluent and Faithful Speech with Flow Matching)
* [E2 TTS](https://arxiv.org/abs/2406.18009) (Embarrassingly Easy Fully Non-Autoregressive Zero-Shot TTS)

The checkpoints support English and Chinese.

If you're having issues, try converting your reference audio to WAV or MP3, clipping it to 15s, and shortening your prompt.

**NOTE: Reference text will be automatically transcribed with Whisper if not provided. For best results, keep your reference clips short (<15s). Ensure the audio is fully uploaded before generating.**
"""
    )
    gr.TabbedInterface([app_tts, app_podcast, app_emotional, app_credits], ["TTS", "Podcast", "Multi-Style", "Credits"])

@click.command()
@click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
@click.option("--host", "-H", default=None, help="Host to run the app on")
@click.option(
    "--share",
    "-s",
    default=False,
    is_flag=True,
    help="Share the app via Gradio share link",
)
@click.option("--api", "-a", default=True, is_flag=True, help="Allow API access")
def main(port, host, share, api):
    global app
    print("Starting app...")
    app.queue(api_open=api).launch(
        server_name=host, server_port=port, share=share, show_api=api
    )

if __name__ == "__main__":
    main()
inference-cli.py
ADDED
@@ -0,0 +1,428 @@
import argparse
import codecs
import re
import tempfile
from pathlib import Path

import numpy as np
import soundfile as sf
import tomli
import torch
import torchaudio
import tqdm
from cached_path import cached_path
from einops import rearrange
from pydub import AudioSegment, silence
from transformers import pipeline
from vocos import Vocos

from model import CFM, DiT, MMDiT, UNetT
from model.utils import (convert_char_to_pinyin, get_tokenizer,
                         load_checkpoint, save_spectrogram)

parser = argparse.ArgumentParser(
    prog="python3 inference-cli.py",
    description="Commandline interface for E2/F5 TTS with Advanced Batch Processing.",
    epilog="Specify options above to override one or more settings from config.",
)
parser.add_argument(
    "-c",
    "--config",
    help="Configuration file. Default=inference-cli.toml",
    default="inference-cli.toml",
)
parser.add_argument(
    "-m",
    "--model",
    help="F5-TTS | E2-TTS",
)
parser.add_argument(
    "-p",
    "--ckpt_file",
    help="The checkpoint .pt file",
)
parser.add_argument(
    "-v",
    "--vocab_file",
    help="The vocab .txt file",
)
parser.add_argument(
    "-r",
    "--ref_audio",
    type=str,
    help="Reference audio file < 15 seconds.",
)
parser.add_argument(
    "-s",
    "--ref_text",
    type=str,
    default="666",  # sentinel meaning "not set on the command line"
    help="Subtitle for the reference audio.",
)
parser.add_argument(
    "-t",
    "--gen_text",
    type=str,
    help="Text to generate.",
)
parser.add_argument(
    "-f",
    "--gen_file",
    type=str,
    help="File with text to generate. Ignores --gen_text.",
)
parser.add_argument(
    "-o",
    "--output_dir",
    type=str,
    help="Path to output folder.",
)
parser.add_argument(
    "--remove_silence",
    help="Remove silence.",
)
parser.add_argument(
    "--load_vocoder_from_local",
    action="store_true",
    help="Load vocoder from a local path. Default: ../checkpoints/charactr/vocos-mel-24khz",
)
args = parser.parse_args()

config = tomli.load(open(args.config, "rb"))
|
92 |
+
|
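
# Illustrative invocations (flag names as defined above; paths and values are examples only):
#   python3 inference-cli.py                                        # take every setting from inference-cli.toml
#   python3 inference-cli.py -m E2-TTS -t "Hello there." -o tests   # override model, text, and output dir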
ref_audio = args.ref_audio if args.ref_audio else config["ref_audio"]
ref_text = args.ref_text if args.ref_text != "666" else config["ref_text"]
gen_text = args.gen_text if args.gen_text else config["gen_text"]
gen_file = args.gen_file if args.gen_file else config["gen_file"]
if gen_file:
    gen_text = codecs.open(gen_file, "r", "utf-8").read()
output_dir = args.output_dir if args.output_dir else config["output_dir"]
model = args.model if args.model else config["model"]
ckpt_file = args.ckpt_file if args.ckpt_file else ""
vocab_file = args.vocab_file if args.vocab_file else ""
remove_silence = args.remove_silence if args.remove_silence else config["remove_silence"]
wave_path = Path(output_dir) / "out.wav"
spectrogram_path = Path(output_dir) / "out.png"
vocos_local_path = "../checkpoints/charactr/vocos-mel-24khz"

device = (
    "cuda"
    if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available() else "cpu"
)
if args.load_vocoder_from_local:
    print(f"Loading Vocos from local path {vocos_local_path}")
    vocos = Vocos.from_hparams(f"{vocos_local_path}/config.yaml")
    state_dict = torch.load(f"{vocos_local_path}/pytorch_model.bin", map_location=device)
    vocos.load_state_dict(state_dict)
    vocos.eval()
else:
    print("Downloading Vocos from Hugging Face charactr/vocos-mel-24khz")
    vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")

print(f"Using {device} device")

# --------------------- Settings -------------------- #

target_sample_rate = 24000
n_mel_channels = 100
hop_length = 256
target_rms = 0.1
nfe_step = 32  # 16, 32
cfg_strength = 2.0
ode_method = "euler"
sway_sampling_coef = -1.0
speed = 1.0
# fix_duration = 27  # None or float (duration in seconds)
fix_duration = None
def load_model(model_cls, model_cfg, ckpt_path, file_vocab):
    if file_vocab == "":
        file_vocab = "Emilia_ZH_EN"
        tokenizer = "pinyin"
    else:
        tokenizer = "custom"

    print("\nvocab : ", file_vocab, tokenizer)
    print("tokenizer : ", tokenizer)
    print("model : ", ckpt_path, "\n")

    vocab_char_map, vocab_size = get_tokenizer(file_vocab, tokenizer)
    model = CFM(
        transformer=model_cls(
            **model_cfg, text_num_embeds=vocab_size, mel_dim=n_mel_channels
        ),
        mel_spec_kwargs=dict(
            target_sample_rate=target_sample_rate,
            n_mel_channels=n_mel_channels,
            hop_length=hop_length,
        ),
        odeint_kwargs=dict(
            method=ode_method,
        ),
        vocab_char_map=vocab_char_map,
    ).to(device)

    model = load_checkpoint(model, ckpt_path, device, use_ema=True)

    return model
# load models
F5TTS_model_cfg = dict(
    dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4
)
E2TTS_model_cfg = dict(dim=1024, depth=24, heads=16, ff_mult=4)

def chunk_text(text, max_chars=135):
    """
    Splits the input text into chunks, each with a maximum number of characters.

    Args:
        text (str): The text to be split.
        max_chars (int): The maximum number of characters per chunk.

    Returns:
        List[str]: A list of text chunks.
    """
    chunks = []
    current_chunk = ""
    # Split the text into sentences based on punctuation followed by whitespace
    sentences = re.split(r'(?<=[;:,.!?])\s+|(?<=[;:,。!?])', text)

    for sentence in sentences:
        if len(current_chunk.encode('utf-8')) + len(sentence.encode('utf-8')) <= max_chars:
            current_chunk += sentence + " " if sentence and len(sentence[-1].encode('utf-8')) == 1 else sentence
        else:
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = sentence + " " if sentence and len(sentence[-1].encode('utf-8')) == 1 else sentence

    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks
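
# Illustrative behavior of chunk_text (a sketch, not part of the CLI flow):
#   chunk_text("Hello there. General Kenobi!", max_chars=12)
#   -> ["Hello there.", "General Kenobi!"]
# Each sentence is kept whole; a new chunk starts once appending the next
# sentence would push the current chunk past the byte budget.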
#ckpt_path = f"ckpts/{exp_name}/model_{ckpt_step}.pt"  # .pt | .safetensors
#if not Path(ckpt_path).exists():
#    ckpt_path = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))

def infer_batch(ref_audio, ref_text, gen_text_batches, model, ckpt_file, file_vocab, remove_silence, cross_fade_duration=0.15):
    if model == "F5-TTS":
        if ckpt_file == "":
            repo_name = "F5-TTS"
            exp_name = "F5TTS_Base"
            ckpt_step = 1200000
            ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))

        ema_model = load_model(DiT, F5TTS_model_cfg, ckpt_file, file_vocab)

    elif model == "E2-TTS":
        if ckpt_file == "":
            repo_name = "E2-TTS"
            exp_name = "E2TTS_Base"
            ckpt_step = 1200000
            ckpt_file = str(cached_path(f"hf://SWivid/{repo_name}/{exp_name}/model_{ckpt_step}.safetensors"))

        ema_model = load_model(UNetT, E2TTS_model_cfg, ckpt_file, file_vocab)

    audio, sr = ref_audio
    if audio.shape[0] > 1:
        audio = torch.mean(audio, dim=0, keepdim=True)

    rms = torch.sqrt(torch.mean(torch.square(audio)))
    if rms < target_rms:
        audio = audio * target_rms / rms
    if sr != target_sample_rate:
        resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
        audio = resampler(audio)
    audio = audio.to(device)

    generated_waves = []
    spectrograms = []

    if len(ref_text[-1].encode('utf-8')) == 1:
        ref_text = ref_text + " "
    for i, gen_text in enumerate(tqdm.tqdm(gen_text_batches)):
        # Prepare the text
        text_list = [ref_text + gen_text]
        final_text_list = convert_char_to_pinyin(text_list)

        # Calculate duration
        ref_audio_len = audio.shape[-1] // hop_length
        zh_pause_punc = r"。,、;:?!"
        ref_text_len = len(ref_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, ref_text))
        gen_text_len = len(gen_text.encode('utf-8')) + 3 * len(re.findall(zh_pause_punc, gen_text))
        duration = ref_audio_len + int(ref_audio_len / ref_text_len * gen_text_len / speed)

        # Inference
        with torch.inference_mode():
            generated, _ = ema_model.sample(
                cond=audio,
                text=final_text_list,
                duration=duration,
                steps=nfe_step,
                cfg_strength=cfg_strength,
                sway_sampling_coef=sway_sampling_coef,
            )

        generated = generated[:, ref_audio_len:, :]
        generated_mel_spec = rearrange(generated, "1 n d -> 1 d n")
        generated_wave = vocos.decode(generated_mel_spec.cpu())
        if rms < target_rms:
            generated_wave = generated_wave * rms / target_rms

        # wav -> numpy
        generated_wave = generated_wave.squeeze().cpu().numpy()

        generated_waves.append(generated_wave)
        spectrograms.append(generated_mel_spec[0].cpu().numpy())

    # Combine all generated waves with cross-fading
    if cross_fade_duration <= 0:
        # Simply concatenate
        final_wave = np.concatenate(generated_waves)
    else:
        final_wave = generated_waves[0]
        for i in range(1, len(generated_waves)):
            prev_wave = final_wave
            next_wave = generated_waves[i]

            # Calculate cross-fade samples, ensuring it does not exceed wave lengths
            cross_fade_samples = int(cross_fade_duration * target_sample_rate)
            cross_fade_samples = min(cross_fade_samples, len(prev_wave), len(next_wave))

            if cross_fade_samples <= 0:
                # No overlap possible, concatenate
                final_wave = np.concatenate([prev_wave, next_wave])
                continue

            # Overlapping parts
            prev_overlap = prev_wave[-cross_fade_samples:]
            next_overlap = next_wave[:cross_fade_samples]

            # Fade out and fade in
            fade_out = np.linspace(1, 0, cross_fade_samples)
            fade_in = np.linspace(0, 1, cross_fade_samples)

            # Cross-faded overlap
            cross_faded_overlap = prev_overlap * fade_out + next_overlap * fade_in

            # Combine
            new_wave = np.concatenate([
                prev_wave[:-cross_fade_samples],
                cross_faded_overlap,
                next_wave[cross_fade_samples:]
            ])

            final_wave = new_wave

    # Create a combined spectrogram
    combined_spectrogram = np.concatenate(spectrograms, axis=1)

    return final_wave, combined_spectrogram
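
# Worked example of the duration heuristic in infer_batch (illustrative numbers):
# a 6 s reference at 24 kHz with hop_length 256 gives
#   ref_audio_len = 6 * 24000 // 256 = 562 frames;
# with a 50-byte ref_text and a 100-byte gen_text (no Chinese pause punctuation),
# the generated part is budgeted 562 / 50 * 100 = 1124 frames,
# so duration = 562 + 1124 = 1686 frames in total.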
def process_voice(ref_audio_orig, ref_text):
    print("Converting audio...")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as f:
        aseg = AudioSegment.from_file(ref_audio_orig)

        non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=1000)
        non_silent_wave = AudioSegment.silent(duration=0)
        for non_silent_seg in non_silent_segs:
            non_silent_wave += non_silent_seg
        aseg = non_silent_wave

        audio_duration = len(aseg)
        if audio_duration > 15000:
            print("Audio is over 15s, clipping to only first 15s.")
            aseg = aseg[:15000]
        aseg.export(f.name, format="wav")
        ref_audio = f.name

    if not ref_text.strip():
        print("No reference text provided, transcribing reference audio...")
        pipe = pipeline(
            "automatic-speech-recognition",
            model="openai/whisper-large-v3-turbo",
            torch_dtype=torch.float16,
            device=device,
        )
        ref_text = pipe(
            ref_audio,
            chunk_length_s=30,
            batch_size=128,
            generate_kwargs={"task": "transcribe"},
            return_timestamps=False,
        )["text"].strip()
        print("Finished transcription")
    else:
        print("Using custom reference text...")
    return ref_audio, ref_text
def infer(ref_audio, ref_text, gen_text, model, ckpt_file, file_vocab, remove_silence, cross_fade_duration=0.15):
    print(gen_text)
    # Ensure the reference text ends with ". " so it is cleanly separated from the generated text
    if not ref_text.endswith(". ") and not ref_text.endswith("。"):
        if ref_text.endswith("."):
            ref_text += " "
        else:
            ref_text += ". "

    # Split the input text into batches
    audio, sr = torchaudio.load(ref_audio)
    max_chars = int(len(ref_text.encode('utf-8')) / (audio.shape[-1] / sr) * (25 - audio.shape[-1] / sr))
    gen_text_batches = chunk_text(gen_text, max_chars=max_chars)
    print('ref_text', ref_text)
    for i, gen_text in enumerate(gen_text_batches):
        print(f'gen_text {i}', gen_text)

    print(f"Generating audio using {model} in {len(gen_text_batches)} batches, loading models...")
    return infer_batch((audio, sr), ref_text, gen_text_batches, model, ckpt_file, file_vocab, remove_silence, cross_fade_duration)
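
# Worked example of the max_chars budget in infer (illustrative numbers):
# for a 6 s reference clip whose transcript is 60 bytes,
#   max_chars = int(60 / 6 * (25 - 6)) = 190
# i.e. longer reference clips leave a smaller character budget per batch,
# keeping each reference-plus-generated batch at roughly 25 s of audio.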
def process(ref_audio, ref_text, text_gen, model, ckpt_file, file_vocab, remove_silence):
    main_voice = {"ref_audio": ref_audio, "ref_text": ref_text}
    if "voices" not in config:
        voices = {"main": main_voice}
    else:
        voices = config["voices"]
        voices["main"] = main_voice
    for voice in voices:
        voices[voice]['ref_audio'], voices[voice]['ref_text'] = process_voice(voices[voice]['ref_audio'], voices[voice]['ref_text'])

    generated_audio_segments = []
    reg1 = r'(?=\[\w+\])'
    chunks = re.split(reg1, text_gen)
    reg2 = r'\[(\w+)\]'
    for text in chunks:
        match = re.match(reg2, text)
        # Fall back to the main voice when a chunk has no [voice] tag or names an unknown voice
        if not match or match[1] not in voices:
            voice = "main"
        else:
            voice = match[1]
        text = re.sub(reg2, "", text)
        gen_text = text.strip()
        ref_audio = voices[voice]['ref_audio']
        ref_text = voices[voice]['ref_text']
        print(f"Voice: {voice}")
        audio, spectrogram = infer(ref_audio, ref_text, gen_text, model, ckpt_file, file_vocab, remove_silence)
        generated_audio_segments.append(audio)

    if generated_audio_segments:
        final_wave = np.concatenate(generated_audio_segments)
        with open(wave_path, "wb") as f:
            sf.write(f.name, final_wave, target_sample_rate)
            # Remove silence
            if remove_silence:
                aseg = AudioSegment.from_file(f.name)
                non_silent_segs = silence.split_on_silence(aseg, min_silence_len=1000, silence_thresh=-50, keep_silence=500)
                non_silent_wave = AudioSegment.silent(duration=0)
                for non_silent_seg in non_silent_segs:
                    non_silent_wave += non_silent_seg
                aseg = non_silent_wave
                aseg.export(f.name, format="wav")
            print(f.name)


process(ref_audio, ref_text, gen_text, model, ckpt_file, vocab_file, remove_silence)
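The `[voice]` tags that `process` splits on switch speakers mid-script; a hypothetical `gen_text` using a second voice named `town` (which would need a matching entry in the config's `voices` table, sketched after the TOML file below) could look like:

# gen_text = "[main] Hello from the default speaker. [town] Now the town voice. [main] And back."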
inference-cli.toml
ADDED
@@ -0,0 +1,10 @@
# F5-TTS | E2-TTS
model = "F5-TTS"
ref_audio = "tests/ref_audio/test_en_1_ref_short.wav"
# If an empty "", transcribes the reference audio automatically.
ref_text = "Some call me nature, others call me mother nature."
gen_text = "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences."
# File with text to generate. Ignores the text above.
gen_file = ""
remove_silence = false
output_dir = "tests"
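`process` in inference-cli.py also reads an optional `voices` table from this config for multi-voice scripts. A hypothetical entry, matching the `ref_audio`/`ref_text` keys the code looks up (the `town` name and path are placeholders):

[voices.town]
ref_audio = "tests/ref_audio/town_voice.wav"
ref_text = ""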
requirements.txt
ADDED
@@ -0,0 +1,23 @@
accelerate>=0.33.0
cached_path
click
datasets
einops>=0.8.0
einx>=0.3.0
ema_pytorch>=0.5.2
gradio
jieba
librosa
matplotlib
numpy<=1.26.4
pydub
pypinyin
safetensors
soundfile
tomli
torchdiffeq
tqdm>=4.65.0
transformers
vocos
wandb
x_transformers>=1.31.14
requirements_eval.txt
ADDED
@@ -0,0 +1,5 @@
faster_whisper
funasr
jiwer
zhconv
zhon
speech_edit.py
ADDED
@@ -0,0 +1,183 @@
import os

import torch
import torch.nn.functional as F
import torchaudio
from einops import rearrange
from vocos import Vocos

from model import CFM, UNetT, DiT, MMDiT
from model.utils import (
    load_checkpoint,
    get_tokenizer,
    convert_char_to_pinyin,
    save_spectrogram,
)

device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"


# --------------------- Dataset Settings -------------------- #

target_sample_rate = 24000
n_mel_channels = 100
hop_length = 256
target_rms = 0.1

tokenizer = "pinyin"
dataset_name = "Emilia_ZH_EN"


# ---------------------- Infer Settings ---------------------- #

seed = None  # int | None

exp_name = "F5TTS_Base"  # F5TTS_Base | E2TTS_Base
ckpt_step = 1200000

nfe_step = 32  # 16, 32
cfg_strength = 2.
ode_method = 'euler'  # euler | midpoint
sway_sampling_coef = -1.
speed = 1.

if exp_name == "F5TTS_Base":
    model_cls = DiT
    model_cfg = dict(dim = 1024, depth = 22, heads = 16, ff_mult = 2, text_dim = 512, conv_layers = 4)

elif exp_name == "E2TTS_Base":
    model_cls = UNetT
    model_cfg = dict(dim = 1024, depth = 24, heads = 16, ff_mult = 4)

ckpt_path = f"ckpts/{exp_name}/model_{ckpt_step}.pt"
output_dir = "tests"

# [leverage https://github.com/MahmoudAshraf97/ctc-forced-aligner to get char level alignment]
# pip install git+https://github.com/MahmoudAshraf97/ctc-forced-aligner.git
# [write the origin_text into a file, e.g. tests/test_edit.txt]
# ctc-forced-aligner --audio_path "tests/ref_audio/test_en_1_ref_short.wav" --text_path "tests/test_edit.txt" --language "zho" --romanize --split_size "char"
# [result will be saved at same path of audio file]
# [--language "zho" for Chinese, "eng" for English]
# [if local ckpt, set --alignment_model "../checkpoints/mms-300m-1130-forced-aligner"]

audio_to_edit = "tests/ref_audio/test_en_1_ref_short.wav"
origin_text = "Some call me nature, others call me mother nature."
target_text = "Some call me optimist, others call me realist."
parts_to_edit = [[1.42, 2.44], [4.04, 4.9], ]  # start/end of "nature" & "mother nature", in seconds
fix_duration = [1.2, 1, ]  # fixed durations for "optimist" & "realist", in seconds

# audio_to_edit = "tests/ref_audio/test_zh_1_ref_short.wav"
# origin_text = "对,这就是我,万人敬仰的太乙真人。"
# target_text = "对,那就是你,万人敬仰的太白金星。"
# parts_to_edit = [[0.84, 1.4], [1.92, 2.4], [4.26, 6.26], ]
# fix_duration = None  # use origin text duration
# ------------------------------------------------- #

use_ema = True

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Vocoder model
local = False
if local:
    vocos_local_path = "../checkpoints/charactr/vocos-mel-24khz"
    vocos = Vocos.from_hparams(f"{vocos_local_path}/config.yaml")
    state_dict = torch.load(f"{vocos_local_path}/pytorch_model.bin", weights_only=True, map_location=device)
    vocos.load_state_dict(state_dict)

    vocos.eval()
else:
    vocos = Vocos.from_pretrained("charactr/vocos-mel-24khz")

# Tokenizer
vocab_char_map, vocab_size = get_tokenizer(dataset_name, tokenizer)

# Model
model = CFM(
    transformer = model_cls(
        **model_cfg,
        text_num_embeds = vocab_size,
        mel_dim = n_mel_channels
    ),
    mel_spec_kwargs = dict(
        target_sample_rate = target_sample_rate,
        n_mel_channels = n_mel_channels,
        hop_length = hop_length,
    ),
    odeint_kwargs = dict(
        method = ode_method,
    ),
    vocab_char_map = vocab_char_map,
).to(device)

model = load_checkpoint(model, ckpt_path, device, use_ema = use_ema)

# Audio
audio, sr = torchaudio.load(audio_to_edit)
if audio.shape[0] > 1:
    audio = torch.mean(audio, dim=0, keepdim=True)
rms = torch.sqrt(torch.mean(torch.square(audio)))
if rms < target_rms:
    audio = audio * target_rms / rms
if sr != target_sample_rate:
    resampler = torchaudio.transforms.Resample(sr, target_sample_rate)
    audio = resampler(audio)
offset = 0
audio_ = torch.zeros(1, 0)
edit_mask = torch.zeros(1, 0, dtype=torch.bool)
for part in parts_to_edit:
    start, end = part
    part_dur = end - start if fix_duration is None else fix_duration.pop(0)
    part_dur = part_dur * target_sample_rate
    start = start * target_sample_rate
    audio_ = torch.cat((audio_, audio[:, round(offset):round(start)], torch.zeros(1, round(part_dur))), dim = -1)
    edit_mask = torch.cat((edit_mask,
                           torch.ones(1, round((start - offset) / hop_length), dtype = torch.bool),
                           torch.zeros(1, round(part_dur / hop_length), dtype = torch.bool)
                           ), dim = -1)
    offset = end * target_sample_rate
# audio = torch.cat((audio_, audio[:, round(offset):]), dim = -1)
edit_mask = F.pad(edit_mask, (0, audio.shape[-1] // hop_length - edit_mask.shape[-1] + 1), value = True)
audio = audio.to(device)
edit_mask = edit_mask.to(device)
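
# How the edit mask above lines up (illustrative): edit_mask is True for mel
# frames to keep from the original audio and False for frames to regenerate.
# For parts_to_edit = [[1.42, 2.44], ...] at 24 kHz with hop_length 256, the
# first span keeps round(1.42 * 24000 / 256) = 133 frames, then masks out
# round(1.2 * 24000 / 256) ≈ 112 frames to be re-synthesized as "optimist".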
# Text
text_list = [target_text]
if tokenizer == "pinyin":
    final_text_list = convert_char_to_pinyin(text_list)
else:
    final_text_list = [text_list]
print(f"text  : {text_list}")
print(f"pinyin: {final_text_list}")

# Duration
ref_audio_len = 0
duration = audio.shape[-1] // hop_length

# Inference
with torch.inference_mode():
    generated, trajectory = model.sample(
        cond = audio,
        text = final_text_list,
        duration = duration,
        steps = nfe_step,
        cfg_strength = cfg_strength,
        sway_sampling_coef = sway_sampling_coef,
        seed = seed,
        edit_mask = edit_mask,
    )
print(f"Generated mel: {generated.shape}")

# Final result
generated = generated[:, ref_audio_len:, :]
generated_mel_spec = rearrange(generated, '1 n d -> 1 d n')
generated_wave = vocos.decode(generated_mel_spec.cpu())
if rms < target_rms:
    generated_wave = generated_wave * rms / target_rms

save_spectrogram(generated_mel_spec[0].cpu().numpy(), f"{output_dir}/test_single_edit.png")
torchaudio.save(f"{output_dir}/test_single_edit.wav", generated_wave, target_sample_rate)
print(f"Generated wav: {generated_wave.shape}")
train.py
ADDED
@@ -0,0 +1,94 @@
from model import CFM, UNetT, DiT, MMDiT, Trainer
from model.utils import get_tokenizer
from model.dataset import load_dataset


# -------------------------- Dataset Settings --------------------------- #

target_sample_rate = 24000
n_mel_channels = 100
hop_length = 256

tokenizer = "pinyin"  # 'pinyin', 'char', or 'custom'
tokenizer_path = None  # if tokenizer = 'custom', path to the tokenizer you want to use (should be vocab.txt)
dataset_name = "Emilia_ZH_EN"

# -------------------------- Training Settings -------------------------- #

exp_name = "F5TTS_Base"  # F5TTS_Base | E2TTS_Base

learning_rate = 7.5e-5

batch_size_per_gpu = 38400  # 8 GPUs, 8 * 38400 = 307200 frames per update
batch_size_type = "frame"  # "frame" or "sample"
max_samples = 64  # max sequences per batch if using frame-wise batch size; 32 for small models, 64 for base models
grad_accumulation_steps = 1  # note: updates = steps / grad_accumulation_steps
max_grad_norm = 1.

epochs = 11  # with linear decay, epochs control the slope
num_warmup_updates = 20000  # warmup steps
save_per_updates = 50000  # save a checkpoint every this many steps
last_per_steps = 5000  # save the "last" checkpoint every this many steps

# model params
if exp_name == "F5TTS_Base":
    wandb_resume_id = None
    model_cls = DiT
    model_cfg = dict(dim = 1024, depth = 22, heads = 16, ff_mult = 2, text_dim = 512, conv_layers = 4)
elif exp_name == "E2TTS_Base":
    wandb_resume_id = None
    model_cls = UNetT
    model_cfg = dict(dim = 1024, depth = 24, heads = 16, ff_mult = 4)


# ----------------------------------------------------------------------- #
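
# Effective batch size, worked through (illustrative): with batch_size_type
# "frame", batch_size_per_gpu = 38400 frames and hop_length 256 at 24 kHz is
# 38400 * 256 / 24000 = 409.6 s of audio per GPU per step; across 8 GPUs that
# is 307200 frames, roughly 55 minutes of audio per optimizer update.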
def main():
    # Resolve the vocab source: a custom path, or the dataset's bundled vocab
    # (assigning to the module-level tokenizer_path here would shadow it and
    # raise UnboundLocalError, so a local name is used instead)
    if tokenizer == "custom":
        vocab_source = tokenizer_path
    else:
        vocab_source = dataset_name
    vocab_char_map, vocab_size = get_tokenizer(vocab_source, tokenizer)

    mel_spec_kwargs = dict(
        target_sample_rate = target_sample_rate,
        n_mel_channels = n_mel_channels,
        hop_length = hop_length,
    )

    e2tts = CFM(
        transformer = model_cls(
            **model_cfg,
            text_num_embeds = vocab_size,
            mel_dim = n_mel_channels
        ),
        mel_spec_kwargs = mel_spec_kwargs,
        vocab_char_map = vocab_char_map,
    )

    trainer = Trainer(
        e2tts,
        epochs,
        learning_rate,
        num_warmup_updates = num_warmup_updates,
        save_per_updates = save_per_updates,
        checkpoint_path = f'ckpts/{exp_name}',
        batch_size = batch_size_per_gpu,
        batch_size_type = batch_size_type,
        max_samples = max_samples,
        grad_accumulation_steps = grad_accumulation_steps,
        max_grad_norm = max_grad_norm,
        wandb_project = "CFM-TTS",
        wandb_run_name = exp_name,
        wandb_resume_id = wandb_resume_id,
        last_per_steps = last_per_steps,
    )

    train_dataset = load_dataset(dataset_name, tokenizer, mel_spec_kwargs=mel_spec_kwargs)
    trainer.train(train_dataset,
                  resumable_with_seed = 666  # seed for shuffling dataset
                  )


if __name__ == '__main__':
    main()
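Since requirements.txt pins `accelerate` and the Trainer builds on it, multi-GPU training would typically be launched along these lines (setup details depend on your environment):

# accelerate config          # one-time: pick GPUs, mixed precision, etc.
# accelerate launch train.py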