import spaces
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoFeatureExtractor, pipeline, set_seed
from transformers.models.speecht5.number_normalizer import EnglishNumberNormalizer
from string import punctuation
import re

from parler_tts import ParlerTTSForConditionalGeneration
# Device setup
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Gemma setup
pipe = pipeline(
    "text-generation",
    model="google/gemma-2-2b-it",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device=device,
)
# Parler-TTS model setup
repo_id = "parler-tts/parler-tts-mini-multilingual"
model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
text_tokenizer = AutoTokenizer.from_pretrained(repo_id)
description_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42
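# Fixed seed: set_seed(SEED) is called before each generation so the same
# text/description pair produces the same audio across runs.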
default_text = "La voix humaine est un instrument de musique au-dessus de tous les autres."
default_description = "a woman with a slightly low-pitched voice speaks slowly in a clear and close-sounding environment, but her delivery is quite monotone."
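# Each example row is [input text, voice description, do_format value], matching
# the inputs of the gr.Examples block defined further down.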
examples = [
    # English
    [
        "The human voice is nature's most perfect instrument.",
        "a woman with a slightly low-pitched voice speaks slowly in a very distant-sounding environment with a clean audio quality, delivering her message in a very monotone manner.",
        None,
    ],
    # French
    [
        "La voix humaine est un instrument de musique au-dessus de tous les autres.",
        "a woman with a slightly low-pitched voice speaks slowly in a clear and close-sounding environment, but her delivery is quite monotone.",
        None,
    ],
    # Spanish
    [
        "La voz es el reflejo del alma en el espejo del tiempo.",
        "a man with a moderate pitch voice speaks slowly with a slightly animated delivery in a very close-sounding environment with minimal background noise.",
        None,
    ],
    # Italian
    [
        "La voce umana è la più bella musica che esista al mondo.",
        "a man with a moderate pitch speaks slowly in a very noisy environment that sounds very distant, delivering his words in a monotone manner.",
        None,
    ],
    # Portuguese
    [
        "A voz é o espelho da alma e o som do coração.",
        "a man speaks slowly in a distant-sounding environment with a clean audio quality, delivering his message in a monotone voice at a moderate pitch.",
        None,
    ],
    # Polish
    [
        "Głos ludzki jest najpiękniejszym instrumentem świata.",
        "a man with a moderate pitch speaks in a monotone manner at a slightly slow pace, but the recording is quite noisy and sounds very distant.",
        None,
    ],
    # German
    [
        "Die menschliche Stimme ist das schönste Instrument der Welt.",
        "a man with a moderate pitch speaks slowly in a noisy environment with a flat tone of voice, creating a slightly close-sounding effect.",
        None,
    ],
    # Dutch
    [
        "De menselijke stem is het mooiste instrument dat er bestaat.",
        "a man with a moderate pitch speaks slightly slowly with an expressive and animated delivery in a very close-sounding environment with a bit of background noise.",
        None,
    ],
]
number_normalizer = EnglishNumberNormalizer()
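# Ask Gemma to normalize a free-form voice description into the template
# "a [gender] with a [pitch] voice speaks [speed] in a [environment], [delivery]",
# falling back to the raw description if the output does not mention a speaker.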
def format_description(raw_description, do_format=True):
    if not do_format:
        return raw_description

    # Extract defaults from the raw description or use fallbacks
    defaults = {
        "gender": "woman" if "woman" in raw_description.lower() else "man",
        "pitch": "moderate pitch",
        "speed": "slowly",
        "environment": "close-sounding and clear",
        "delivery": "with monotone delivery",
    }

    messages = [{
        "role": "user",
        "content": f"""Format this voice description and fill in any missing parameters with defaults:
"a [gender] with a [pitch] voice speaks [speed] in a [environment], [delivery]"
Required parameters (use these exact terms):
- gender: {defaults['gender']} if not specified
- pitch: {defaults['pitch']} if not specified
- speed: {defaults['speed']} if not specified
- environment: {defaults['environment']} if not specified
- delivery: {defaults['delivery']} if not specified
Input: {raw_description}
Return only the formatted description, nothing else."""
    }]

    outputs = pipe(messages, max_new_tokens=100)
    formatted = outputs[0]["generated_text"][-1]["content"].strip()

    # Use Gemma's output only if it looks like a valid voice description
    if "a woman" in formatted.lower() or "a man" in formatted.lower():
        return formatted
    return raw_description
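# Normalize the transcript before tokenization: spell out numbers, drop hyphens,
# ensure terminal punctuation, and split all-caps abbreviations into letters.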
def preprocess(text):
    text = number_normalizer(text).strip()
    text = text.replace("-", " ")
    if text and text[-1] not in punctuation:
        text = f"{text}."

    abbreviations_pattern = r'\b[A-Z][A-Z\.]+\b'

    def separate_abb(chunk):
        chunk = chunk.replace(".", "")
        return " ".join(chunk)

    abbreviations = re.findall(abbreviations_pattern, text)
    for abv in abbreviations:
        if abv in text:
            text = text.replace(abv, separate_abb(abv))
    return text
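# Generate speech: optionally reformat the description with Gemma, tokenize the
# description and the transcript separately (Parler-TTS conditions on both), and
# return the description actually used together with the audio waveform.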
@spaces.GPU
def gen_tts(text, description, do_format=True):
    formatted_desc = format_description(description, do_format)

    inputs = description_tokenizer(formatted_desc.strip(), return_tensors="pt").to(device)
    prompt = text_tokenizer(preprocess(text), return_tensors="pt").to(device)

    set_seed(SEED)
    generation = model.generate(
        input_ids=inputs.input_ids,
        prompt_input_ids=prompt.input_ids,
        attention_mask=inputs.attention_mask,
        prompt_attention_mask=prompt.attention_mask,
        do_sample=True,
        temperature=1.0,
    )
    audio_arr = generation.cpu().numpy().squeeze()
    return formatted_desc, (SAMPLE_RATE, audio_arr)
# Gradio interface
css = """
#share-btn-container {
    display: flex;
    padding-left: 0.5rem !important;
    padding-right: 0.5rem !important;
    background-color: #000000;
    justify-content: center;
    align-items: center;
    border-radius: 9999px !important;
    width: 13rem;
    margin-top: 10px;
    margin-left: auto;
    flex: unset !important;
}
#share-btn {
    all: initial;
    color: #ffffff;
    font-weight: 600;
    cursor: pointer;
    font-family: 'IBM Plex Sans', sans-serif;
    margin-left: 0.5rem !important;
    padding-top: 0.25rem !important;
    padding-bottom: 0.25rem !important;
    right: 0;
}
#share-btn * {
    all: unset !important;
}
#share-btn-container div:nth-child(-n+2) {
    width: auto !important;
    min-height: 0px !important;
}
#share-btn-container .wrap {
    display: none !important;
}
"""
with gr.Blocks(css=css) as block:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                    Multi Parler-TTS 🗣️
                </h1>
            </div>
        </div>
        """
    )
    gr.HTML(
        """<p><a href="https://github.com/huggingface/parler-tts">Parler-TTS</a> is a training and inference library for
        high-fidelity text-to-speech (TTS) models.</p>
        <p>This <a href="https://huggingface.co/parler-tts/parler-tts-mini-multilingual">multilingual model</a> supports French, Spanish, Italian, Portuguese, Polish, German, Dutch, and English. It generates high-quality speech with features that can be controlled using a simple text prompt.</p>
        <p>By default, Parler-TTS generates 🎲 random voice characteristics. To ensure 🎯 <b>speaker consistency</b> across generations, try to use consistent descriptions in your prompts.</p>"""
    )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Input Text",
                lines=2,
                value=default_text,
            )
            raw_description = gr.Textbox(
                label="Voice Description",
                lines=2,
                value=default_description,
            )
            do_format = gr.Checkbox(
                label="Reformat description using Gemma 2 2B",
                value=True,
            )
            formatted_description = gr.Textbox(
                label="Used Description",
                lines=2,
            )
            generate_button = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            audio_out = gr.Audio(label="Parler-TTS generation", type="numpy")

    generate_button.click(
        fn=gen_tts,
        inputs=[input_text, raw_description, do_format],
        outputs=[formatted_description, audio_out],
    )
    gr.Examples(
        examples=examples,
        fn=gen_tts,
        inputs=[input_text, raw_description, do_format],
        outputs=[formatted_description, audio_out],
        cache_examples=True,
    )

    gr.HTML(
        """<p>Tips for ensuring good generation:
        <ul>
            <li>Include the term "very clear audio" to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
            <li>Punctuation can be used to control the prosody of the generations</li>
            <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
        </ul>
        </p>"""
    )

block.queue()
block.launch(share=True)