pip install gradio transformers datasets soundfile torch sentencepiece

import gradio as gr
import json
import torch
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from datasets import load_dataset
import soundfile as sf

# Load the SpeechT5 text-to-speech model, its processor, and the HiFi-GAN vocoder
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Load the custom pronunciation dictionary mapping technical terms to phonetic spellings
with open("/content/pronunciation_dict.json", "r") as f:
    pronunciation_dict = json.load(f)


# Replace known technical terms with their phonetic spellings before synthesis
def preprocess_text(text):
    for term, phonetic in pronunciation_dict.items():
        text = text.replace(term, phonetic)
    return text
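# The pronunciation_dict.json file itself is not shown here; it is assumed to map
# jargon terms to phonetic respellings that read naturally when spoken, e.g.
# (hypothetical entries):
#   {"API": "A P I", "OAuth": "oh auth", "CUDA": "koo duh", "NoSQL": "no sequel"}
# With entries like these, preprocess_text("CUDA and OAuth") would return
# "koo duh and oh auth" before the text reaches the tokenizer.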
# Convert input text to speech and return the path of the generated WAV file
def text_to_speech(input_text):
    # Apply the pronunciation dictionary before tokenization
    processed_text = preprocess_text(input_text)

    # Tokenize the processed text for the SpeechT5 model
    inputs = processor(text=processed_text, return_tensors="pt")

    # Load a speaker embedding (x-vector) that defines the output voice
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

    # Generate the waveform and decode it with the HiFi-GAN vocoder
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

    # Save the 16 kHz waveform to a file that Gradio can play back
    output_file = "speech_output.wav"
    sf.write(output_file, speech.numpy(), samplerate=16000)

    return output_file
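# Note: loading the x-vector dataset on every request works but is slow. In a
# long-running app you could fetch the speaker embedding once at startup, e.g.:
#   embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
#   speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
# and reuse speaker_embeddings inside text_to_speech.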
examples = [
    "We are using APIs and OAuth for authentication.",
    "CUDA and TensorFlow work together for deep learning models.",
    "The database uses NoSQL and supports JSON for data storage.",
    "Machine learning and artificial intelligence are advancing fast.",
    "Natural language processing techniques like GPT are widely adopted.",
]
# Build the Gradio interface: a text box for input, playable audio for output
iface = gr.Interface(
    fn=text_to_speech,
    inputs="text",
    outputs="audio",
    title="Text-to-Speech (TTS) Application",
    description="Enter text with technical jargon for TTS conversion.",
    examples=examples,
)
# share=True exposes a temporary public URL, which is useful when running in Colab
iface.launch(share=True)