import os

# Set the cache directory before transformers is imported, otherwise the
# default cache location is already fixed by the time this assignment runs.
os.environ['TRANSFORMERS_CACHE'] = '.cache'

import gradio as gr
from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch

# These imports are only needed by the commented-out alternatives further down.
import io
import base64
import numpy as np
from pydub import AudioSegment
print ("----- setting up pipeline -----") | |
synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts") | |
print ("----- setting up dataset -----") | |
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") | |
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0) | |
# You can replace this embedding with your own as well. | |
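# A hedged sketch of how a custom x-vector could be computed instead of the CMU
# ARCTIC embedding above. It assumes the optional speechbrain package and the
# speechbrain/spkrec-xvect-voxceleb model (neither is used elsewhere in this app),
# plus a hypothetical `waveform` variable holding a 16 kHz mono numpy array:
#
# from speechbrain.pretrained import EncoderClassifier
# xvector_model = EncoderClassifier.from_hparams(source="speechbrain/spkrec-xvect-voxceleb")
# with torch.no_grad():
#     emb = xvector_model.encode_batch(torch.tensor(waveform))          # (1, 1, 512)
#     speaker_embedding = torch.nn.functional.normalize(emb, dim=2).squeeze(0)  # (1, 512)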
print ("----- synthetizing audio -----") | |
#speech = synthesiser("Hello, my dog is cooler than you!", forward_params={"speaker_embeddings": speaker_embedding}) | |
#sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"]) | |
def greet(name):
    return "Hello " + name + "!!"


def synthesise_audio(text, forward_params=None):
    if len(text) > 100:
        raise ValueError("Error: the text is too long. Please limit your input to 100 characters.")
    print("----- synthesising audio -----")
    speech = synthesiser(text, forward_params={"speaker_embeddings": speaker_embedding})
    sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
    return "speech.wav"
# sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"]) | |
# return "speech.wav" | |
# Convert numpy array to audio | |
#with io.BytesIO() as f: | |
# sf.write(f, speech["audio"], samplerate=speech["sampling_rate"], format='wav') | |
# audio = f.getvalue() | |
# Convert numpy array to audio | |
#audio = np.int16(speech["audio"] * 32767) | |
#audio_segment = AudioSegment(audio, sample_width=2, frame_rate=speech["sampling_rate"], channels=1) | |
# Convert numpy array to list | |
#audio = speech["audio"] | |
#return speech["audio"] | |
#return audio | |
# Ensure audio is a numpy array | |
#if isinstance(speech["audio"], int): | |
# audio = np.array([speech["audio"]]) | |
#else: | |
# audio = speech["audio"] | |
# Create an in-memory buffer to store the audio data | |
#print("Creating in-memory buffer") | |
#audio_buffer = io.BytesIO() | |
# Write the audio data to the in-memory buffer | |
#print("Writing audio data to in-memory buffer") | |
#sf.write(audio_buffer, speech["audio"], samplerate=speech["sampling_rate"], format="WAV") | |
# Move the buffer cursor to the beginning of the buffer | |
#audio_buffer.seek(0) | |
# Read the audio data from the in-memory buffer into a numpy array | |
#print("Reading audio data from in-memory buffer") | |
#audio, sr = sf.read(audio_buffer) | |
#print("Audio data read from in-memory buffer, returning audio data and sample rate") | |
# Ensure audio is a numpy array before returning | |
#audio = np.array(audio) | |
#return audio, sr | |
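# A minimal sketch of the in-memory alternative summarised above. Gradio's Audio
# output component accepts a (sample_rate, numpy_array) tuple when declared as
# gr.Audio(type="numpy"), so no temporary WAV file is needed. The function name is
# illustrative and it is not wired into the interface below.
def synthesise_audio_in_memory(text):
    if len(text) > 100:
        raise ValueError("Error: the text is too long. Please limit your input to 100 characters.")
    speech = synthesiser(text, forward_params={"speaker_embeddings": speaker_embedding})
    # speech["audio"] is a float32 numpy array; Gradio handles playback conversion.
    return speech["sampling_rate"], speech["audio"]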
# demo = gr.Interface(fn=greet, inputs="text", outputs="text", description="----- TTS Testing -----")
input_text = gr.Textbox(lines=10, label="Type the text you want to convert to speech:")
# examples = gr.Examples(inputs=[["Feliz cumpleaños Nuria"]], outputs=[[]])
demo = gr.Interface(
    fn=synthesise_audio,
    inputs=input_text,
    outputs="audio",
    # outputs=gr.Audio(type="numpy"),  # alternative: return a (sample_rate, array) tuple instead of a file path
    description="----- manuai Text To Speech generator test -----",
    allow_flagging="never",  # allow_flagging expects a string ("never"/"auto"/"manual"), not a boolean
)

demo.launch(debug=True)