import gradio as gr
import tempfile
import openai
import requests
import os
from functools import partial


def tts(
    input_text: str,
    model: str,
    voice: str,
    api_key: str,
    response_format: str = "mp3",
    speed: float = 1.0,
) -> str:
    """
    Convert input text to speech using OpenAI's Text-to-Speech API.
    (Function definition remains the same)
    """
    # (Function body remains the same)
    # ...
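    # The full body is elided above; what follows is a minimal sketch of what it
    # might look like, assuming the openai>=1.x Python client. Treat it as an
    # illustration, not necessarily the original implementation.
    client = openai.OpenAI(api_key=api_key)
    response = client.audio.speech.create(
        model=model,
        voice=voice,
        input=input_text,
        response_format=response_format,
        speed=speed,
    )
    # Write the returned audio bytes to a temporary file so Gradio can play it
    with tempfile.NamedTemporaryFile(
        suffix=f".{response_format}", delete=False
    ) as tmp:
        tmp.write(response.content)
    return tmp.name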


def main():
    """
    Main function to create and launch the Gradio interface.
    """
    MODEL_OPTIONS = ["tts-1", "tts-1-hd"]
    VOICE_OPTIONS = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
    RESPONSE_FORMAT_OPTIONS = ["mp3", "opus", "aac", "flac", "wav", "pcm"]

    # Predefine voice preview URLs
    VOICE_PREVIEW_URLS = {
        voice: f"https://cdn.openai.com/API/docs/audio/{voice}.wav"
        for voice in VOICE_OPTIONS
    }

    # Download audio previews to disk before initiating the interface
    PREVIEW_DIR = "voice_previews"
    os.makedirs(PREVIEW_DIR, exist_ok=True)

    VOICE_PREVIEW_FILES = {}
    for voice, url in VOICE_PREVIEW_URLS.items():
        local_file_path = os.path.join(PREVIEW_DIR, f"{voice}.wav")
        if not os.path.exists(local_file_path):
            try:
                response = requests.get(url)
                response.raise_for_status()
                with open(local_file_path, "wb") as f:
                    f.write(response.content)
            except requests.exceptions.RequestException as e:
                print(f"Failed to download {voice} preview: {e}")
        VOICE_PREVIEW_FILES[voice] = local_file_path

    # Set static paths for Gradio to serve
    gr.set_static_paths(paths=[PREVIEW_DIR])
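    # Note (assumption): gr.set_static_paths is a Gradio 4.x helper; it serves
    # files under PREVIEW_DIR directly instead of copying them into Gradio's cache.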

    with gr.Blocks(title="OpenAI - Text to Speech") as demo:
        gr.Markdown("# OpenAI Text-to-Speech Demo")

        with gr.Row():
            with gr.Column(scale=1):
                with gr.Group():
                    gr.Markdown("### Voice Preview")

                    # Function to play the selected voice sample
                    def play_voice_sample(voice):
                        return gr.update(
                            value=VOICE_PREVIEW_FILES[voice],
                            label=voice.capitalize(),
                        )

                    # Create the 'preview_audio' component without rendering it
                    # yet, so it can be placed below the voice buttons
                    preview_audio = gr.Audio(
                        interactive=False,
                        label="Echo",
                        value=VOICE_PREVIEW_FILES["echo"],
                        visible=True,
                        render=False,
                    )

                    # Create buttons for each voice
                    for voice in VOICE_OPTIONS:
                        voice_button = gr.Button(
                            value=voice.capitalize(),
                            variant="secondary",
                            size="sm",
                        )
                        voice_button.click(
                            fn=partial(play_voice_sample, voice=voice),
                            outputs=preview_audio,
                        )
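                        # functools.partial binds this iteration's `voice` to the
                        # callback; a bare lambda here would late-bind and every
                        # button would end up playing the last voice in the list.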

                    # Place the audio player below the buttons
                    preview_audio.render()

            with gr.Column(scale=1):
                api_key_input = gr.Textbox(
                    label="OpenAI API Key",
                    info="https://platform.openai.com/account/api-keys",
                    type="password",
                    placeholder="Enter your OpenAI API Key",
                )
                model_dropdown = gr.Dropdown(
                    choices=MODEL_OPTIONS,
                    label="Model",
                    value="tts-1",
                    info="Select tts-1 for speed or tts-1-hd for quality.",
                )
                voice_dropdown = gr.Dropdown(
                    choices=VOICE_OPTIONS,
                    label="Voice Options",
                    value="echo",
                    info="The voice to use when generating the audio.",
                )
                response_format_dropdown = gr.Dropdown(
                    choices=RESPONSE_FORMAT_OPTIONS,
                    label="Response Format",
                    value="mp3",
                )
                speed_slider = gr.Slider(
                    minimum=0.25,
                    maximum=4.0,
                    step=0.05,
                    label="Voice Speed",
                    value=1.0,
                )

            with gr.Column(scale=2):
                input_textbox = gr.Textbox(
                    label="Input Text",
                    lines=10,
                    placeholder="Type your text here...",
                )

                # Add a character counter below the input textbox
                char_count_text = gr.Markdown("0 / 4096")

                # Function to update the character count
                def update_char_count(input_text):
                    char_count = len(input_text)
                    return f"**{char_count} / 4096**"

                # Update the character count whenever the text changes
                input_textbox.change(
                    fn=update_char_count,
                    inputs=input_textbox,
                    outputs=char_count_text,
                )

                submit_button = gr.Button(
                    "Convert Text to Speech",
                    variant="primary",
                )

            with gr.Column(scale=1):
                output_audio = gr.Audio(label="Output Audio")

        # Define the event handler for the submit button
        def on_submit(
            input_text, model, voice, api_key, response_format, speed
        ):
            audio_file = tts(
                input_text, model, voice, api_key, response_format, speed
            )
            return audio_file

        # Trigger the conversion when the submit button is clicked
        submit_button.click(
            fn=on_submit,
            inputs=[
                input_textbox,
                model_dropdown,
                voice_dropdown,
                api_key_input,
                response_format_dropdown,
                speed_slider,
            ],
            outputs=output_audio,
        )

    # Launch the Gradio app with error display enabled
    demo.launch(show_error=True)


if __name__ == "__main__":
    main()
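

# Usage note (assumes this script is saved as app.py and that the gradio,
# openai, and requests packages are installed):
#   pip install gradio openai requests
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.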