# AI Story Generator & Narrator — Hugging Face Spaces Gradio app
# Standard library
import asyncio
import json
import os
import random
import re
import tempfile
import warnings
from datetime import datetime
from pathlib import Path

# Third party
import edge_tts
import gradio as gr
import pandas as pd
import pytz
from gradio_client import Client

warnings.filterwarnings('ignore')

# Number of community stories revealed per "Load More" click.
PAGE_SIZE = 10
# Directory where generated stories (.md) and narrations (.mp3) persist.
FILE_DIR_PATH = "gallery"

# [Category, opening line] pairs shown in the Story Starters table;
# selecting a row copies its starter into the prompt box.
STORY_STARTERS = [
    ['Adventure', 'In a hidden temple deep in the Amazon...'],
    ['Mystery', 'The detective found an unusual note...'],
    ['Romance', 'Two strangers meet on a rainy evening...'],
    ['Sci-Fi', 'The space station received an unexpected signal...'],
    ['Fantasy', 'A magical portal appeared in the garden...'],
    ['Comedy-Sitcom', 'The new roommate arrived with seven emotional support animals...'],
    ['Comedy-Workplace', 'The office printer started sending mysterious messages...'],
    ['Comedy-Family', 'Grandma decided to become a social media influencer...'],
    ['Comedy-Supernatural', 'The ghost haunting the house was absolutely terrible at scaring people...'],
    ['Comedy-Travel', 'The GPS insisted on giving directions in interpretive dance descriptions...']
]

# Lazily-created singleton connection to the remote Space (see init_client).
arxiv_client = None
def init_client():
    """Return the shared gradio_client.Client, creating it on first use.

    Uses the module-level ``arxiv_client`` as a singleton so the remote
    Space connection is established only once per process.

    Returns:
        The connected Client instance.
    """
    global arxiv_client
    if arxiv_client is None:
        arxiv_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    return arxiv_client
def save_story(story, audio_path):
    """Persist a story (as markdown) and its narration audio to the gallery.

    Parameters:
        story: full story text; its first line becomes the title.
        audio_path: path to a generated .mp3, or None/'' to skip audio.

    Returns:
        (story_path, audio_path) as strings on success; the audio element
        is None when no audio was supplied. (None, None) on any failure.
    """
    try:
        # Local import keeps this edit self-contained.
        import shutil

        gallery_dir = Path(FILE_DIR_PATH)
        gallery_dir.mkdir(exist_ok=True)

        # Timestamp keeps filenames unique; safe_name strips characters
        # that are unsafe in filenames (keeps word chars, spaces, dashes).
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        first_line = story.split('\n')[0].strip()
        safe_name = re.sub(r'[^\w\s-]', '', first_line)[:50]

        story_path = gallery_dir / f"story_{timestamp}_{safe_name}.md"
        with open(story_path, "w") as f:
            f.write(f"# {first_line}\n\n{story}")

        # Copy (not move) the temp audio so the caller's path stays valid.
        # shutil.copy replaces os.system("cp ...") — the shell form broke on
        # paths with spaces/quotes and was a command-injection risk.
        new_audio_path = None
        if audio_path:
            new_audio_path = gallery_dir / f"audio_{timestamp}_{safe_name}.mp3"
            shutil.copy(audio_path, new_audio_path)

        return str(story_path), str(new_audio_path) if new_audio_path else None
    except Exception as e:
        # Best-effort persistence: gallery failure must not break generation.
        print(f"Error saving to gallery: {str(e)}")
        return None, None
def list_all_outputs(generation_history):
    """Refresh the community-history string with newly saved story/audio pairs.

    Scans the gallery for story_*.md files that have a matching
    audio_*.mp3 (same timestamp), newest first, and prepends any audio
    paths not already present in ``generation_history``.

    Parameters:
        generation_history: comma-separated audio paths seen so far
            ('' on first load).

    Returns:
        (updated_history_csv, gr.update(visible=True)) — the second value
        keeps the community column visible.
    """
    try:
        directory_path = FILE_DIR_PATH
        if not os.path.exists(directory_path):
            return "", gr.update(visible=True)

        # Collect only complete story+audio pairs.
        file_pairs = []
        for story_file in Path(directory_path).glob("story_*.md"):
            # Filenames are story_<date>_<time>_<name>.md. Join date AND time
            # so the audio glob matches this exact generation; the previous
            # split('_')[1] kept only the date and could pair any same-day audio.
            timestamp = '_'.join(story_file.stem.split('_')[1:3])
            audio_files = list(Path(directory_path).glob(f"audio_{timestamp}_*.mp3"))
            if audio_files:
                file_pairs.append((story_file, audio_files[0]))

        # Newest first by story-file modification time.
        file_pairs.sort(key=lambda pair: os.path.getmtime(pair[0]), reverse=True)

        history_list = generation_history.split(',') if generation_history else []
        new_entries = [str(audio) for _, audio in file_pairs
                       if str(audio) not in history_list]
        return ','.join(new_entries + history_list), gr.update(visible=True)
    except Exception as e:
        print(f"Error loading community generations: {str(e)}")
        return "", gr.update(visible=True)
def increase_list_size(list_size):
    """Return the next page size: the current size grown by one page.

    Parameters:
        list_size: currently visible number of community generations.

    Returns:
        list_size plus PAGE_SIZE.
    """
    new_size = PAGE_SIZE + list_size
    return new_size
def generate_story(prompt, model_choice):
    """Generate a story via the remote Space's /ask_llm endpoint.

    Parameters:
        prompt: the user's story concept, passed verbatim to the LLM.
        model_choice: model identifier (one of the dropdown choices).

    Returns:
        The generated story text, or a string starting with "Error" on
        failure — callers check that prefix instead of catching exceptions.
    """
    try:
        client = init_client()
        if client is None:
            return "Error: Story generation service is not available."
        return client.predict(
            prompt=prompt,
            llm_model_picked=model_choice,
            stream_outputs=True,
            api_name="/ask_llm",
        )
    except Exception as e:
        return f"Error generating story: {str(e)}"
async def generate_speech(text, voice="en-US-AriaNeural"):
    """Synthesize ``text`` to an mp3 file using edge-tts.

    Parameters:
        text: the text to narrate.
        voice: edge-tts voice name (default US English "Aria").

    Returns:
        Path to a temporary .mp3 file, or None on failure. The file is
        created with delete=False, so the caller owns its lifetime.
    """
    try:
        communicate = edge_tts.Communicate(text, voice)
        # Reserve a unique temp filename; the file handle is closed before
        # edge-tts writes to the path.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
        await communicate.save(tmp_path)
        return tmp_path
    except Exception as e:
        print(f"Error in text2speech: {str(e)}")
        return None
def process_story_and_audio(prompt, model_choice):
    """Generate a story, narrate it, and archive both to the gallery.

    This is the click handler for the Generate button.

    Parameters:
        prompt: user story concept.
        model_choice: model identifier for generate_story.

    Returns:
        (story_text, audio_path) for the UI outputs, or
        (error_message, None) on failure.
    """
    try:
        story = generate_story(prompt, model_choice)
        # generate_story signals failure via an "Error..." string, not an exception.
        if isinstance(story, str) and story.startswith("Error"):
            return story, None

        audio_path = asyncio.run(generate_speech(story))

        # Persist to the shared gallery; failures there are non-fatal and
        # the return values are not needed here.
        save_story(story, audio_path)

        return story, audio_path
    except Exception as e:
        return f"Error: {str(e)}", None
# CSS for the community panel: a pulsing accent border on #live_gen and a
# scrollable list of generations in #live_gen_items.
css = '''
#live_gen:before {
    content: '';
    animation: svelte-z7cif2-pulseStart 1s cubic-bezier(.4,0,.6,1), svelte-z7cif2-pulse 2s cubic-bezier(.4,0,.6,1) 1s infinite;
    border: 2px solid var(--color-accent);
    background: transparent;
    z-index: var(--layer-1);
    pointer-events: none;
    position: absolute;
    height: 100%;
    width: 100%;
    border-radius: 7px;
}
#live_gen_items{
    max-height: 570px;
    overflow-y: scroll;
}
'''
# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
with gr.Blocks(title="AI Story Generator", css=css) as demo:
    gr.Markdown("""
    # π AI Story Generator & Narrator
    Generate creative stories, listen to them, and build your gallery!
    """)

    # Hidden state: CSV of known audio paths, and the current page size
    # for the community list.
    generation_history = gr.Textbox(visible=False)
    list_size = gr.Number(value=PAGE_SIZE, visible=False)

    with gr.Row():
        # Main column: prompt, model picker, story text, narration audio.
        with gr.Column(scale=3):
            with gr.Row():
                prompt_input = gr.Textbox(
                    label="Story Concept",
                    placeholder="Enter your story idea...",
                    lines=3
                )
            with gr.Row():
                model_choice = gr.Dropdown(
                    label="Model",
                    choices=[
                        "mistralai/Mixtral-8x7B-Instruct-v0.1",
                        "mistralai/Mistral-7B-Instruct-v0.2"
                    ],
                    value="mistralai/Mixtral-8x7B-Instruct-v0.1"
                )
                generate_btn = gr.Button("Generate Story")
            with gr.Row():
                story_output = gr.Textbox(
                    label="Generated Story",
                    lines=10,
                    interactive=False
                )
            with gr.Row():
                audio_output = gr.Audio(
                    label="Story Narration",
                    type="filepath"
                )

        # Sidebar: story starters table and live community gallery.
        with gr.Column(scale=1):
            gr.Markdown("### π Story Starters")
            story_starters = gr.Dataframe(
                value=STORY_STARTERS,
                headers=["Category", "Starter"],
                interactive=False
            )

            # Community Generations section.
            with gr.Column(elem_id="live_gen") as community_list:
                gr.Markdown("### π¬ Community Stories")
                with gr.Column(elem_id="live_gen_items"):
                    def show_output_list(generation_history, list_size):
                        # NOTE(review): this renderer is defined but never wired
                        # to any event or decorator in the visible source —
                        # presumably it carried a @gr.render(inputs=[...])
                        # decorator originally. Confirm before relying on it.
                        history_list = generation_history.split(',') if generation_history else []
                        history_list_latest = history_list[:list_size]
                        for audio_path in history_list_latest:
                            if not audio_path or not os.path.exists(audio_path):
                                continue
                            try:
                                # Derive the story markdown path from the audio path.
                                story_path = audio_path.replace('audio_', 'story_').replace('.mp3', '.md')
                                if not os.path.exists(story_path):
                                    continue
                                with open(story_path, 'r') as file:
                                    story_content = file.read()
                                # First markdown line is "# <title>".
                                title = story_content.split('\n')[0].replace('# ', '')
                                with gr.Group():
                                    gr.Markdown(value=f"### {title}")
                                    gr.Audio(value=audio_path)
                            except Exception as e:
                                print(f"Error showing generation: {str(e)}")
                                continue

                load_more = gr.Button("Load More Stories")
                load_more.click(fn=increase_list_size, inputs=list_size, outputs=list_size)

    # Event handlers -------------------------------------------------------

    def update_prompt(evt: gr.SelectData):
        # Row index of the selected Dataframe cell -> that row's starter text.
        return STORY_STARTERS[evt.index[0]][1]

    story_starters.select(update_prompt, None, prompt_input)

    generate_btn.click(
        fn=process_story_and_audio,
        inputs=[prompt_input, model_choice],
        outputs=[story_output, audio_output]
    )

    # Poll the gallery every 2 seconds so new community stories appear live.
    demo.load(fn=list_all_outputs, inputs=generation_history,
              outputs=[generation_history, community_list], every=2)

if __name__ == "__main__":
    demo.launch()