# NOTE: Hugging Face Spaces page residue (status header, commit hashes, and a
# column of rendered line numbers) removed — it was not part of the source.
import asyncio
import json
import os
import random
import re
import shutil
import tempfile
import warnings
from datetime import datetime
from pathlib import Path

import edge_tts
import gradio as gr
import pandas as pd
import pytz
from gradio_client import Client

warnings.filterwarnings('ignore')
# ---- Constants ----
PAGE_SIZE = 10  # number of community stories revealed per "Load More" click
FILE_DIR_PATH = "gallery"  # on-disk folder holding saved stories + narrations

# [Category, opening line] pairs shown in the sidebar dataframe; selecting a
# row copies the opening line into the prompt box (see update_prompt below).
STORY_STARTERS = [
    ['Adventure', 'In a hidden temple deep in the Amazon...'],
    ['Mystery', 'The detective found an unusual note...'],
    ['Romance', 'Two strangers meet on a rainy evening...'],
    ['Sci-Fi', 'The space station received an unexpected signal...'],
    ['Fantasy', 'A magical portal appeared in the garden...'],
    ['Comedy-Sitcom', 'The new roommate arrived with seven emotional support animals...'],
    ['Comedy-Workplace', 'The office printer started sending mysterious messages...'],
    ['Comedy-Family', 'Grandma decided to become a social media influencer...'],
    ['Comedy-Supernatural', 'The ghost haunting the house was absolutely terrible at scaring people...'],
    ['Comedy-Travel', 'The GPS insisted on giving directions in interpretive dance descriptions...']
]

# Lazily-initialized gradio_client handle; populated on first use by init_client().
arxiv_client = None
def init_client():
    """Create (once) and return the remote LLM client used for story generation.

    Memoizes the Client in the module-level ``arxiv_client`` so repeated calls
    reuse the same connection. Note: a connection failure raises here; callers
    wrap this in try/except (see generate_story).
    """
    global arxiv_client
    if arxiv_client is None:
        arxiv_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    return arxiv_client
def save_story(story, audio_path):
    """Save a generated story (as markdown) and its narration audio to the gallery.

    Args:
        story: Full story text; its first line is used as the title/filename stem.
        audio_path: Path to the narration mp3, or None/empty to skip audio.

    Returns:
        Tuple ``(story_path, audio_path)`` of the saved files as strings
        (audio_path is None when no audio was supplied), or ``(None, None)``
        on any failure.
    """
    try:
        # Create gallery directory if it doesn't exist
        gallery_dir = Path(FILE_DIR_PATH)
        gallery_dir.mkdir(exist_ok=True)
        # Timestamp keeps filenames unique and is later used to pair
        # story/audio files (see list_all_outputs).
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # First line of the story doubles as the title.
        first_line = story.split('\n')[0].strip()
        # Drop characters unsafe in filenames and cap the length.
        safe_name = re.sub(r'[^\w\s-]', '', first_line)[:50]
        # Save story text as markdown (explicit encoding for portability).
        story_path = gallery_dir / f"story_{timestamp}_{safe_name}.md"
        with open(story_path, "w", encoding="utf-8") as f:
            f.write(f"# {first_line}\n\n{story}")
        # Copy audio file to gallery. shutil.copy is portable and avoids the
        # shell-injection risk of os.system(f"cp {audio_path} ...") with a
        # path derived from user-provided story text.
        new_audio_path = None
        if audio_path:
            new_audio_path = gallery_dir / f"audio_{timestamp}_{safe_name}.mp3"
            shutil.copy(audio_path, new_audio_path)
        return str(story_path), str(new_audio_path) if new_audio_path else None
    except Exception as e:
        print(f"Error saving to gallery: {str(e)}")
        return None, None
def list_all_outputs(generation_history):
    """Scan the gallery and prepend newly found narrations to the history.

    Args:
        generation_history: Comma-separated audio paths already shown in the
            community panel (hidden Textbox state).

    Returns:
        Tuple of (updated comma-separated history, gr.update(visible=True))
        for the community column.
    """
    try:
        directory_path = FILE_DIR_PATH
        if not os.path.exists(directory_path):
            return "", gr.update(visible=True)
        # Get all matched pairs of story/audio files.
        file_pairs = []
        for story_file in Path(directory_path).glob("story_*.md"):
            # Filenames look like story_YYYYMMDD_HHMMSS_title.md: the timestamp
            # spans TWO underscore-separated fields, so join parts [1:3].
            # (Using only parts[1] — the date — could pair a story with the
            # wrong narration from the same day.)
            parts = story_file.stem.split('_')
            if len(parts) < 3:
                continue
            timestamp = '_'.join(parts[1:3])
            audio_files = list(Path(directory_path).glob(f"audio_{timestamp}_*.mp3"))
            if audio_files:  # Only include if we have both story and audio
                file_pairs.append((story_file, audio_files[0]))
        # Sort by modification time, newest first
        file_pairs.sort(key=lambda x: os.path.getmtime(x[0]), reverse=True)
        history_list = generation_history.split(',') if generation_history else []
        updated_files = [str(audio) for _, audio in file_pairs if str(audio) not in history_list]
        updated_history = updated_files + history_list
        return ','.join(updated_history), gr.update(visible=True)
    except Exception as e:
        print(f"Error loading community generations: {str(e)}")
        return "", gr.update(visible=True)
def increase_list_size(list_size):
    """Grow the visible community-stories list by one page (PAGE_SIZE items)."""
    return PAGE_SIZE + list_size
def generate_story(prompt, model_choice):
    """Ask the remote LLM service to write a story for the given prompt.

    Args:
        prompt: The story concept/starter text.
        model_choice: Model identifier forwarded to the remote endpoint.

    Returns:
        The generated story text, or an error-message string on failure
        (callers detect this via the "Error" prefix).
    """
    try:
        llm = init_client()
        if llm is None:
            return "Error: Story generation service is not available."
        return llm.predict(
            prompt=prompt,
            llm_model_picked=model_choice,
            stream_outputs=True,
            api_name="/ask_llm",
        )
    except Exception as e:
        return f"Error generating story: {str(e)}"
async def generate_speech(text, voice="en-US-AriaNeural"):
    """Synthesize *text* to an mp3 using edge-tts.

    Args:
        text: The text to narrate.
        voice: Edge TTS voice name (default "en-US-AriaNeural").

    Returns:
        Path to a temporary mp3 file, or None on failure.
    """
    try:
        # Reserve a named temp file, then let edge-tts write into it.
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        output_path = tmp.name
        tmp.close()
        speaker = edge_tts.Communicate(text, voice)
        await speaker.save(output_path)
        return output_path
    except Exception as e:
        print(f"Error in text2speech: {str(e)}")
        return None
def process_story_and_audio(prompt, model_choice):
    """Full pipeline: generate story text, narrate it, archive both to the gallery.

    Args:
        prompt: Story concept text from the UI.
        model_choice: Selected model identifier.

    Returns:
        Tuple of (story text or error message, audio file path or None).
    """
    try:
        story = generate_story(prompt, model_choice)
        # Surface generation failures directly in the story textbox.
        if isinstance(story, str) and story.startswith("Error"):
            return story, None
        audio_path = asyncio.run(generate_speech(story))
        # Best-effort archival; saved paths are not surfaced in the UI.
        save_story(story, audio_path)
        return story, audio_path
    except Exception as e:
        return f"Error: {str(e)}", None
# CSS for the community-generations panel: a pulsing accent border on
# #live_gen (reuses Gradio's built-in pulse keyframes) and a fixed-height
# scrollable list for #live_gen_items.
css = '''
#live_gen:before {
content: '';
animation: svelte-z7cif2-pulseStart 1s cubic-bezier(.4,0,.6,1), svelte-z7cif2-pulse 2s cubic-bezier(.4,0,.6,1) 1s infinite;
border: 2px solid var(--color-accent);
background: transparent;
z-index: var(--layer-1);
pointer-events: none;
position: absolute;
height: 100%;
width: 100%;
border-radius: 7px;
}
#live_gen_items{
max-height: 570px;
overflow-y: scroll;
}
'''
# Create the Gradio interface.
# Layout: a main column (prompt, model picker, generate button, story text,
# narration audio) and a sidebar (story starters + auto-refreshing community
# stories panel).
with gr.Blocks(title="AI Story Generator", css=css) as demo:
    gr.Markdown("""
# π AI Story Generator & Narrator
Generate creative stories, listen to them, and build your gallery!
""")
    # Hidden state: comma-separated audio paths already shown, and the
    # current number of community items to render.
    generation_history = gr.Textbox(visible=False)
    list_size = gr.Number(value=PAGE_SIZE, visible=False)
    with gr.Row():
        with gr.Column(scale=3):
            with gr.Row():
                prompt_input = gr.Textbox(
                    label="Story Concept",
                    placeholder="Enter your story idea...",
                    lines=3
                )
            with gr.Row():
                model_choice = gr.Dropdown(
                    label="Model",
                    choices=[
                        "mistralai/Mixtral-8x7B-Instruct-v0.1",
                        "mistralai/Mistral-7B-Instruct-v0.2"
                    ],
                    value="mistralai/Mixtral-8x7B-Instruct-v0.1"
                )
                generate_btn = gr.Button("Generate Story")
            with gr.Row():
                story_output = gr.Textbox(
                    label="Generated Story",
                    lines=10,
                    interactive=False
                )
            with gr.Row():
                audio_output = gr.Audio(
                    label="Story Narration",
                    type="filepath"
                )
        # Sidebar with Story Starters and Community Generations
        with gr.Column(scale=1):
            gr.Markdown("### π Story Starters")
            story_starters = gr.Dataframe(
                value=STORY_STARTERS,
                headers=["Category", "Starter"],
                interactive=False
            )
            # Community Generations section
            with gr.Column(elem_id="live_gen") as community_list:
                gr.Markdown("### π¬ Community Stories")
                with gr.Column(elem_id="live_gen_items"):
                    # Re-rendered whenever generation_history or list_size
                    # changes (gr.render reactive pattern).
                    @gr.render(inputs=[generation_history, list_size])
                    def show_output_list(generation_history, list_size):
                        history_list = generation_history.split(',') if generation_history else []
                        # Only the first `list_size` entries are shown;
                        # "Load More" bumps list_size by a page.
                        history_list_latest = history_list[:list_size]
                        for audio_path in history_list_latest:
                            if not audio_path or not os.path.exists(audio_path):
                                continue
                            try:
                                # Derive the story filename from the audio
                                # filename (shared timestamp/title stem).
                                story_path = audio_path.replace('audio_', 'story_').replace('.mp3', '.md')
                                if not os.path.exists(story_path):
                                    continue
                                # Read story content
                                with open(story_path, 'r') as file:
                                    story_content = file.read()
                                # First markdown line ("# title") holds the title.
                                title = story_content.split('\n')[0].replace('# ', '')
                                with gr.Group():
                                    gr.Markdown(value=f"### {title}")
                                    gr.Audio(value=audio_path)
                            except Exception as e:
                                print(f"Error showing generation: {str(e)}")
                                continue
                load_more = gr.Button("Load More Stories")
                load_more.click(fn=increase_list_size, inputs=list_size, outputs=list_size)
    # Event handlers
    def update_prompt(evt: gr.SelectData):
        # Selected dataframe row index -> that starter's opening line (col 1).
        return STORY_STARTERS[evt.index[0]][1]
    story_starters.select(update_prompt, None, prompt_input)
    generate_btn.click(
        fn=process_story_and_audio,
        inputs=[prompt_input, model_choice],
        outputs=[story_output, audio_output]
    )
    # Auto-refresh: poll the gallery every 2 seconds so new community
    # stories appear without user interaction.
    demo.load(fn=list_all_outputs, inputs=generation_history, outputs=[generation_history, community_list], every=2)
# Script entry point; removed the trailing " |" scrape artifact that made
# this line a syntax error.
if __name__ == "__main__":
    demo.launch()