# AI Story Generator — Hugging Face Space app (page header "Spaces: Running" removed from scrape)
# Standard library
import asyncio
import json
import os
import random
import re
import tempfile
import time
import warnings
from datetime import datetime
from zoneinfo import ZoneInfo

# Third-party
import edge_tts
import gradio as gr
import pytz
from gradio_client import Client

# Silence noisy dependency warnings in the hosted Space.
warnings.filterwarnings('ignore')
# Initialize the Gradio client for model access
def initialize_clients():
    """Connect to the ArXiv RAG Space and return the client, or None on failure.

    Failures are logged to stdout rather than raised so the app can still
    start and report "service is not available" to the user.
    """
    try:
        return Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    except Exception as exc:
        print(f"Error initializing client: {str(exc)}")
        return None
# Create the shared RAG client once per interpreter session.
# BUG FIX: the original guard tested the lowercase name "client", which is
# never the name being assigned (CLIENT), so the check could never prevent
# re-initialization.  Guard the actual global instead.
if "CLIENT" not in globals():
    CLIENT = initialize_clients()
# Helper function to generate a filename
def gen_AI_IO_filename(display_query, output):
    """Build a timestamped markdown filename for an AI prompt/output pair.

    Args:
        display_query: Short label for the interaction; truncated to 50 chars.
        output: Model output; the first 100 chars are slug-ified into the name.

    Returns:
        A ``.md`` filename of the form
        ``<US-Central timestamp> - <query> - <output-slug>.md``.
    """
    # Stamp files in US Central time.  Uses stdlib zoneinfo instead of the
    # third-party pytz dependency; "America/Chicago" resolves identically.
    now_central = datetime.now(ZoneInfo("America/Chicago"))
    timestamp = now_central.strftime("%Y-%m-%d-%I-%M-%S-%f-%p")
    display_query = display_query[:50]
    # Collapse runs of non-alphanumerics so the snippet is filesystem-safe.
    output_snippet = re.sub(r'[^A-Za-z0-9]+', '_', output[:100])
    return f"{timestamp} - {display_query} - {output_snippet}.md"
def create_file(filename, prompt, response, should_save=True):
    """Persist a prompt/response pair to `filename` as UTF-8 text.

    Does nothing when `should_save` is False.
    """
    if not should_save:
        return
    content = f"Prompt:\n{prompt}\n\nResponse:\n{response}"
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(content)
async def generate_speech(text, voice="en-US-AriaNeural"):
    """Generate speech from text using edge-tts.

    Args:
        text: The text to narrate.
        voice: edge-tts voice short name.

    Returns:
        Path to a temporary ``.mp3`` file, or None on failure.
    """
    tmp_path = None
    try:
        communicate = edge_tts.Communicate(text, voice)
        # delete=False so the file survives for Gradio to stream; the context
        # manager only reserves the path before edge-tts writes into it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
        await communicate.save(tmp_path)
        return tmp_path
    except Exception as e:
        # FIX: don't leak the reserved temp file when synthesis fails partway.
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
        print(f"Error in text2speech: {str(e)}")
        return None
def generate_story(prompt, model_choice):
    """Generate story using specified model through ArXiv RAG pattern"""
    try:
        # Bail out early if the remote Space client never came up.
        if CLIENT is None:
            return "Error: Story generation service is not available."

        # Pass 1: direct LLM draft from the chosen model.
        draft = CLIENT.predict(
            prompt=prompt,
            llm_model_picked=model_choice,
            stream_outputs=True,
            api_name="/ask_llm",
        )

        # Pass 2: retrieval-augmented rewrite of the same prompt.
        rag_result = CLIENT.predict(
            message=prompt,
            llm_results_use=10,
            database_choice="Semantic Search",
            llm_model_picked=model_choice,
            api_name="/update_with_rag_md",
        )

        # Combine both passes into the returned story text.
        story = draft + "\n\nEnhanced version:\n" + rag_result[0]

        # Persist each pass to its own markdown file.
        draft_name = gen_AI_IO_filename("Story", draft)
        create_file(draft_name, prompt, draft)
        enhanced_name = gen_AI_IO_filename("Enhanced", rag_result[0])
        create_file(enhanced_name, prompt, rag_result[0])

        return story
    except Exception as e:
        return f"Error generating story: {str(e)}"
def story_generator_interface(prompt, genre, structure, model_choice, num_scenes, words_per_scene):
    """Main story generation and audio creation function"""
    try:
        # Assemble the narration-oriented prompt from the UI selections.
        story_prompt = f"""Create a {genre} story following this structure: {structure}
Base concept: {prompt}
Make it engaging and suitable for narration.
Include vivid descriptions and sensory details.
Use approximately {words_per_scene} words per scene.
Create {num_scenes} distinct scenes."""

        story = generate_story(story_prompt, model_choice)

        # Generation failures come back as "Error..." strings; skip narration.
        if story.startswith("Error"):
            return story, None

        # Narrate the finished story with edge-tts.
        audio_path = asyncio.run(generate_speech(story))
        return story, audio_path
    except Exception as e:
        return f"An error occurred: {str(e)}", None
# Create Gradio interface
with gr.Blocks(title="AI Story Generator", theme=gr.themes.Soft()) as demo:
    # FIX: the heading emoji was mojibake ("๐ญ" — a UTF-8 emoji misdecoded
    # through a single-byte codepage); restored to the theatre masks emoji.
    gr.Markdown("""
    # 🎭 AI Story Generator
    Generate creative stories with AI and listen to them! Using Mistral and Mixtral models.
    """)

    # --- Input controls -------------------------------------------------
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(
                label="Story Concept",
                placeholder="Enter your story idea...",
                lines=3
            )
            genre_input = gr.Dropdown(
                label="Genre",
                choices=[
                    "Science Fiction",
                    "Fantasy",
                    "Mystery",
                    "Romance",
                    "Horror",
                    "Adventure",
                    "Historical Fiction",
                    "Comedy"
                ],
                value="Fantasy"
            )
            structure_input = gr.Dropdown(
                label="Story Structure",
                choices=[
                    "Three Act (Setup -> Confrontation -> Resolution)",
                    "Hero's Journey (Call -> Adventure -> Return)",
                    "Five Act (Exposition -> Rising Action -> Climax -> Falling Action -> Resolution)"
                ],
                value="Three Act (Setup -> Confrontation -> Resolution)"
            )
            model_choice = gr.Dropdown(
                label="Model",
                choices=[
                    "mistralai/Mixtral-8x7B-Instruct-v0.1",
                    "mistralai/Mistral-7B-Instruct-v0.2"
                ],
                value="mistralai/Mixtral-8x7B-Instruct-v0.1"
            )
            num_scenes = gr.Slider(
                label="Number of Scenes",
                minimum=3,
                maximum=7,
                value=5,
                step=1
            )
            words_per_scene = gr.Slider(
                label="Words per Scene",
                minimum=20,
                maximum=100,
                value=50,
                step=10
            )
            generate_btn = gr.Button("Generate Story")

    # --- Outputs --------------------------------------------------------
    with gr.Row():
        with gr.Column():
            story_output = gr.Textbox(
                label="Generated Story",
                lines=10,
                interactive=False
            )
    with gr.Row():
        # type="filepath" matches generate_speech returning a temp-file path.
        audio_output = gr.Audio(
            label="Story Narration",
            type="filepath"
        )

    # Wire the button to the end-to-end generate + narrate pipeline.
    generate_btn.click(
        fn=story_generator_interface,
        inputs=[
            prompt_input,
            genre_input,
            structure_input,
            model_choice,
            num_scenes,
            words_per_scene
        ],
        outputs=[
            story_output,
            audio_output
        ]
    )
if __name__ == "__main__":
    # Bind to all interfaces on the standard Gradio port (container-friendly)
    # and also publish a public share link, with debug logging enabled.
    demo.launch(
        share=True,
        debug=True,
        server_name="0.0.0.0",
        server_port=7860
    )