# storyverse_weaver/app.py
import gradio as gr
import os
import time
import json  # For state saving/loading (conceptual)
from PIL import Image, ImageDraw, ImageFont  # For creating placeholder/error images
import random

# --- Core Logic Imports ---
from core.llm_services import (
    initialize_text_llms, is_gemini_text_ready, is_hf_text_ready,
    generate_text_gemini, generate_text_hf,
)
from core.image_services import (
    initialize_image_llms, STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED,
    generate_image_stabilityai, generate_image_dalle, ImageGenResponse,
)
from core.story_engine import Story, Scene
from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
from core.utils import basic_text_cleanup

# --- Initialize Services ---
initialize_text_llms()
initialize_image_llms()

# --- Get API Readiness Status ---
GEMINI_TEXT_IS_READY = is_gemini_text_ready()
HF_TEXT_IS_READY = is_hf_text_ready()
STABILITY_API_IS_READY = STABILITY_API_CONFIGURED
OPENAI_DALLE_IS_READY = OPENAI_DALLE_CONFIGURED

# --- Application Configuration (Models, Defaults) ---
TEXT_MODELS = {}
UI_DEFAULT_TEXT_MODEL_KEY = None
if GEMINI_TEXT_IS_READY:
    TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
    TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
if HF_TEXT_IS_READY:
    TEXT_MODELS["Mistral 7B (Narrate)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
if TEXT_MODELS:
    UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
else:
    TEXT_MODELS["No Text Models Configured"] = {"id": "dummy", "type": "none"}
    UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"

IMAGE_PROVIDERS = {}
UI_DEFAULT_IMAGE_PROVIDER_KEY = None
if STABILITY_API_IS_READY:
    IMAGE_PROVIDERS["🎨 Stability AI (SDXL)"] = "stability_ai"
if OPENAI_DALLE_IS_READY:
    IMAGE_PROVIDERS["🖼️ DALL-E 3 (Sim.)"] = "dalle"
if IMAGE_PROVIDERS:
    UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]
else:
    IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
    UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"

# --- Enhanced UI Theme and CSS ---
# A bespoke dark theme plus extensive CSS for a polished look.
# Note: the .set() keys below must match the theme variables exposed by the installed
# Gradio version; adjust any names your version reports as unknown.
omega_theme = gr.themes.Base(
    font=[gr.themes.GoogleFont("Lexend Deca"), "ui-sans-serif", "system-ui", "sans-serif"],
    primary_hue=gr.themes.colors.purple,
    secondary_hue=gr.themes.colors.pink,
    neutral_hue=gr.themes.colors.slate,
).set(
    body_background_fill="#0F0F1A",
    block_background_fill="#1A1A2E",
    block_border_width="1px",
    block_border_color="#2A2A4A",
    block_label_background_fill="#2A2A4A",
    input_background_fill="#2A2A4A",
    input_border_color="#4A4A6A",
    button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)",
    button_primary_text_color="white",
    button_secondary_background_fill="#4A4A6A",
    button_secondary_text_color="#E0E0FF",
    slider_color="#A020F0",
    text_color_primary="#E0E0FF",
    text_color_secondary="#B0B0D0",
    text_color_accent="#A020F0",
    text_color_link="#A020F0",  # corrected: this key name was previously mistyped
)
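
# Editor's sketch (optional startup diagnostic): the welcome banner below tells users to set
# "STORYVERSE_..." secrets, so a quick report of which ones appear to be missing can help
# debugging. The exact variable names here are ASSUMPTIONS for illustration only; the real
# names are whatever core.llm_services / core.image_services actually read.
def report_possible_missing_secrets(candidate_names=(
    "STORYVERSE_GOOGLE_API_KEY",      # hypothetical name for the Gemini key
    "STORYVERSE_HF_TOKEN",            # hypothetical name for the Hugging Face token
    "STORYVERSE_STABILITY_API_KEY",   # hypothetical name for the Stability AI key
    "STORYVERSE_OPENAI_API_KEY",      # hypothetical name for the DALL-E key
)):
    """Print any candidate secret names that are absent from the environment (names are illustrative)."""
    missing = [name for name in candidate_names if not os.getenv(name)]
    if missing:
        print(f"StoryVerse Weaver: possibly missing secrets (illustrative names): {', '.join(missing)}")

report_possible_missing_secrets()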
# The CSS below also hard-codes readable text colors on components that the theme
# variables may not cover (body text, inputs, dropdowns).
omega_css = """
body, .gradio-container { background-color: #0F0F1A !important; color: #D0D0E0 !important; }
.gradio-container { max-width: 1400px !important; margin: auto !important; border-radius: 20px; box-shadow: 0 10px 30px rgba(0,0,0,0.2); padding: 25px !important; border: 1px solid #2A2A4A;}
.gr-panel, .gr-box, .gr-accordion { background-color: #1A1A2E !important; border: 1px solid #2A2A4A !important; border-radius: 12px !important; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
.gr-markdown h1 { font-size: 2.8em !important; text-align: center; color: transparent; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
.gr-markdown h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
.input-section-header { font-size: 1.6em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 8px; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
.output-section-header { font-size: 1.8em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 12px;}
.gr-input input, .gr-input textarea, .gr-dropdown select, .gr-textbox textarea { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important; padding: 10px !important;}
.gr-button { border-radius: 8px !important; font-weight: 500 !important; transition: all 0.2s ease-in-out !important;}
.gr-button-primary:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
.panel_image { border-radius: 12px !important; overflow: hidden; box-shadow: 0 6px 15px rgba(0,0,0,0.25) !important; background-color: #23233A;}
.panel_image img { max-height: 600px !important; }
.gallery_output { background-color: transparent !important; border: none !important; }
.gallery_output .thumbnail-item { border-radius: 8px !important; box-shadow: 0 3px 8px rgba(0,0,0,0.2) !important; margin: 6px !important; transition: transform 0.2s ease; height: 180px !important; width: 180px !important;}
.gallery_output .thumbnail-item:hover { transform: scale(1.05); }
.status_text { font-weight: 500; padding: 12px 18px; text-align: center; border-radius: 8px; margin-top:12px; border: 1px solid transparent; font-size: 1.05em;}
.error_text { background-color: #401010 !important; color: #FFB0B0 !important; border-color: #802020 !important; }
.success_text { background-color: #104010 !important; color: #B0FFB0 !important; border-color: #208020 !important;}
.processing_text { background-color: #102040 !important; color: #B0D0FF !important; border-color: #204080 !important;}
.important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
.gr-tabitem { background-color: #1A1A2E !important; border-radius: 0 0 12px 12px !important; padding: 15px !important;}
.gr-tab-button.selected { background-color: #2A2A4A !important; color: white !important; border-bottom: 3px solid #A020F0 !important; border-radius: 8px 8px 0 0 !important; font-weight: 600 !important;}
.gr-tab-button { color: #A0A0C0 !important; border-radius: 8px 8px 0 0 !important;}
.gr-accordion > .gr-block { border-top: 1px solid #2A2A4A !important; }
.gr-markdown code { background-color: #2A2A4A !important; color: #C0C0E0 !important; padding: 0.2em 0.5em; border-radius: 4px; }
.gr-markdown pre { background-color: #23233A !important; padding: 1em !important; border-radius: 6px !important; border: 1px solid #2A2A4A !important;}
.gr-markdown pre > code { padding: 0 !important; background-color: transparent !important; }
#surprise_button { background: linear-gradient(135deg, #ff7e5f 0%, #feb47b 100%) !important; font-weight:600 !important;}
#surprise_button:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(255,126,95,0.3) !important; }
"""

# --- Helper: Placeholder Image Creation ---
def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
    img = Image.new('RGB', size, color=color)
    draw = ImageDraw.Draw(img)
    try:
        font = ImageFont.truetype("arial.ttf", 40)  # Try to load a common font
    except IOError:
        font = ImageFont.load_default()
    # textbbox() does not accept multi-line strings; use the multiline variant when needed.
    if "\n" in text:
        bbox = draw.multiline_textbbox((0, 0), text, font=font)
    else:
        bbox = draw.textbbox((0, 0), text, font=font)
    text_width, text_height = bbox[2] - bbox[0], bbox[3] - bbox[1]
    x = (size[0] - text_width) / 2
    y = (size[1] - text_height) / 2
    draw.text((x, y), text, font=font, fill=text_color)
    return img
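
# --- Editor's sketches: plausible shapes for the real scene-generation helpers ---
# The orchestrator below currently SIMULATES its narrative and image steps; its comments refer
# to helpers named _generate_narrative_for_scene / _generate_image_for_scene. The sketches here
# show one plausible shape for those helpers. They ASSUME that:
#   * generate_text_gemini / generate_text_hf accept (prompt, model_id=..., system_prompt=...)
#     and return an object exposing .text / .error,
#   * generate_image_stabilityai / generate_image_dalle accept (prompt, negative_prompt=...)
#     and return an ImageGenResponse exposing .image (PIL) / .error,
#   * get_narrative_system_prompt / format_narrative_user_prompt / format_image_generation_prompt
#     accept the arguments shown.
# None of these signatures is confirmed by this file; adapt them to the actual core.* API.

def _generate_narrative_for_scene(scene_prompt_text, text_model_key, narrative_length):
    """Sketch: turn the user's scene idea into narrative text via the selected text model."""
    model_info = TEXT_MODELS.get(text_model_key, {"id": "dummy", "type": "none"})
    system_prompt = get_narrative_system_prompt("default")            # assumed helper signature
    user_prompt = format_narrative_user_prompt(scene_prompt_text, "")  # assumed helper signature
    log = [f"  Narrative: requesting {model_info['id']} ({narrative_length})"]
    if model_info["type"] == "gemini":
        response = generate_text_gemini(user_prompt, model_id=model_info["id"], system_prompt=system_prompt)
    elif model_info["type"] == "hf_text":
        response = generate_text_hf(user_prompt, model_id=model_info["id"], system_prompt=system_prompt)
    else:
        return "No text model is configured.", log
    if getattr(response, "error", None):
        log.append(f"  Narrative ERROR: {response.error}")
        return f"Narrative generation failed: {response.error}", log
    return basic_text_cleanup(response.text), log

def _generate_image_for_scene(scene_prompt_text, image_style_dropdown, artist_style_text,
                              negative_prompt_text, image_provider_key):
    """Sketch: turn the scene idea plus style selections into a PIL image via the selected provider."""
    provider = IMAGE_PROVIDERS.get(image_provider_key, "none")
    full_prompt = format_image_generation_prompt(scene_prompt_text, image_style_dropdown, artist_style_text)  # assumed signature
    log = [f"  Image: requesting provider '{provider}'"]
    if provider == "stability_ai":
        response = generate_image_stabilityai(full_prompt, negative_prompt=negative_prompt_text)
    elif provider == "dalle":
        response = generate_image_dalle(full_prompt, negative_prompt=negative_prompt_text)
    else:
        return None, "No image provider is configured.", log
    if getattr(response, "error", None):
        log.append(f"  Image ERROR: {response.error}")
        return None, response.error, log
    return response.image, None, log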

# --- StoryVerse Weaver Orchestrator (yields granular UI updates) ---
# NOTE: the narrative and image steps below are still SIMULATED placeholders; swap in the
# real helpers (see the sketches above) once core.llm_services / core.image_services are wired up.
def add_scene_to_story_orchestrator(
    current_story_obj: Story,
    scene_prompt_text: str,
    image_style_dropdown: str,
    artist_style_text: str,
    negative_prompt_text: str,
    text_model_key: str,
    image_provider_key: str,
    narrative_length: str,   # e.g. "Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"
    image_quality: str,      # e.g. "Standard", "High Detail", "Sketch Concept"
    progress=gr.Progress(track_tqdm=True),
):
    if not current_story_obj:
        current_story_obj = Story()  # Initialize on first run

    # Initial UI update: show "working" placeholders and disable the action buttons.
    yield {
        output_status_bar: gr.HTML(value=f"<p class='processing_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
        output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals..."), visible=True),
        output_latest_scene_narrative: gr.Markdown(value="Musing narrative...", visible=True),
        engage_button: gr.Button(interactive=False),
        surprise_button: gr.Button(interactive=False),
    }

    log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]

    if not scene_prompt_text.strip():
        error_msg = "Scene prompt cannot be empty!"
        log_accumulator.append(f"  VALIDATION ERROR: {error_msg}")
        yield {
            output_status_bar: gr.HTML(value=f"<p class='error_text'>{error_msg}</p>"),
            output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
            engage_button: gr.Button(interactive=True),
            surprise_button: gr.Button(interactive=True),
            story_state_output: current_story_obj,
            output_gallery: current_story_obj.get_all_scenes_for_gallery_display(),
        }
        return  # In a generator handler the last *yield* is the final update; a return value is ignored.

    # --- 1. Generate Narrative Text (simulated here) ---
    progress(0.1, desc="✍️ Crafting narrative...")
    time.sleep(0.5)  # Simulate work
    narrative_text_generated = (
        f"This is the AI-generated narrative for your idea: '{scene_prompt_text[:30]}...'. "
        f"It is rendered in a {narrative_length} style with attention to {image_quality} visual cues."
    )
    log_accumulator.append(f"  Narrative: Generated using {text_model_key}. Length: {narrative_length}")
    yield {
        output_latest_scene_narrative: gr.Markdown(value=f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
    }

    # --- 2. Generate Image (simulated here) ---
    progress(0.5, desc="🎨 Conjuring visuals...")
    time.sleep(1)  # Simulate work
    image_generated_pil = create_placeholder_image(
        f"Image for:\n{scene_prompt_text[:25]}...\nStyle: {image_style_dropdown}", text_color="#A020F0"
    )
    image_generation_error_message = None  # Assume success on this demo path
    log_accumulator.append(f"  Image: Generated using {image_provider_key}. Style: {image_style_dropdown}")
    yield {
        output_latest_scene_image: gr.Image(value=image_generated_pil),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
    }

    # --- 3. Add Scene to Story Object ---
    current_story_obj.add_scene_from_elements(
        user_prompt=scene_prompt_text,
        narrative_text=narrative_text_generated,
        image=image_generated_pil,
        image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
        image_provider=image_provider_key,
        error_message=image_generation_error_message,
    )

    # --- 4. Prepare Final Outputs for Gradio ---
    gallery_items_tuples = current_story_obj.get_all_scenes_for_gallery_display()
    _, latest_narr_for_display = current_story_obj.get_latest_scene_details_for_display()  # Image already shown above
    status_message_html = "<p class='success_text'>🌌 Scene Woven! Your StoryVerse expands...</p>"
    if image_generation_error_message:
        status_message_html = "<p class='error_text'>Scene added, but image generation had issues.</p>"
    log_accumulator.append(f"  Scene {current_story_obj.current_scene_number} successfully added to story object.")
    progress(1.0, desc="Scene Complete!")

    # Final yield: Gradio takes the last yielded value of a generator as the final update,
    # so everything the click handler's `outputs` list expects is included here.
    yield {
        output_status_bar: gr.HTML(value=status_message_html),
        story_state_output: current_story_obj,
        output_gallery: gr.Gallery(value=gallery_items_tuples, visible=True),
        output_latest_scene_image: gr.Image(value=image_generated_pil),
        output_latest_scene_narrative: gr.Markdown(value=latest_narr_for_display),
        output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
        engage_button: gr.Button(interactive=True),
        surprise_button: gr.Button(interactive=True),
    }

def clear_story_state_ui_wrapper():
    # Reset the story state and return fresh values for every output component.
    new_story = Story()
    placeholder_img = create_placeholder_image("Your StoryVerse is a blank canvas...", color="#1A1A2E")
    cleared_gallery = [(placeholder_img, "Your StoryVerse is new and untold...")]
    initial_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene idea in the panel to the left and let the AI help you weave your world!"
    status_msg = "<p class='processing_text'>📜 Story Cleared. A fresh canvas awaits your imagination!</p>"
    return {
        story_state_output: new_story,
        output_gallery: cleared_gallery,
        output_latest_scene_image: None,  # Clear latest image
        output_latest_scene_narrative: gr.Markdown(value=initial_narrative),
        output_status_bar: gr.HTML(value=status_msg),
        output_interaction_log_markdown: "Log Cleared. Ready for a new adventure!",
        scene_prompt_input: "",  # Clear the input prompt
    }


def surprise_me_func():
    # Simple "Surprise Me" feature: assemble a random scene idea, style preset, and artist.
    themes = ["Sci-Fi", "Fantasy", "Mystery", "Slice of Life", "Historical Fiction"]
    actions = ["discovers a hidden map", "encounters a mysterious stranger", "solves an ancient riddle",
               "embarks on a perilous journey", "attends a secret festival"]
    settings = ["in a bustling alien marketplace", "within a forgotten, vine-covered temple",
                "aboard a steampunk airship", "in a quiet, magical forest",
                "during a solar eclipse on a twin-mooned planet"]
    prompt = f"A character {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."
    style = random.choice(list(STYLE_PRESETS.keys()))
    artist = random.choice(["Greg Rutkowski", "Makoto Shinkai", "Moebius", "Rebecca Guay", ""] * 3)  # "" = no artist sometimes
    return prompt, style, artist

# --- Gradio UI Definition ---
with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨ - AI Story & World Weaver") as story_weaver_demo:
    # Output components must exist before the click handler can update them via yielded dicts.
    story_state_output = gr.State(Story())  # The actual state component for the Story object

    gr.Markdown("# ✨ StoryVerse Omega ✨\n### Craft Immersive Multimodal Worlds with AI")
    gr.HTML("<div class='important-note'>Welcome, Worldsmith! Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (STORYVERSE_...) are correctly set in Space Secrets!</div>")

    with gr.Accordion("🔧 AI Services Status & Info", open=False):
        status_text_list = []
        text_llm_ok = GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY
        image_gen_ok = STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY
        if not text_llm_ok and not image_gen_ok:
            status_text_list.append("<p class='error_text'>⚠️ CRITICAL: NO AI SERVICES.</p>")
        else:
            if text_llm_ok:
                status_text_list.append("<p class='success_text'>✅ Text Gen Ready.</p>")
            else:
                status_text_list.append("<p class='error_text'>⚠️ Text Gen NOT Ready.</p>")
            if image_gen_ok:
                status_text_list.append("<p class='success_text'>✅ Image Gen Ready.</p>")
            else:
                status_text_list.append("<p class='error_text'>⚠️ Image Gen NOT Ready.</p>")
        gr.HTML("".join(status_text_list))

    with gr.Row(equal_height=False, variant="panel"):
        with gr.Column(scale=7, min_width=450):  # Input panel
            gr.Markdown("### 💡 **Craft Your Scene**", elem_classes="input-section-header")
            with gr.Group():
                scene_prompt_input = gr.Textbox(
                    lines=7, label="Scene Vision (Description, Dialogue, Action):",
                    placeholder="e.g., Amidst swirling cosmic dust, Captain Eva pilots her damaged starfighter towards a colossal, ringed gas giant. Alarms blare. 'Just a little further,' she mutters, gripping the controls."
                )
                with gr.Row(elem_classes=["compact-row"]):
                    with gr.Column(scale=2):
                        image_style_input = gr.Dropdown(
                            choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())),
                            value="Default (Cinematic Realism)", label="Visual Style Preset"
                        )
                    with gr.Column(scale=2):
                        artist_style_input = gr.Textbox(label="Artist Inspiration (Optional):", placeholder="e.g., Moebius, Zdzisław Beksiński")
                negative_prompt_input = gr.Textbox(
                    lines=2, label="Exclude from Image (Negative Prompt):",
                    placeholder="Default exclusions applied. Add more if needed.", value=COMMON_NEGATIVE_PROMPTS
                )
            with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
                with gr.Group():
                    text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
                    image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
                    with gr.Row():
                        narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
                        image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
            with gr.Row(elem_classes=["compact-row"], equal_height=True):
                engage_button = gr.Button("🌌 Weave This Scene!", variant="primary", scale=3, icon="✨")
                surprise_button = gr.Button("🎲 Surprise Me!", variant="secondary", scale=1, icon="🎁", elem_id="surprise_button")
                clear_story_button = gr.Button("🗑️ New Story", variant="stop", scale=1, icon="♻️")  # "stop" variant signals caution

            output_status_bar = gr.HTML(value="<p class='processing_text'>Ready to weave your first masterpiece!</p>")

        with gr.Column(scale=10, min_width=700):  # Output panel
            gr.Markdown("### 🖼️ **Your Evolving StoryVerse**", elem_classes="output-section-header")
            with gr.Tabs(elem_id="output_tabs_elem"):
                with gr.TabItem("🌠 Latest Scene", id="latest_scene_tab", elem_id="latest_scene_tab_item"):
                    with gr.Row():
                        output_latest_scene_image = gr.Image(label="Latest Scene Image", type="pil", interactive=False, show_download_button=True, height=512, show_label=False, elem_classes=["panel_image"])
                        output_latest_scene_narrative = gr.Markdown(elem_id="latest_scene_narrative_md")
                with gr.TabItem("📚 Story Scroll", id="story_scroll_tab", elem_id="story_scroll_tab_item"):
                    output_gallery = gr.Gallery(label="Story Scroll", show_label=False, columns=4, object_fit="cover", height=700, preview=True, allow_preview=True, elem_classes=["gallery_output"])
                with gr.TabItem("⚙️ Interaction Log", id="log_tab", elem_id="log_tab_item"):
                    with gr.Accordion(label="Developer Interaction Log", open=False):  # Closed by default
                        output_interaction_log_markdown = gr.Markdown("Log will appear here...")

    # NOTE: any component the orchestrator updates via a yielded dict must also appear in the
    # `outputs=` list of the click handler; otherwise Gradio rejects the update. The progressive
    # yields update individual components, and the final yield supplies the complete state.
    engage_button.click(
        fn=add_scene_to_story_orchestrator,
        inputs=[
            story_state_output, scene_prompt_input, image_style_input, artist_style_input,
            negative_prompt_input, text_model_dropdown, image_provider_dropdown,
            narrative_length_dropdown, image_quality_dropdown,
        ],
        outputs=[
            story_state_output, output_gallery, output_latest_scene_image,
            output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown,
            engage_button, surprise_button,
        ],
    )
    clear_story_button.click(
        fn=clear_story_state_ui_wrapper,
        inputs=[],
        outputs=[
            # Must match the component keys in the dict returned by clear_story_state_ui_wrapper
            story_state_output, output_gallery, output_latest_scene_image,
            output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown,
            scene_prompt_input,  # Also clears the input field
        ],
    )

    surprise_button.click(
        fn=surprise_me_func,
        inputs=[],
        outputs=[scene_prompt_input, image_style_input, artist_style_input],
    )

    gr.Examples(
        examples=[
            ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
            ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
            ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
            ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"],
        ],
        inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
        label="🌌 Example Universes to Weave 🌌",
    )

    gr.HTML("<p style='text-align:center;'>✨ StoryVerse Omega™ - Weaving Worlds with Words and Pixels ✨</p>")

# --- Entry Point ---
if __name__ == "__main__":
    print("=" * 80)
    print("✨ StoryVerse Omega™ - AI Story & World Weaver - Launching... ✨")
    print(f"  Text LLM Ready (Gemini): {GEMINI_TEXT_IS_READY}")
    print(f"  Text LLM Ready (HF): {HF_TEXT_IS_READY}")
    print(f"  Image Provider Ready (Stability AI): {STABILITY_API_IS_READY}")
    print(f"  Image Provider Ready (DALL-E): {OPENAI_DALLE_IS_READY}")
    if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY):
        print("  🔴 WARNING: Not all required AI services are configured correctly. Functionality will be severely limited or fail.")
    print(f"  Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}")
    print(f"  Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
    print("=" * 80)
    story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)  # Set share=True for a public link if desired