mgbam committed
Commit 6c4b816 · verified · 1 Parent(s): 7ce8888

Update app.py

Files changed (1)
  1. app.py +121 -162
app.py CHANGED
@@ -10,7 +10,7 @@ import traceback
  # --- Core Logic Imports ---
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
  from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
- from core.story_engine import Story, Scene
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
  from core.utils import basic_text_cleanup
@@ -26,23 +26,18 @@ DALLE_IMAGE_IS_READY = is_dalle_ready()
  HF_IMAGE_IS_READY = is_hf_image_api_ready()

  # --- Application Configuration (Models, Defaults) ---
  TEXT_MODELS = {}
  UI_DEFAULT_TEXT_MODEL_KEY = None
  if GEMINI_TEXT_IS_READY:
  TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
- TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
- if HF_TEXT_IS_READY: # This will be used if Gemini is not ready
  TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
- TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
-
- if TEXT_MODELS: # Determine default text model
- if GEMINI_TEXT_IS_READY and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS:
- UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
- elif HF_TEXT_IS_READY and "Mistral 7B (Narrate via HF)" in TEXT_MODELS:
- UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
- elif TEXT_MODELS: # Fallback if preferred defaults are somehow not in the populated list
- UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
- else: # No text models configured at all
  TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
  UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"

@@ -50,66 +45,24 @@ IMAGE_PROVIDERS = {}
  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
  if DALLE_IMAGE_IS_READY:
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
- IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 2 (Legacy)"] = "dalle_2"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
- elif HF_IMAGE_IS_READY: # Fallback to HF if DALL-E not ready
  IMAGE_PROVIDERS["🎡 HF - SDXL Base"] = "hf_sdxl_base"
- IMAGE_PROVIDERS["🎠 HF - OpenJourney"] = "hf_openjourney"
- IMAGE_PROVIDERS["🌌 HF - SD v1.5"] = "hf_sd_1_5"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎡 HF - SDXL Base"
-
  if not IMAGE_PROVIDERS:
  IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
- elif not UI_DEFAULT_IMAGE_PROVIDER_KEY and IMAGE_PROVIDERS :
- UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]

  # --- Gradio UI Theme and CSS ---
- omega_theme = gr.themes.Base(
- font=[gr.themes.GoogleFont("Lexend Deca"), "ui-sans-serif", "system-ui", "sans-serif"],
- primary_hue=gr.themes.colors.purple, secondary_hue=gr.themes.colors.pink, neutral_hue=gr.themes.colors.slate
- ).set(
- body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", block_border_width="1px",
- block_border_color="#2A2A4A", block_label_background_fill="#2A2A4A", input_background_fill="#2A2A4A",
- input_border_color="#4A4A6A", button_primary_background_fill="linear-gradient(135deg, #7F00FF 0%, #E100FF 100%)",
- button_primary_text_color="white", button_secondary_background_fill="#4A4A6A",
- button_secondary_text_color="#E0E0FF", slider_color="#A020F0"
- )
- omega_css = """
- body, .gradio-container { background-color: #0F0F1A !important; color: #D0D0E0 !important; }
- .gradio-container { max-width: 1400px !important; margin: auto !important; border-radius: 20px; box-shadow: 0 10px 30px rgba(0,0,0,0.2); padding: 25px !important; border: 1px solid #2A2A4A;}
- .gr-panel, .gr-box, .gr-accordion { background-color: #1A1A2E !important; border: 1px solid #2A2A4A !important; border-radius: 12px !important; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
- .gr-markdown h1 { font-size: 2.8em !important; text-align: center; color: transparent; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
- .gr-markdown h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
- .input-section-header { font-size: 1.6em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 8px; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
- .output-section-header { font-size: 1.8em; font-weight: 600; color: #D0D0FF; margin-top: 15px; margin-bottom: 12px;}
- .gr-input input, .gr-input textarea, .gr-dropdown select, .gr-textbox textarea { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important; padding: 10px !important;}
- .gr-button { border-radius: 8px !important; font-weight: 500 !important; transition: all 0.2s ease-in-out !important; display: flex; align-items: center; justify-content: center;}
- .gr-button span { white-space: nowrap !important; overflow: hidden; text-overflow: ellipsis; display: inline-block; max-width: 90%; line-height: normal !important; }
- .gr-button svg { width: 1.1em !important; height: 1.1em !important; margin-right: 4px !important; flex-shrink: 0;}
- .gr-button-primary { padding: 10px 15px !important; } /* Adjusted padding for potentially shorter text */
- .gr-button-primary:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
- .panel_image { border-radius: 12px !important; overflow: hidden; box-shadow: 0 6px 15px rgba(0,0,0,0.25) !important; background-color: #23233A;}
- .panel_image img { max-height: 600px !important; }
- .gallery_output { background-color: transparent !important; border: none !important; }
- .gallery_output .thumbnail-item { border-radius: 8px !important; box-shadow: 0 3px 8px rgba(0,0,0,0.2) !important; margin: 6px !important; transition: transform 0.2s ease; height: 180px !important; width: 180px !important;}
- .gallery_output .thumbnail-item:hover { transform: scale(1.05); }
- .status_text { font-weight: 500; padding: 12px 18px; text-align: center; border-radius: 8px; margin-top:12px; border: 1px solid transparent; font-size: 1.05em;}
- .error_text { background-color: #401010 !important; color: #FFB0B0 !important; border-color: #802020 !important; }
- .success_text { background-color: #104010 !important; color: #B0FFB0 !important; border-color: #208020 !important;}
- .processing_text { background-color: #102040 !important; color: #B0D0FF !important; border-color: #204080 !important;}
- .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
- .gr-tabitem { background-color: #1A1A2E !important; border-radius: 0 0 12px 12px !important; padding: 15px !important;}
- .gr-tab-button.selected { background-color: #2A2A4A !important; color: white !important; border-bottom: 3px solid #A020F0 !important; border-radius: 8px 8px 0 0 !important; font-weight: 600 !important;}
- .gr-tab-button { color: #A0A0C0 !important; border-radius: 8px 8px 0 0 !important;}
- .gr-accordion > .gr-block { border-top: 1px solid #2A2A4A !important; }
- .gr-markdown code { background-color: #2A2A4A !important; color: #C0C0E0 !important; padding: 0.2em 0.5em; border-radius: 4px; }
- .gr-markdown pre { background-color: #23233A !important; padding: 1em !important; border-radius: 6px !important; border: 1px solid #2A2A4A !important;}
- .gr-markdown pre > code { padding: 0 !important; background-color: transparent !important; }
- #surprise_button { background: linear-gradient(135deg, #ff7e5f 0%, #feb47b 100%) !important; font-weight:600 !important;}
- #surprise_button:hover { transform: scale(1.03) translateY(-1px) !important; box-shadow: 0 8px 16px rgba(255,126,95,0.3) !important; }
- """

  # --- Helper: Placeholder Image Creation ---
  def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
@@ -130,24 +83,31 @@ def add_scene_to_story_orchestrator(
  progress=gr.Progress(track_tqdm=True)
  ):
  start_time = time.time()
- if not current_story_obj: current_story_obj = Story()

  log_accumulator = [f"**🚀 Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]

- ret_story_state = current_story_obj
- # Initialize gallery with placeholders or current items to avoid errors if generation fails early
- initial_gallery_items = current_story_obj.get_all_scenes_for_gallery_display()
- if not initial_gallery_items: # Handle case where story is new and has no scenes
- placeholder_img = create_placeholder_image("Waiting for first scene...", size=(180,180), color="#1A1A2E")
- initial_gallery_items = [(placeholder_img, "Your StoryVerse awaits!")]
- ret_gallery = initial_gallery_items

  ret_latest_image = None
  ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
  ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
- # ret_log_md will be built up

- # Initial yield for UI updates (buttons disabled by .then() chain)
  yield {
  output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
  output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
@@ -161,23 +121,23 @@ def add_scene_to_story_orchestrator(

  # --- 1. Generate Narrative Text ---
  progress(0.1, desc="✍️ Crafting narrative...")
- narrative_text_generated = f"Narrative Error: Init failed."
  text_model_info = TEXT_MODELS.get(text_model_key)
  if text_model_info and text_model_info["type"] != "none":
- system_p = get_narrative_system_prompt("default")
- prev_narrative = current_story_obj.get_last_scene_narrative()
- user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
- log_accumulator.append(f" Narrative: Using {text_model_key} ({text_model_info['id']}). Length: {narrative_length}")
  text_response = None
  if text_model_info["type"] == "gemini": text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
  elif text_model_info["type"] == "hf_text": text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
  if text_response and text_response.success: narrative_text_generated = basic_text_cleanup(text_response.text); log_accumulator.append(f" Narrative: Success.")
  elif text_response: narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"; log_accumulator.append(f" Narrative: FAILED - {text_response.error}")
  else: log_accumulator.append(f" Narrative: FAILED - No response from {text_model_key}.")
- else: narrative_text_generated = "**Narrative Error:** Selected text model not available or misconfigured."; log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
-
  ret_latest_narrative_str_content = f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"
- ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content)
  yield { output_latest_scene_narrative: ret_latest_narrative_md_obj,
  output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }

@@ -185,40 +145,31 @@ def add_scene_to_story_orchestrator(
  progress(0.5, desc="🎨 Conjuring visuals...")
  image_generated_pil = None
  image_generation_error_message = None
  selected_image_provider_key_from_ui = image_provider_key
  selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)
-
  image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
- quality_keyword = "ultra detailed, intricate, masterpiece, " if image_quality == "High Detail" else ("concept sketch, line art, " if image_quality == "Sketch Concept" else "")
  full_image_prompt = format_image_generation_prompt(quality_keyword + image_content_prompt_for_gen[:350], image_style_dropdown, artist_style_text)
- log_accumulator.append(f" Image: Attempting with provider key '{selected_image_provider_key_from_ui}' (maps to type '{selected_image_provider_type}'). Style: {image_style_dropdown}.")
-
- if selected_image_provider_type and selected_image_provider_type != "none":
- image_response = None
  if selected_image_provider_type.startswith("dalle_"):
- if DALLE_IMAGE_IS_READY:
- dalle_model_version = "dall-e-3" if selected_image_provider_type == "dalle_3" else "dall-e-2"
- dalle_size = "1024x1024"
- dalle_quality_param = "hd" if image_quality=="High Detail" and dalle_model_version == "dall-e-3" else "standard"
- image_response = generate_image_dalle(full_image_prompt, model=dalle_model_version, size=dalle_size, quality=dalle_quality_param)
- else: image_generation_error_message = "**Image Error:** DALL-E selected but API not ready."
  elif selected_image_provider_type.startswith("hf_"):
- if HF_IMAGE_IS_READY:
- hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; img_width, img_height = 768, 768 # Defaults
- if selected_image_provider_type == "hf_openjourney": hf_model_id_to_call = "prompthero/openjourney"; img_width,img_height = 512,512
- elif selected_image_provider_type == "hf_sdxl_base": hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"; # Redundant, but explicit
- elif selected_image_provider_type == "hf_sd_1_5": hf_model_id_to_call = "runwayml/stable-diffusion-v1-5"; img_width,img_height = 512,512
- image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id_to_call, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=img_width, height=img_height)
- else: image_generation_error_message = "**Image Error:** HF Image Model selected but API not ready."
- else: image_generation_error_message = f"**Image Error:** Provider type '{selected_image_provider_type}' not handled."
-
- if image_response and image_response.success: image_generated_pil = image_response.image; log_accumulator.append(f" Image: Success from {image_response.provider} (Model: {image_response.model_id_used}).")
- elif image_response: image_generation_error_message = f"**Image Error ({image_response.provider} - {image_response.model_id_used}):** {image_response.error}"; log_accumulator.append(f" Image: FAILED - {image_response.error}")
- elif not image_generation_error_message: image_generation_error_message = f"**Image Error:** No response with {image_provider_key}."
-
- if not image_generated_pil and not image_generation_error_message:
- image_generation_error_message = "**Image Error:** No valid image provider configured or selected."
- log_accumulator.append(f" Image: FAILED - {image_generation_error_message}")

  ret_latest_image = image_generated_pil if image_generated_pil else create_placeholder_image("Image Gen Failed", color="#401010")
  yield { output_latest_scene_image: gr.Image(value=ret_latest_image),
@@ -226,31 +177,34 @@ def add_scene_to_story_orchestrator(

  # --- 3. Add Scene to Story Object ---
  final_scene_error = None
- if image_generation_error_message and "**Narrative Error**" in narrative_text_generated : final_scene_error = f"{narrative_text_generated}\n{image_generation_error_message}"
  elif "**Narrative Error**" in narrative_text_generated: final_scene_error = narrative_text_generated
  elif image_generation_error_message: final_scene_error = image_generation_error_message

  current_story_obj.add_scene_from_elements(
  user_prompt=scene_prompt_text,
- narrative_text=narrative_text_generated if "**Narrative Error**" not in narrative_text_generated else "(Narrative gen failed)",
  image=image_generated_pil,
  image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
  image_provider=selected_image_provider_key_from_ui,
  error_message=final_scene_error
  )
  ret_story_state = current_story_obj
- log_accumulator.append(f" Scene {current_story_obj.current_scene_number} processed and added.")

  # --- 4. Prepare Final Values for Return Tuple ---
- ret_gallery = current_story_obj.get_all_scenes_for_gallery_display()
- # Ensure gallery items are PIL Images or None for errored/missing images
  processed_gallery_tuples = []
- for img_item, cap_text in ret_gallery:
- if isinstance(img_item, Image.Image):
- processed_gallery_tuples.append((img_item, cap_text))
- else: # Assume it's an error or no image, create placeholder for gallery
- gallery_placeholder = create_placeholder_image(f"S{cap_text.split(':')[0][1:]}\nError/NoImg", size=(180,180), color="#2A2A4A")
- processed_gallery_tuples.append((gallery_placeholder, cap_text))
  ret_gallery = processed_gallery_tuples

@@ -273,26 +227,31 @@ def add_scene_to_story_orchestrator(

  current_total_time = time.time() - start_time
  log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
- ret_log_md = gr.Markdown(value="\n".join(log_accumulator))

- # This is the FINAL return. It must be a tuple matching the `outputs` list of engage_button.click()
  return (
- ret_story_state, ret_gallery, ret_latest_image,
- ret_latest_narrative_md_obj, ret_status_bar_html_obj, ret_log_md
  )

  def clear_story_state_ui_wrapper():
- new_story = Story(); ph_img = create_placeholder_image("Blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
- # Ensure gallery output for clear is also a list of (image, caption)
- cleared_gallery_display = [(ph_img, "Your StoryVerse is new and untold...")]
- initial_narrative = "## ✨ New Story ✨\nDescribe your first scene!"
  status_msg = "<p class='processing_text status_text'>📜 Story Cleared.</p>"
- return (new_story, cleared_gallery_display, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")

  def surprise_me_func():
- print("DEBUG: surprise_me_func called") # For checking button functionality
  themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2)
- print(f"DEBUG: surprise_me_func returning: {prompt}, {style}, {artist}")
  return prompt, style, artist

  def disable_buttons_for_processing():
@@ -308,8 +267,20 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
  # Define Python variables for UI components
  story_state_output = gr.State(Story())

- with gr.Row(equal_height=False, variant="panel"): # Main layout row
- # Input Column
  with gr.Column(scale=7, min_width=450):
  gr.Markdown("### 💡 **Craft Your Scene**", elem_classes="input-section-header")
  with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust...")
@@ -326,11 +297,10 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
  image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
  engage_button = gr.Button("🌌 Weave!", variant="primary", scale=3, icon="✨") # Shorter text
- surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")# Shorter text
  clear_story_button = gr.Button("🗑️ New", variant="stop", scale=1, icon="♻️") # Shorter text
  output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")

- # Output Column
  with gr.Column(scale=10, min_width=700):
  gr.Markdown("### 🖼️ **Your StoryVerse**", elem_classes="output-section-header")
  with gr.Tabs():
@@ -343,32 +313,8 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
  with gr.Accordion("Interaction Log", open=False):
  output_interaction_log_markdown = gr.Markdown("Log...")

- # API Status (defined after main layout to ensure it's below everything)
- with gr.Accordion("🔧 AI Services Status & Info", open=False, elem_id="api_status_accordion"):
- status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
- if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
- else:
- if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
- if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
- else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
- gr.HTML("".join(status_text_list))
-
- # Examples (defined after main layout)
- gr.Examples(
- examples=[
- ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
- ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
- ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
- ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"]
- ],
- inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
- label="🌌 Example Universes to Weave 🌌",
- )
- gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omega™ - Weaving Worlds with Words and Pixels ✨</p></div>")
-
  # Event Handlers
- engage_event_actions = engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
  .then(fn=add_scene_to_story_orchestrator,
  inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown],
  outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\
@@ -380,12 +326,25 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
  surprise_button.click(fn=surprise_me_func,
  outputs=[scene_prompt_input, image_style_input, artist_style_input])

  # --- Entry Point ---
  if __name__ == "__main__":
  print("="*80); print("✨ StoryVerse Omega (Full App with Fixes) Launching... ✨")
  print(f" Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f" HF Text Ready: {HF_TEXT_IS_READY}")
  print(f" DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f" HF Image API Ready: {HF_IMAGE_IS_READY}")
- if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY): print(" 🔴 WARNING: Not all services configured.")
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
  print("="*80)
  story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)

  # --- Core Logic Imports ---
  from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
  from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
+ from core.story_engine import Story, Scene # CRITICAL: Ensure this is your updated Story class
  from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
  from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
  from core.utils import basic_text_cleanup
 
  HF_IMAGE_IS_READY = is_hf_image_api_ready()

  # --- Application Configuration (Models, Defaults) ---
+ # (This section remains the same - ensure TEXT_MODELS, UI_DEFAULT_TEXT_MODEL_KEY, etc. are defined)
  TEXT_MODELS = {}
  UI_DEFAULT_TEXT_MODEL_KEY = None
  if GEMINI_TEXT_IS_READY:
  TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
+ if HF_TEXT_IS_READY:
  TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
+ if TEXT_MODELS:
+ UI_DEFAULT_TEXT_MODEL_KEY = list(TEXT_MODELS.keys())[0]
+ if GEMINI_TEXT_IS_READY and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
+ elif HF_TEXT_IS_READY and "Mistral 7B (Narrate via HF)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "Mistral 7B (Narrate via HF)"
+ else:
  TEXT_MODELS["No Text Models Configured"] = {"id": "dummy_text_error", "type": "none"}
  UI_DEFAULT_TEXT_MODEL_KEY = "No Text Models Configured"

  UI_DEFAULT_IMAGE_PROVIDER_KEY = None
  if DALLE_IMAGE_IS_READY:
  IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
+ elif HF_IMAGE_IS_READY:
  IMAGE_PROVIDERS["🎡 HF - SDXL Base"] = "hf_sdxl_base"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "🎡 HF - SDXL Base"

  if not IMAGE_PROVIDERS:
  IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
  UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"

  # --- Gradio UI Theme and CSS ---
+ # (omega_theme and omega_css definitions remain THE SAME as the last full app.py version)
+ omega_theme = gr.themes.Base(font=[gr.themes.GoogleFont("Lexend Deca")], primary_hue=gr.themes.colors.purple).set(body_background_fill="#0F0F1A", block_background_fill="#1A1A2E", slider_color="#A020F0")
+ omega_css = """ /* ... Paste your full omega_css string here ...
+ body, .gradio-container .gr-button span { white-space: nowrap !important; overflow: hidden; text-overflow: ellipsis; display: inline-block; max-width: 90%; }
+ .gradio-container .gr-button { display: flex; align-items: center; justify-content: center; }
+ .gradio-container .gr-button svg { margin-right: 4px !important; }
+ */ """
+

  # --- Helper: Placeholder Image Creation ---
  def create_placeholder_image(text="Processing...", size=(512, 512), color="#23233A", text_color="#E0E0FF"):
 
83
  progress=gr.Progress(track_tqdm=True)
84
  ):
85
  start_time = time.time()
86
+ if not current_story_obj: current_story_obj = Story() # Ensure story object exists
87
 
88
  log_accumulator = [f"**πŸš€ Scene {current_story_obj.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**"]
89
 
90
+ # --- Initialize values for the final return tuple ---
91
+ # These correspond to the `outputs` list of `engage_button.click()`
92
+ # Order: story_state_output, output_gallery, output_latest_scene_image,
93
+ # output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown
94
+
95
+ # Get initial gallery state based on current story object
96
+ # This ensures that if we error out early, the gallery doesn't just disappear if it had items
97
+ current_gallery_items = current_story_obj.get_all_scenes_for_gallery_display()
98
+ if not current_gallery_items: # Handle initially empty story for gallery
99
+ placeholder_gallery_img = create_placeholder_image("Start Weaving!", size=(180,180), color="#1A1A2E")
100
+ current_gallery_items = [(placeholder_gallery_img, "Your StoryVerse awaits!")]
101
 
102
+ # These will be updated and form the basis of the final 'return'
103
+ ret_story_state = current_story_obj
104
+ ret_gallery = current_gallery_items
105
  ret_latest_image = None
106
  ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
107
  ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
108
+ # ret_log_md is built up
109
 
110
+ # Initial UI update via yield (buttons disabled by .then() chain)
111
  yield {
112
  output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>🌌 Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
113
  output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
 
121
 
122
  # --- 1. Generate Narrative Text ---
123
  progress(0.1, desc="✍️ Crafting narrative...")
124
+ narrative_text_generated = f"Narrative Error: Init failed." # Default
125
+ # ... (Full narrative generation logic from your previous working app.py)
126
+ # ... (This part should call generate_text_gemini or generate_text_hf and update narrative_text_generated)
127
  text_model_info = TEXT_MODELS.get(text_model_key)
128
  if text_model_info and text_model_info["type"] != "none":
129
+ system_p = get_narrative_system_prompt("default"); prev_narrative = current_story_obj.get_last_scene_narrative(); user_p = format_narrative_user_prompt(scene_prompt_text, prev_narrative)
130
+ log_accumulator.append(f" Narrative: Using {text_model_key} ({text_model_info['id']}).")
 
 
131
  text_response = None
132
  if text_model_info["type"] == "gemini": text_response = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
133
  elif text_model_info["type"] == "hf_text": text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
134
  if text_response and text_response.success: narrative_text_generated = basic_text_cleanup(text_response.text); log_accumulator.append(f" Narrative: Success.")
135
  elif text_response: narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"; log_accumulator.append(f" Narrative: FAILED - {text_response.error}")
136
  else: log_accumulator.append(f" Narrative: FAILED - No response from {text_model_key}.")
137
+ else: narrative_text_generated = "**Narrative Error:** Text model unavailable."; log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
138
+
139
  ret_latest_narrative_str_content = f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"
140
+ ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content) # Prepare for final return
141
  yield { output_latest_scene_narrative: ret_latest_narrative_md_obj,
142
  output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
143
 
 
  progress(0.5, desc="🎨 Conjuring visuals...")
  image_generated_pil = None
  image_generation_error_message = None
+ # ... (Full image generation logic from your previous working app.py) ...
+ # ... (This part should call generate_image_dalle or generate_image_hf_model and update image_generated_pil)
  selected_image_provider_key_from_ui = image_provider_key
  selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)
  image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
+ quality_keyword = "ultra detailed, " if image_quality == "High Detail" else ("concept sketch, " if image_quality == "Sketch Concept" else "")
  full_image_prompt = format_image_generation_prompt(quality_keyword + image_content_prompt_for_gen[:350], image_style_dropdown, artist_style_text)
+ log_accumulator.append(f" Image: Attempting with provider key '{selected_image_provider_key_from_ui}' (maps to type '{selected_image_provider_type}').")
+ if selected_image_provider_type and selected_image_provider_type != "none": # Actual call logic
+ image_response = None # ... (call DALL-E or HF based on selected_image_provider_type)
  if selected_image_provider_type.startswith("dalle_"):
+ if DALLE_IMAGE_IS_READY: image_response = generate_image_dalle(full_image_prompt, model="dall-e-3" if selected_image_provider_type == "dalle_3" else "dall-e-2")
+ else: image_generation_error_message = "**Image Error:** DALL-E selected but not ready."
  elif selected_image_provider_type.startswith("hf_"):
+ if HF_IMAGE_IS_READY:
+ hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0" # Default
+ if selected_image_provider_type == "hf_openjourney": hf_model_id = "prompthero/openjourney"
+ elif selected_image_provider_type == "hf_sd_1_5": hf_model_id = "runwayml/stable-diffusion-v1-5"
+ image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS)
+ else: image_generation_error_message = "**Image Error:** HF Image selected but not ready."
+ # ... (process image_response)
+ if image_response and image_response.success: image_generated_pil = image_response.image; log_accumulator.append(" Image: Success.")
+ elif image_response: image_generation_error_message = f"**Image Error:** {image_response.error}"; log_accumulator.append(f" Image: FAILED - {image_response.error}")
+ elif not image_generation_error_message: image_generation_error_message = "**Image Error:** No response/unknown issue."
+ else: image_generation_error_message = "**Image Error:** No valid image provider."

  ret_latest_image = image_generated_pil if image_generated_pil else create_placeholder_image("Image Gen Failed", color="#401010")
  yield { output_latest_scene_image: gr.Image(value=ret_latest_image),

  # --- 3. Add Scene to Story Object ---
  final_scene_error = None
+ if image_generation_error_message and "**Narrative Error**" in narrative_text_generated : final_scene_error = f"Narrative: {narrative_text_generated.split('**')[-1].strip()} \nImage: {image_generation_error_message.split('**')[-1].strip()}"
  elif "**Narrative Error**" in narrative_text_generated: final_scene_error = narrative_text_generated
  elif image_generation_error_message: final_scene_error = image_generation_error_message

  current_story_obj.add_scene_from_elements(
  user_prompt=scene_prompt_text,
+ narrative_text=narrative_text_generated if "**Narrative Error**" not in narrative_text_generated else "(Narrative generation failed, see error log)",
  image=image_generated_pil,
  image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
  image_provider=selected_image_provider_key_from_ui,
  error_message=final_scene_error
  )
  ret_story_state = current_story_obj
+ log_accumulator.append(f" Scene {current_story_obj.current_scene_number} processed and added to story object.")

  # --- 4. Prepare Final Values for Return Tuple ---
+ gallery_tuples_final = current_story_obj.get_all_scenes_for_gallery_display()
  processed_gallery_tuples = []
+ if not gallery_tuples_final: # Ensure gallery is not empty for Gradio if story just started
+ placeholder_gallery_img = create_placeholder_image("Your Story Begins!", size=(180,180), color="#1A1A2E")
+ processed_gallery_tuples = [(placeholder_gallery_img, "First scene pending or just added!")]
+ else:
+ for img_item, cap_text in gallery_tuples_final:
+ if img_item is None:
+ gallery_placeholder = create_placeholder_image(f"S{cap_text.split(':')[0][1:]}\nError/NoImg", size=(180,180), color="#2A2A4A")
+ processed_gallery_tuples.append((gallery_placeholder, cap_text))
+ else:
+ processed_gallery_tuples.append((img_item, cap_text))
  ret_gallery = processed_gallery_tuples


  current_total_time = time.time() - start_time
  log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
+ ret_log_md = gr.Markdown(value="\n".join(log_accumulator)) # Prepare final log content

+ # Final return for the .click() handler's `outputs` list
  return (
+ ret_story_state,
+ ret_gallery, # This is now processed_gallery_tuples
+ ret_latest_image, # This is the PIL image or placeholder
+ ret_latest_narrative_md_obj, # This is a gr.Markdown object
+ ret_status_bar_html_obj, # This is a gr.HTML object
+ ret_log_md # This is a gr.Markdown object
  )

  def clear_story_state_ui_wrapper():
+ print("DEBUG: clear_story_state_ui_wrapper called")
+ new_story = Story()
+ placeholder_img = create_placeholder_image("Your StoryVerse is a blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
+ cleared_gallery = [(placeholder_img, "Your StoryVerse is new and untold...")]
+ initial_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene idea..."
  status_msg = "<p class='processing_text status_text'>📜 Story Cleared.</p>"
+ return (new_story, cleared_gallery, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")

  def surprise_me_func():
+ print("DEBUG: surprise_me_func called")
  themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy", "Noir Detective"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2)
+ print(f"DEBUG: surprise_me_func returning: Prompt='{prompt}', Style='{style}', Artist='{artist}'")
  return prompt, style, artist

  def disable_buttons_for_processing():

  # Define Python variables for UI components
  story_state_output = gr.State(Story())

+ gr.Markdown("<div align='center'><h1>✨ StoryVerse Omega ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>")
+ gr.HTML("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in Space Secrets!</div>")
+
+ with gr.Accordion("🔧 AI Services Status & Info", open=False):
+ status_text_list = []; text_llm_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY); image_gen_ok = (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY)
+ if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
+ else:
+ if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Ready.</p>")
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation NOT Ready.</p>")
+ if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Ready.</p>")
+ else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation NOT Ready.</p>")
+ gr.HTML("".join(status_text_list))
+
+ with gr.Row(equal_height=False, variant="panel"):
  with gr.Column(scale=7, min_width=450):
  gr.Markdown("### 💡 **Craft Your Scene**", elem_classes="input-section-header")
  with gr.Group(): scene_prompt_input = gr.Textbox(lines=7, label="Scene Vision (Description, Dialogue, Action):", placeholder="e.g., Amidst swirling cosmic dust...")

  image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
  with gr.Row(elem_classes=["compact-row"], equal_height=True):
  engage_button = gr.Button("🌌 Weave!", variant="primary", scale=3, icon="✨") # Shorter text
+ surprise_button = gr.Button("🎲 Surprise!", variant="secondary", scale=1, icon="🎁")
  clear_story_button = gr.Button("🗑️ New", variant="stop", scale=1, icon="♻️") # Shorter text
  output_status_bar = gr.HTML(value="<p class='processing_text status_text'>Ready to weave your first masterpiece!</p>")

  with gr.Column(scale=10, min_width=700):
  gr.Markdown("### 🖼️ **Your StoryVerse**", elem_classes="output-section-header")
  with gr.Tabs():

  with gr.Accordion("Interaction Log", open=False):
  output_interaction_log_markdown = gr.Markdown("Log...")

  # Event Handlers
+ engage_button.click(fn=disable_buttons_for_processing, outputs=[engage_button, surprise_button], queue=False)\
  .then(fn=add_scene_to_story_orchestrator,
  inputs=[story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input, text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown],
  outputs=[story_state_output, output_gallery, output_latest_scene_image, output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown])\

  surprise_button.click(fn=surprise_me_func,
  outputs=[scene_prompt_input, image_style_input, artist_style_input])

+ gr.Examples(
+ examples=[
+ ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
+ ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
+ ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
+ ["A grand, baroque library where the books fly freely and whisper forgotten lore to those who listen closely.", "Magical Realism", "Remedios Varo", "minimalist, simple, technology"]
+ ],
+ inputs=[scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input],
+ label="🌌 Example Universes to Weave 🌌",
+ )
+ gr.HTML("<div style='text-align:center; margin-top:30px; padding-bottom:20px;'><p style='font-size:0.9em; color:#8080A0;'>✨ StoryVerse Omega™ - Weaving Worlds with Words and Pixels ✨</p></div>")
+
  # --- Entry Point ---
  if __name__ == "__main__":
  print("="*80); print("✨ StoryVerse Omega (Full App with Fixes) Launching... ✨")
  print(f" Gemini Text Ready: {GEMINI_TEXT_IS_READY}"); print(f" HF Text Ready: {HF_TEXT_IS_READY}")
  print(f" DALL-E Image Ready: {DALLE_IMAGE_IS_READY}"); print(f" HF Image API Ready: {HF_IMAGE_IS_READY}")
+ if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not (DALLE_IMAGE_IS_READY or HF_IMAGE_IS_READY):
+ print(" 🔴 WARNING: Not all primary/fallback AI services configured.")
  print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
  print("="*80)
  story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)