Update app.py
app.py CHANGED
@@ -10,7 +10,6 @@ import traceback
 # --- Core Logic Imports ---
 from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf
 from core.image_services import initialize_image_llms, is_hf_image_api_ready, generate_image_hf_model, ImageGenResponse
-# Removed STABILITY_API_CONFIGURED, OPENAI_DALLE_CONFIGURED, generate_image_stabilityai, generate_image_dalle
 from core.story_engine import Story, Scene
 from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
 from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
@@ -18,18 +17,19 @@ from core.utils import basic_text_cleanup
 
 # --- Initialize Services ---
 initialize_text_llms()
-initialize_image_llms() # This
+initialize_image_llms() # This now primarily initializes based on HF_TOKEN for images
 
 # --- Get API Readiness Status ---
 GEMINI_TEXT_IS_READY = is_gemini_text_ready()
 HF_TEXT_IS_READY = is_hf_text_ready()
-HF_IMAGE_IS_READY = is_hf_image_api_ready() # Primary image status
+HF_IMAGE_IS_READY = is_hf_image_api_ready() # Primary image status from image_services.py
 
 # --- Application Configuration (Models, Defaults) ---
 TEXT_MODELS = {}
 UI_DEFAULT_TEXT_MODEL_KEY = None
 if GEMINI_TEXT_IS_READY:
     TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
+    TEXT_MODELS["Legacy Gemini 1.0 Pro (Narrate)"] = {"id": "gemini-1.0-pro-latest", "type": "gemini"}
 if HF_TEXT_IS_READY:
     TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
     TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}
@@ -47,7 +47,7 @@ UI_DEFAULT_IMAGE_PROVIDER_KEY = None
 if HF_IMAGE_IS_READY:
     IMAGE_PROVIDERS["π‘ HF - Stable Diffusion XL Base"] = "hf_sdxl_base"
     IMAGE_PROVIDERS["π HF - OpenJourney (Midjourney-like)"] = "hf_openjourney"
-    IMAGE_PROVIDERS["π HF - Stable Diffusion v1.5"] = "hf_sd_1_5"
+    IMAGE_PROVIDERS["π HF - Stable Diffusion v1.5"] = "hf_sd_1_5"
     UI_DEFAULT_IMAGE_PROVIDER_KEY = "π‘ HF - Stable Diffusion XL Base"
 else:
     IMAGE_PROVIDERS["No Image Providers Configured (HF Token needed)"] = "none"
@@ -126,9 +126,9 @@ def add_scene_to_story_orchestrator(
     ret_latest_image = None
     ret_latest_narrative_md_obj = gr.Markdown(value="## Processing...\nNarrative being woven...")
     ret_status_bar_html_obj = gr.HTML(value="<p class='processing_text status_text'>Processing...</p>")
-    # ret_log_md
+    # ret_log_md is built up
 
-    # Initial yield for UI updates (buttons
+    # Initial yield for UI updates (buttons handled by .then() chain)
     yield {
         output_status_bar: gr.HTML(value=f"<p class='processing_text status_text'>π Weaving Scene {current_story_obj.current_scene_number + 1}...</p>"),
         output_latest_scene_image: gr.Image(value=create_placeholder_image("🎨 Conjuring visuals...")),
@@ -154,11 +154,11 @@ def add_scene_to_story_orchestrator(
         elif text_model_info["type"] == "hf_text": text_response = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if narrative_length.startswith("Detailed") else 400)
         if text_response and text_response.success: narrative_text_generated = basic_text_cleanup(text_response.text); log_accumulator.append(f" Narrative: Success.")
         elif text_response: narrative_text_generated = f"**Narrative Error ({text_model_key}):** {text_response.error}"; log_accumulator.append(f" Narrative: FAILED - {text_response.error}")
-        else: log_accumulator.append(f" Narrative: FAILED - No response
-    else: narrative_text_generated = "**Narrative Error:** Selected text model not available or misconfigured."; log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}'
+        else: log_accumulator.append(f" Narrative: FAILED - No response from {text_model_key}.")
+    else: narrative_text_generated = "**Narrative Error:** Selected text model not available or misconfigured."; log_accumulator.append(f" Narrative: FAILED - Model '{text_model_key}' unavailable.")
 
     ret_latest_narrative_str_content = f"## Scene Idea: {scene_prompt_text}\n\n{narrative_text_generated}"
-    ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content)
+    ret_latest_narrative_md_obj = gr.Markdown(value=ret_latest_narrative_str_content)
     yield { output_latest_scene_narrative: ret_latest_narrative_md_obj,
             output_interaction_log_markdown: gr.Markdown(value="\n".join(log_accumulator)) }
 
@@ -166,8 +166,8 @@ def add_scene_to_story_orchestrator(
     progress(0.5, desc="🎨 Conjuring visuals...")
     image_generated_pil = None
    image_generation_error_message = None
-    selected_image_provider_key_from_ui = image_provider_key
-    selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)
+    selected_image_provider_key_from_ui = image_provider_key
+    selected_image_provider_type = IMAGE_PROVIDERS.get(selected_image_provider_key_from_ui)
 
     image_content_prompt_for_gen = narrative_text_generated if narrative_text_generated and "Error" not in narrative_text_generated else scene_prompt_text
     quality_keyword = "ultra detailed, intricate, masterpiece, " if image_quality == "High Detail" else ("concept sketch, line art, " if image_quality == "Sketch Concept" else "")
@@ -175,39 +175,22 @@
     log_accumulator.append(f" Image: Using provider key '{selected_image_provider_key_from_ui}' (maps to type '{selected_image_provider_type}'). Style: {image_style_dropdown}.")
 
     if selected_image_provider_type and selected_image_provider_type != "none":
-        if not HF_IMAGE_IS_READY and selected_image_provider_type.startswith("hf_"):
+        if not HF_IMAGE_IS_READY and selected_image_provider_type.startswith("hf_"):
             image_generation_error_message = "**Image Error:** Hugging Face Image API not configured (check STORYVERSE_HF_TOKEN)."
             log_accumulator.append(f" Image: FAILED - {image_generation_error_message}")
         else:
-            image_response = None
-            hf_model_id_to_call = None
+            image_response = None; hf_model_id_to_call = None; img_width, img_height = 768, 768 # Defaults
             if selected_image_provider_type == "hf_sdxl_base": hf_model_id_to_call = "stabilityai/stable-diffusion-xl-base-1.0"
             elif selected_image_provider_type == "hf_openjourney": hf_model_id_to_call = "prompthero/openjourney"; img_width, img_height = 512, 512
             elif selected_image_provider_type == "hf_sd_1_5": hf_model_id_to_call = "runwayml/stable-diffusion-v1-5"; img_width, img_height = 512, 512
-            # Add other specific HF model mappings here
 
-            if hf_model_id_to_call:
-                image_response = generate_image_hf_model(
-
-                    negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS,
-                    width=img_width if 'img_width' in locals() else 768,
-                    height=img_height if 'img_height' in locals() else 768
-                )
-            # Add elif for other providers like StabilityAI or DALL-E if you re-enable them
-            # elif selected_image_provider_type == "stability_ai": ...
-            else:
-                image_generation_error_message = f"**Image Error:** Provider type '{selected_image_provider_type}' not handled."
-                log_accumulator.append(f" Image: FAILED - {image_generation_error_message}")
+            if hf_model_id_to_call:
+                image_response = generate_image_hf_model(full_image_prompt, model_id=hf_model_id_to_call, negative_prompt=negative_prompt_text or COMMON_NEGATIVE_PROMPTS, width=img_width, height=img_height)
+            else: image_generation_error_message = f"**Image Error:** Provider type '{selected_image_provider_type}' not handled for image generation."
 
-            if image_response and image_response.success:
-
-
-            elif image_response:
-                image_generation_error_message = f"**Image Error ({image_response.provider} - {image_response.model_id_used}):** {image_response.error}"
-                log_accumulator.append(f" Image: FAILED - {image_response.error}")
-            elif not image_generation_error_message: # If no response and no specific error yet
-                image_generation_error_message = f"**Image Error:** No response from image service for {selected_image_provider_key_from_ui}."
-                log_accumulator.append(f" Image: FAILED - No response object.")
+            if image_response and image_response.success: image_generated_pil = image_response.image; log_accumulator.append(f" Image: Success from HF model {image_response.model_id_used or hf_model_id_to_call}.")
+            elif image_response: image_generation_error_message = f"**Image Error (HF - {image_response.model_id_used or hf_model_id_to_call}):** {image_response.error}"; log_accumulator.append(f" Image: FAILED - {image_response.error}")
+            elif not image_generation_error_message: image_generation_error_message = f"**Image Error:** No response from HF image service."; log_accumulator.append(f" Image: FAILED - No response object.")
     else:
         image_generation_error_message = "**Image Error:** No image provider selected or configured."
         log_accumulator.append(f" Image: FAILED - Provider key '{selected_image_provider_key_from_ui}' not found or type is 'none'.")
@@ -227,7 +210,7 @@ def add_scene_to_story_orchestrator(
         narrative_text=narrative_text_generated if "**Narrative Error**" not in narrative_text_generated else "(Narrative gen failed)",
         image=image_generated_pil,
         image_style_prompt=f"{image_style_dropdown}{f', by {artist_style_text}' if artist_style_text and artist_style_text.strip() else ''}",
-        image_provider=selected_image_provider_key_from_ui,
+        image_provider=selected_image_provider_key_from_ui,
         error_message=final_scene_error
     )
     ret_story_state = current_story_obj
@@ -252,31 +235,22 @@ def add_scene_to_story_orchestrator(
         ret_status_bar_html_obj = gr.HTML(value=f"<p class='error_text status_text'>❌ UNEXPECTED ERROR: {type(e).__name__}. Check logs.</p>")
         ret_latest_narrative_md_obj = gr.Markdown(value=f"## Unexpected Error\n{type(e).__name__}: {e}\nSee log for details.")
 
-    # Final log update happens as part of the main return now
     current_total_time = time.time() - start_time
     log_accumulator.append(f" Cycle ended at {time.strftime('%H:%M:%S')}. Total time: {current_total_time:.2f}s")
     ret_log_md = gr.Markdown(value="\n".join(log_accumulator))
 
     # This is the FINAL return. It must be a tuple matching the `outputs` list of engage_button.click()
     return (
-        ret_story_state,
-
-        ret_latest_image,
-        ret_latest_narrative_md_obj,
-        ret_status_bar_html_obj,
-        ret_log_md
+        ret_story_state, ret_gallery, ret_latest_image,
+        ret_latest_narrative_md_obj, ret_status_bar_html_obj, ret_log_md
     )
 
 def clear_story_state_ui_wrapper():
-    new_story = Story()
-
-    cleared_gallery = [(placeholder_img, "Your StoryVerse is new and untold...")]
-    initial_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene idea..."
-    status_msg = "<p class='processing_text status_text'>π Story Cleared.</p>"
-    return (new_story, cleared_gallery, None, gr.Markdown(initial_narrative), gr.HTML(status_msg), "Log Cleared.", "")
+    new_story = Story(); ph_img = create_placeholder_image("Blank canvas...", color="#1A1A2E", text_color="#A0A0C0")
+    return (new_story, [(ph_img,"New StoryVerse...")], None, gr.Markdown("## ✨ New Story ✨"), gr.HTML("<p class='processing_text status_text'>π Story Cleared.</p>"), "Log Cleared.", "")
 
 def surprise_me_func():
-    themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy"
+    themes = ["Cosmic Horror", "Solarpunk Utopia", "Mythic Fantasy"]; actions = ["unearths an artifact", "negotiates"]; settings = ["on a rogue planet", "in a city in a tree"]; prompt = f"A protagonist {random.choice(actions)} {random.choice(settings)}. Theme: {random.choice(themes)}."; style = random.choice(list(STYLE_PRESETS.keys())); artist = random.choice(["H.R. Giger", "Moebius", ""]*2); return prompt, style, artist
 
 def disable_buttons_for_processing():
     return gr.Button(interactive=False), gr.Button(interactive=False)
@@ -292,13 +266,13 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
     gr.HTML("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Omega help you weave captivating scenes with narrative and imagery. Ensure API keys (<code>STORYVERSE_...</code>) are correctly set in Space Secrets!</div>")
 
     with gr.Accordion("π§ AI Services Status & Info", open=False):
-        status_text_list = []; text_llm_ok, image_gen_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY), (
+        status_text_list = []; text_llm_ok, image_gen_ok = (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY), (HF_IMAGE_IS_READY) # Simplified image_gen_ok
         if not text_llm_ok and not image_gen_ok: status_text_list.append("<p style='color:#FCA5A5;font-weight:bold;'>⚠️ CRITICAL: NO AI SERVICES CONFIGURED.</p>")
         else:
            if text_llm_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Text Generation Service(s) Ready.</p>")
            else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Text Generation Service(s) NOT Ready.</p>")
-           if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Service(
-           else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation Service(
+           if image_gen_ok: status_text_list.append("<p style='color:#A7F3D0;'>✅ Image Generation Service (HF) Ready.</p>") # Specify HF
+           else: status_text_list.append("<p style='color:#FCD34D;'>⚠️ Image Generation Service (HF) NOT Ready.</p>")
        gr.HTML("".join(status_text_list))
 
     with gr.Row(equal_height=False, variant="panel"):
@@ -315,7 +289,7 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
     with gr.Accordion("⚙️ Advanced AI Configuration", open=False):
         with gr.Group():
             text_model_dropdown = gr.Dropdown(choices=list(TEXT_MODELS.keys()), value=UI_DEFAULT_TEXT_MODEL_KEY, label="Narrative AI Engine")
-            image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine")
+            image_provider_dropdown = gr.Dropdown(choices=list(IMAGE_PROVIDERS.keys()), value=UI_DEFAULT_IMAGE_PROVIDER_KEY, label="Visual AI Engine (HF Models)") # Updated label
             with gr.Row():
                 narrative_length_dropdown = gr.Dropdown(["Short (1 paragraph)", "Medium (2-3 paragraphs)", "Detailed (4+ paragraphs)"], value="Medium (2-3 paragraphs)", label="Narrative Detail")
                 image_quality_dropdown = gr.Dropdown(["Standard", "High Detail", "Sketch Concept"], value="Standard", label="Image Detail/Style")
@@ -337,34 +311,24 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
     with gr.Accordion(label="Developer Interaction Log", open=False):
         output_interaction_log_markdown = gr.Markdown("Log will appear here...")
 
-    # Chained event handling for engage_button
     engage_button.click(
-        fn=disable_buttons_for_processing,
-        inputs=None,
-        outputs=[engage_button, surprise_button],
-        queue=False
+        fn=disable_buttons_for_processing, inputs=None, outputs=[engage_button, surprise_button], queue=False
     ).then(
         fn=add_scene_to_story_orchestrator,
        inputs=[
-            story_state_output, scene_prompt_input,
-
-            text_model_dropdown, image_provider_dropdown,
-            narrative_length_dropdown, image_quality_dropdown
+            story_state_output, scene_prompt_input, image_style_input, artist_style_input, negative_prompt_input,
+            text_model_dropdown, image_provider_dropdown, narrative_length_dropdown, image_quality_dropdown
        ],
        outputs=[
            story_state_output, output_gallery, output_latest_scene_image,
            output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown
        ]
     ).then(
-        fn=enable_buttons_after_processing,
-        inputs=None,
-        outputs=[engage_button, surprise_button],
-        queue=False
+        fn=enable_buttons_after_processing, inputs=None, outputs=[engage_button, surprise_button], queue=False
     )
 
     clear_story_button.click(
-        fn=clear_story_state_ui_wrapper,
-        inputs=[],
+        fn=clear_story_state_ui_wrapper, inputs=[],
        outputs=[
            story_state_output, output_gallery, output_latest_scene_image,
            output_latest_scene_narrative, output_status_bar, output_interaction_log_markdown,
@@ -372,12 +336,11 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
        ]
     )
     surprise_button.click(
-        fn=surprise_me_func,
-        inputs=[],
+        fn=surprise_me_func, inputs=[],
        outputs=[scene_prompt_input, image_style_input, artist_style_input]
     )
     gr.Examples(
-        examples=[
+        examples=[
            ["A lone, weary traveler on a mechanical steed crosses a vast, crimson desert under twin suns. Dust devils dance in the distance.", "Sci-Fi Western", "Moebius", "greenery, water, modern city"],
            ["Deep within an ancient, bioluminescent forest, a hidden civilization of sentient fungi perform a mystical ritual around a pulsating crystal.", "Psychedelic Fantasy", "Alex Grey", "technology, buildings, roads"],
            ["A child sits on a crescent moon, fishing for stars in a swirling nebula. A friendly space whale swims nearby.", "Whimsical Cosmic", "James Jean", "realistic, dark, scary"],
@@ -391,9 +354,11 @@ with gr.Blocks(theme=omega_theme, css=omega_css, title="✨ StoryVerse Omega ✨
 # --- Entry Point ---
 if __name__ == "__main__":
     print("="*80); print("✨ StoryVerse Omega™ Launching... ✨")
-    print(f"
-
-
+    print(f" Text LLM Ready (Gemini): {GEMINI_TEXT_IS_READY}"); print(f" Text LLM Ready (HF): {HF_TEXT_IS_READY}")
+    # Corrected to only show HF_IMAGE_IS_READY as per our simplification
+    print(f" Image Provider Ready (HF): {HF_IMAGE_IS_READY}")
+    if not (GEMINI_TEXT_IS_READY or HF_TEXT_IS_READY) or not HF_IMAGE_IS_READY: # Adjusted condition
+        print(" 🔴 WARNING: Not all required AI services (Text and HF Image) are configured.")
     print(f" Default Text Model: {UI_DEFAULT_TEXT_MODEL_KEY}"); print(f" Default Image Provider: {UI_DEFAULT_IMAGE_PROVIDER_KEY}")
     print("="*80)
     story_weaver_demo.launch(debug=True, server_name="0.0.0.0", share=False)
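A note on the event wiring this commit compresses onto single lines: the engage button uses Gradio's chained events (.click(...).then(...).then(...)) to disable the buttons, run the long-running orchestrator, and then re-enable the buttons. The snippet below is a minimal, self-contained sketch of that same pattern; the names used here (run_btn, slow_task, demo) are illustrative only and are not part of app.py.

import time
import gradio as gr

def lock_button():
    # Returning a Button with interactive=False updates the output component,
    # the same style of update app.py's disable_buttons_for_processing uses.
    return gr.Button(interactive=False)

def unlock_button():
    return gr.Button(interactive=True)

def slow_task(text):
    time.sleep(2)  # stand-in for the narrative/image generation work
    return f"Processed: {text}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Result")
    run_btn = gr.Button("Run")

    # queue=False keeps the lock/unlock steps immediate; the slow step uses the queue.
    run_btn.click(fn=lock_button, inputs=None, outputs=run_btn, queue=False
    ).then(fn=slow_task, inputs=inp, outputs=out
    ).then(fn=unlock_button, inputs=None, outputs=run_btn, queue=False)

if __name__ == "__main__":
    demo.launch()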
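The diff also calls generate_image_hf_model(...) and then reads .success, .image, .error, and .model_id_used from the returned ImageGenResponse, but core/image_services.py is not part of this commit. The following is only a plausible sketch of such a wrapper, assuming the huggingface_hub InferenceClient and the STORYVERSE_HF_TOKEN secret that app.py mentions; the real module may differ.

import os
from dataclasses import dataclass
from typing import Optional

from huggingface_hub import InferenceClient
from PIL import Image

@dataclass
class ImageGenResponse:
    # Hypothetical shape, inferred from how app.py reads the response.
    success: bool
    image: Optional[Image.Image] = None
    error: Optional[str] = None
    provider: str = "HF"
    model_id_used: Optional[str] = None

def generate_image_hf_model(prompt: str, model_id: str,
                            negative_prompt: Optional[str] = None,
                            width: int = 768, height: int = 768) -> ImageGenResponse:
    """Call a Hugging Face text-to-image model and wrap the result (sketch)."""
    token = os.getenv("STORYVERSE_HF_TOKEN")  # secret name referenced in app.py's error message
    try:
        client = InferenceClient(token=token)
        img = client.text_to_image(prompt, model=model_id,
                                   negative_prompt=negative_prompt,
                                   width=width, height=height)
        return ImageGenResponse(success=True, image=img, model_id_used=model_id)
    except Exception as exc:  # surface the error to the UI instead of raising
        return ImageGenResponse(success=False, error=str(exc), model_id_used=model_id)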