Update app.py
app.py
CHANGED
@@ -1,30 +1,29 @@
- # app.py (Streamlit version for StoryVerse Weaver - …
- import streamlit as st
- from PIL import Image, ImageDraw, ImageFont
import os
import time
import random
- import traceback

# --- Page Configuration (MUST BE THE VERY FIRST STREAMLIT COMMAND) ---
st.set_page_config(
page_title="✨ StoryVerse Weaver ✨",
- page_icon="π»",
- layout="wide",
- initial_sidebar_state="expanded"
)
- # --- END OF PAGE CONFIG ---

- # --- Core Logic Imports …
from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf, LLMTextResponse
from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
- from core.story_engine import Story, Scene
from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
from core.utils import basic_text_cleanup

# --- Initialize AI Services (Cached) ---
- @st.cache_resource
def load_ai_services_config():
print("--- Initializing AI Services (Streamlit Cache Resource) ---")
initialize_text_llms()
@@ -43,10 +42,8 @@ TEXT_MODELS = {}
UI_DEFAULT_TEXT_MODEL_KEY = None
if AI_SERVICES_STATUS["gemini_text_ready"]:
TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
- …
- if AI_SERVICES_STATUS["hf_text_ready"]:
TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}
- TEXT_MODELS["Gemma 2B (Narrate via HF)"] = {"id": "google/gemma-2b-it", "type": "hf_text"}

if TEXT_MODELS:
if AI_SERVICES_STATUS["gemini_text_ready"] and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
@@ -60,54 +57,17 @@ IMAGE_PROVIDERS = {}
UI_DEFAULT_IMAGE_PROVIDER_KEY = None
if AI_SERVICES_STATUS["dalle_image_ready"]:
IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
- IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 2 (Legacy)"] = "dalle_2"
UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
elif AI_SERVICES_STATUS["hf_image_ready"]:
- IMAGE_PROVIDERS["💡 HF - …
IMAGE_PROVIDERS["π HF - OpenJourney"] = "hf_openjourney"
- …
- UI_DEFAULT_IMAGE_PROVIDER_KEY = "💡 HF - Stable Diffusion XL Base"
- …
if not IMAGE_PROVIDERS:
IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"
- elif not UI_DEFAULT_IMAGE_PROVIDER_KEY and IMAGE_PROVIDERS :
- UI_DEFAULT_IMAGE_PROVIDER_KEY = list(IMAGE_PROVIDERS.keys())[0]

- # --- Custom CSS …
- streamlit_omega_css = """
- <style>
- body { color: #D0D0E0; background-color: #0F0F1A; font-family: 'Lexend Deca', sans-serif;}
- .stApp { background-color: #0F0F1A; }
- h1 { font-size: 2.8em !important; text-align: center; color: transparent !important; background: linear-gradient(135deg, #A020F0 0%, #E040FB 100%); -webkit-background-clip: text; background-clip: text; margin-bottom: 5px !important; letter-spacing: -1px;}
- h3 { color: #C080F0 !important; text-align: center; font-weight: 400; margin-bottom: 25px !important;}
- .main .block-container { padding-top: 2rem; padding-bottom: 2rem; padding-left: 2rem; padding-right: 2rem; max-width: 1400px; margin: auto; background-color: #1A1A2E; border-radius: 15px; box-shadow: 0 8px 24px rgba(0,0,0,0.15);}
- [data-testid="stSidebar"] { background-color: #131325; border-right: 1px solid #2A2A4A; padding: 1rem;}
- [data-testid="stSidebar"] .stMarkdown h3 { color: #D0D0FF !important; font-size: 1.5em; border-bottom: 2px solid #7F00FF; padding-bottom: 5px;}
- .stTextInput > div > div > input, .stTextArea > div > div > textarea, .stSelectbox > div > div > div[data-baseweb="select"] > div, div[data-baseweb="input"] > input { background-color: #2A2A4A !important; color: #E0E0FF !important; border: 1px solid #4A4A6A !important; border-radius: 8px !important;}
- .stButton > button {
- background: linear-gradient(135deg, #7F00FF 0%, #E100FF 100%) !important;
- color: white !important; border: none !important; border-radius: 8px !important;
- padding: 0.7em 1.3em !important; font-weight: 600 !important;
- box-shadow: 0 4px 8px rgba(0,0,0,0.15) !important;
- transition: all 0.2s ease-in-out; width: 100%;
- }
- .stButton > button:hover { transform: scale(1.03) translateY(-1px); box-shadow: 0 8px 16px rgba(127,0,255,0.3) !important; }
- .stButton > button:disabled { background: #4A4A6A !important; color: #8080A0 !important; cursor: not-allowed; }
- button[kind="secondary"] { background: #4A4A6A !important; color: #E0E0FF !important;} /* For other buttons if not primary */
- button[kind="secondary"]:hover { background: #5A5A7A !important; }
- .stImage > img { border-radius: 12px; box-shadow: 0 6px 15px rgba(0,0,0,0.25); max-height: 550px; margin: auto; display: block;}
- .stExpander { background-color: #161628; border: 1px solid #2A2A4A; border-radius: 12px; margin-bottom: 1em;}
- .stExpander header { font-size: 1.1em; font-weight: 500; color: #D0D0FF;}
- .important-note { background-color: rgba(127,0,255,0.1); border-left: 5px solid #7F00FF; padding: 15px; margin-bottom:20px; color: #E0E0FF; border-radius: 6px;}
- .status-message { padding: 10px; border-radius: 6px; margin-top: 10px; text-align: center; font-weight: 500; }
- .status-success { background-color: #104010; color: #B0FFB0; border: 1px solid #208020; }
- .status-error { background-color: #401010; color: #FFB0B0; border: 1px solid #802020; }
- .status-processing { background-color: #102040; color: #B0D0FF; border: 1px solid #204080; }
- .gallery-col img { border-radius: 8px; box-shadow: 0 2px 6px rgba(0,0,0,0.15); margin-bottom: 5px;}
- .gallery-col .stCaption { font-size: 0.85em; text-align: center; color: #A0A0C0;}
- </style>
- """
st.markdown(streamlit_omega_css, unsafe_allow_html=True)

# --- Helper: Placeholder Image Creation ---
@@ -123,71 +83,47 @@ def create_placeholder_image_st(text="Processing...", size=(512, 512), color="#2…
draw.text(((size[0]-tw)/2, (size[1]-th)/2), text, font=font, fill=text_color); return img

# --- Initialize Session State ---
- …
- if '…
- if '…
- if '…
- if '…
- if '…
- if '…
- if '…
- if '…

# --- UI Definition ---
st.markdown("<div align='center'><h1>✨ StoryVerse Weaver ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>", unsafe_allow_html=True)
- st.markdown("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Weaver help you craft captivating scenes …

# --- Sidebar for Inputs & Configuration ---
with st.sidebar:
st.markdown("### 💡 **Craft Your Scene**")
- with st.form("scene_input_form_key", clear_on_submit=False):
- scene_prompt_text_val = st.text_area("Scene Vision …
- value=st.session_state.form_scene_prompt, height=150,
- placeholder="e.g., A lone astronaut discovers a glowing alien artifact...")
st.markdown("#### 🎨 Visual Style")
col_style1, col_style2 = st.columns(2)
- with col_style1:
- …
- index=(["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))).index(st.session_state.form_image_style) if st.session_state.form_image_style in (["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))) else 0)
- with col_style2:
- artist_style_val = st.text_input("Artistic Inspiration (Optional):",
- value=st.session_state.form_artist_style, placeholder="e.g., Moebius")
- …
- negative_prompt_val = st.text_area("Exclude from Image (Negative Prompt):", value=COMMON_NEGATIVE_PROMPTS, height=80)
- …
with st.expander("⚙️ Advanced AI Configuration", expanded=False):
- text_model_val = st.selectbox("Narrative …
- …
- image_quality_val = st.selectbox("Image Detail:", ["Standard", "High Detail", "Sketch Concept"], index=0)
- …
- submit_button_val = st.form_submit_button("π Weave Scene!", use_container_width=True, type="primary", disabled=st.session_state.processing_scene)

- …
- st.session_state.form_image_style = sur_style
- st.session_state.form_artist_style = sur_artist
- st.rerun()
- with col_btn2:
- if st.button("🗑️ New Story", use_container_width=True, disabled=st.session_state.processing_scene, key="sidebar_clear_btn", type="secondary"): # type="secondary" is conceptual
- st.session_state.story_object = Story()
- st.session_state.current_log = ["Story Cleared."]
- st.session_state.latest_scene_image_pil = None
- st.session_state.latest_scene_narrative = "## ✨ New Story Begins ✨"
- st.session_state.status_message = {"text": "π Story Cleared.", "type": "processing"}
- st.session_state.form_scene_prompt = ""
- st.rerun()

with st.expander("🔧 AI Services Status", expanded=False):
- # ... (API status HTML generation as before, using AI_SERVICES_STATUS) ...
text_llm_ok, image_gen_ok = (AI_SERVICES_STATUS["gemini_text_ready"] or AI_SERVICES_STATUS["hf_text_ready"]), (AI_SERVICES_STATUS["dalle_image_ready"] or AI_SERVICES_STATUS["hf_image_ready"])
if not text_llm_ok and not image_gen_ok: st.error("CRITICAL: NO AI SERVICES CONFIGURED.")
else:
@@ -196,56 +132,54 @@ with st.sidebar:
if image_gen_ok: st.success("Image Generation Ready.")
else: st.warning("Image Generation NOT Ready.")

- …
# --- Main Display Area ---
- st.markdown("---")
st.markdown("### 🖼️ **Your Evolving StoryVerse**", unsafe_allow_html=True)

- …
- st.markdown(f"<p class='status-message status-{status_type}'>{st.session_state.status_message['text']}</p>", unsafe_allow_html=True)

- # Tabs for display
tab_latest, tab_scroll, tab_log = st.tabs(["π Latest Scene", "π Story Scroll", "✍️ Interaction Log"])

with tab_latest:
- if …
- st.image(create_placeholder_image_st("🎨 Conjuring visuals..."), …
- elif …
- st.image( …
else:
- st.image(create_placeholder_image_st("Describe a scene to begin!", size=(512,300), color="#1A1A2E"), …
- st.markdown( …

with tab_scroll:
- if …
- …
else:
st.caption("Your story scroll is empty. Weave your first scene!")

with tab_log:
- …
- st.markdown( …

# --- Logic for Form Submission ---
if submit_button_val:
if not scene_prompt_text_val.strip():
- …
st.rerun()
else:
- …
- st.rerun() # Rerun to show "processing" and disable button

- # …
_scene_prompt = scene_prompt_text_val
_image_style = image_style_val
_artist_style = artist_style_val
@@ -255,66 +189,79 @@ if submit_button_val:
_narr_length = narrative_length_val
_img_quality = image_quality_val

- # ---- Main Generation Logic ----
current_narrative_text = f"Narrative Error: Init failed."
generated_image_pil = None
- image_gen_error_msg = None
- …

# 1. Generate Narrative
- …

# 2. Generate Image
- …

# 3. Add Scene
- if …
- …

- …
user_prompt=_scene_prompt, narrative_text=current_narrative_text, image=generated_image_pil,
image_style_prompt=f"{_image_style}{f', by {_artist_style}' if _artist_style else ''}",
- image_provider=_image_provider, error_message= …
)
- …

# 4. Set final status message
- if …
- …
else:
- …

- …
- st.rerun()
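
The helper create_placeholder_image_st is used throughout both versions of the file, but only its signature (in the hunk header above) and its final draw.text line are visible in this diff. A minimal sketch of what such a helper typically looks like follows; the default colours, the font fallback, and the use of Pillow's multiline_textbbox to obtain tw/th are assumptions, not the Space's actual implementation:

    from PIL import Image, ImageDraw, ImageFont

    def create_placeholder_image_st(text="Processing...", size=(512, 512),
                                    color="#2A2A4A", text_color="#E0E0FF"):
        # Hypothetical reconstruction: only the signature and the final
        # draw.text line appear in the diff; everything else is assumed.
        img = Image.new("RGB", size, color)
        draw = ImageDraw.Draw(img)
        try:
            font = ImageFont.truetype("DejaVuSans.ttf", 30)
        except (IOError, OSError):
            font = ImageFont.load_default()
        # Measure the (possibly multi-line) text so it can be centred.
        left, top, right, bottom = draw.multiline_textbbox((0, 0), text, font=font)
        tw, th = right - left, bottom - top
        draw.text(((size[0]-tw)/2, (size[1]-th)/2), text, font=font, fill=text_color)
        return img
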
+ # app.py (Streamlit version for StoryVerse Weaver - Robust Error Handling)
+ import streamlit as st
+ from PIL import Image, ImageDraw, ImageFont
import os
import time
import random
+ import traceback

# --- Page Configuration (MUST BE THE VERY FIRST STREAMLIT COMMAND) ---
st.set_page_config(
page_title="✨ StoryVerse Weaver ✨",
+ page_icon="π»",
+ layout="wide",
+ initial_sidebar_state="expanded"
)

+ # --- Core Logic Imports ---
from core.llm_services import initialize_text_llms, is_gemini_text_ready, is_hf_text_ready, generate_text_gemini, generate_text_hf, LLMTextResponse
from core.image_services import initialize_image_llms, is_dalle_ready, is_hf_image_api_ready, generate_image_dalle, generate_image_hf_model, ImageGenResponse
+ from core.story_engine import Story, Scene
from prompts.narrative_prompts import get_narrative_system_prompt, format_narrative_user_prompt
from prompts.image_style_prompts import STYLE_PRESETS, COMMON_NEGATIVE_PROMPTS, format_image_generation_prompt
from core.utils import basic_text_cleanup

# --- Initialize AI Services (Cached) ---
+ @st.cache_resource
def load_ai_services_config():
print("--- Initializing AI Services (Streamlit Cache Resource) ---")
initialize_text_llms()
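
The remainder of load_ai_services_config is collapsed at this point in the diff. Judging from the readiness flags the rest of the script reads out of AI_SERVICES_STATUS, the cached loader plausibly ends along the lines of the sketch below; the exact return value and the module-level assignment are assumptions, not shown in the commit:

    @st.cache_resource  # run once per server process; later reruns reuse the cached result
    def load_ai_services_config():
        print("--- Initializing AI Services (Streamlit Cache Resource) ---")
        initialize_text_llms()
        initialize_image_llms()
        # Collect readiness flags once so the rest of the script can branch on plain booleans.
        return {
            "gemini_text_ready": is_gemini_text_ready(),
            "hf_text_ready": is_hf_text_ready(),
            "dalle_image_ready": is_dalle_ready(),
            "hf_image_ready": is_hf_image_api_ready(),
        }

    AI_SERVICES_STATUS = load_ai_services_config()

Using st.cache_resource here is what keeps the heavyweight API clients from being re-initialized on every Streamlit rerun.
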
UI_DEFAULT_TEXT_MODEL_KEY = None
if AI_SERVICES_STATUS["gemini_text_ready"]:
TEXT_MODELS["✨ Gemini 1.5 Flash (Narrate)"] = {"id": "gemini-1.5-flash-latest", "type": "gemini"}
+ if AI_SERVICES_STATUS["hf_text_ready"]: # Used if Gemini not ready or as an option
TEXT_MODELS["Mistral 7B (Narrate via HF)"] = {"id": "mistralai/Mistral-7B-Instruct-v0.2", "type": "hf_text"}

if TEXT_MODELS:
if AI_SERVICES_STATUS["gemini_text_ready"] and "✨ Gemini 1.5 Flash (Narrate)" in TEXT_MODELS: UI_DEFAULT_TEXT_MODEL_KEY = "✨ Gemini 1.5 Flash (Narrate)"
…
UI_DEFAULT_IMAGE_PROVIDER_KEY = None
if AI_SERVICES_STATUS["dalle_image_ready"]:
IMAGE_PROVIDERS["🖼️ OpenAI DALL-E 3"] = "dalle_3"
UI_DEFAULT_IMAGE_PROVIDER_KEY = "🖼️ OpenAI DALL-E 3"
elif AI_SERVICES_STATUS["hf_image_ready"]:
+ IMAGE_PROVIDERS["💡 HF - SDXL Base"] = "hf_sdxl_base"
IMAGE_PROVIDERS["π HF - OpenJourney"] = "hf_openjourney"
+ UI_DEFAULT_IMAGE_PROVIDER_KEY = "💡 HF - SDXL Base"
if not IMAGE_PROVIDERS:
IMAGE_PROVIDERS["No Image Providers Configured"] = "none"
UI_DEFAULT_IMAGE_PROVIDER_KEY = "No Image Providers Configured"

+ # --- Custom CSS ---
+ streamlit_omega_css = """ /* ... Your full omega_css string here ... */ """
st.markdown(streamlit_omega_css, unsafe_allow_html=True)

# --- Helper: Placeholder Image Creation ---
…
draw.text(((size[0]-tw)/2, (size[1]-th)/2), text, font=font, fill=text_color); return img

# --- Initialize Session State ---
+ ss = st.session_state # Shortcut
+ if 'story_object' not in ss: ss.story_object = Story()
+ if 'current_log' not in ss: ss.current_log = ["Welcome to StoryVerse Weaver!"]
+ if 'latest_scene_image_pil' not in ss: ss.latest_scene_image_pil = None
+ if 'latest_scene_narrative' not in ss: ss.latest_scene_narrative = "## ✨ A New Story Begins ✨\nDescribe your first scene idea in the panel on the left!"
+ if 'status_message' not in ss: ss.status_message = {"text": "Ready to weave your first masterpiece!", "type": "processing"}
+ if 'processing_scene' not in ss: ss.processing_scene = False
+ if 'form_scene_prompt' not in ss: ss.form_scene_prompt = ""
+ if 'form_image_style' not in ss: ss.form_image_style = "Default (Cinematic Realism)"
+ if 'form_artist_style' not in ss: ss.form_artist_style = ""

# --- UI Definition ---
st.markdown("<div align='center'><h1>✨ StoryVerse Weaver ✨</h1>\n<h3>Craft Immersive Multimodal Worlds with AI</h3></div>", unsafe_allow_html=True)
+ st.markdown("<div class='important-note'><strong>Welcome, Worldsmith!</strong> Describe your vision, choose your style, and let Weaver help you craft captivating scenes. Ensure API keys (<code>STORYVERSE_...</code>) are set in your environment/secrets!</div>", unsafe_allow_html=True)

# --- Sidebar for Inputs & Configuration ---
with st.sidebar:
st.markdown("### 💡 **Craft Your Scene**")
+ with st.form("scene_input_form_key", clear_on_submit=False):
+ scene_prompt_text_val = st.text_area("Scene Vision:", value=ss.form_scene_prompt, height=150)
st.markdown("#### 🎨 Visual Style")
col_style1, col_style2 = st.columns(2)
+ with col_style1: image_style_val = st.selectbox("Style Preset:", options=["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys())), index=(["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))).index(ss.form_image_style) if ss.form_image_style in (["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))) else 0)
+ with col_style2: artist_style_val = st.text_input("Artistic Inspiration:", value=ss.form_artist_style)
+ negative_prompt_val = st.text_area("Exclude from Image:", value=COMMON_NEGATIVE_PROMPTS, height=80)
with st.expander("⚙️ Advanced AI Configuration", expanded=False):
+ text_model_val = st.selectbox("Narrative Engine:", options=list(TEXT_MODELS.keys()), index=list(TEXT_MODELS.keys()).index(UI_DEFAULT_TEXT_MODEL_KEY) if UI_DEFAULT_TEXT_MODEL_KEY in TEXT_MODELS else 0)
+ image_provider_val = st.selectbox("Visual Engine:", options=list(IMAGE_PROVIDERS.keys()), index=list(IMAGE_PROVIDERS.keys()).index(UI_DEFAULT_IMAGE_PROVIDER_KEY) if UI_DEFAULT_IMAGE_PROVIDER_KEY in IMAGE_PROVIDERS else 0)
+ narrative_length_val = st.selectbox("Narrative Detail:", ["Short", "Medium", "Detailed"], index=1)
+ image_quality_val = st.selectbox("Image Detail:", ["Standard", "High Detail", "Sketch"], index=0)
+ submit_button_val = st.form_submit_button("π Weave Scene!", use_container_width=True, type="primary", disabled=ss.processing_scene)

+ col_btn_s, col_btn_c = st.columns(2)
+ with col_btn_s:
+ if st.button("🎲 Surprise Me!", use_container_width=True, disabled=ss.processing_scene, key="sidebar_surprise_btn"):
+ sur_prompt, sur_style, sur_artist = surprise_me_func(); ss.form_scene_prompt = sur_prompt; ss.form_image_style = sur_style; ss.form_artist_style = sur_artist; st.rerun()
+ with col_btn_c:
+ if st.button("🗑️ New Story", use_container_width=True, disabled=ss.processing_scene, key="sidebar_clear_btn"):
+ ss.story_object = Story(); ss.current_log = ["Cleared."]; ss.latest_scene_image_pil = None; ss.latest_scene_narrative = "## New Story"; ss.status_message = {"text": "Cleared", "type": "processing"}; ss.form_scene_prompt = ""; st.rerun()
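
surprise_me_func(), called by the 🎲 Surprise Me! button above, is not defined anywhere in the visible diff; it presumably lives in a folded region or in a helper module. A sketch of the kind of function the call site expects, returning a (scene_prompt, image_style, artist_style) tuple of strings, is below; the example prompts and artist names are invented placeholders:

    import random

    def surprise_me_func():
        # Hypothetical sketch: the call site only requires a
        # (scene_prompt, image_style, artist_style) tuple of strings.
        prompts = [
            "A lone astronaut discovers a glowing alien artifact in a ruined temple.",
            "A floating night market drifts between rain-slicked skyscrapers.",
        ]
        styles = ["Default (Cinematic Realism)"] + sorted(list(STYLE_PRESETS.keys()))
        artists = ["", "Moebius"]
        return random.choice(prompts), random.choice(styles), random.choice(artists)
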

with st.expander("🔧 AI Services Status", expanded=False):
text_llm_ok, image_gen_ok = (AI_SERVICES_STATUS["gemini_text_ready"] or AI_SERVICES_STATUS["hf_text_ready"]), (AI_SERVICES_STATUS["dalle_image_ready"] or AI_SERVICES_STATUS["hf_image_ready"])
if not text_llm_ok and not image_gen_ok: st.error("CRITICAL: NO AI SERVICES CONFIGURED.")
else:
…
if image_gen_ok: st.success("Image Generation Ready.")
else: st.warning("Image Generation NOT Ready.")

# --- Main Display Area ---
+ st.markdown("---")
st.markdown("### 🖼️ **Your Evolving StoryVerse**", unsafe_allow_html=True)

+ status_type = ss.status_message.get("type", "processing")
+ st.markdown(f"<p class='status-message status-{status_type}'>{ss.status_message['text']}</p>", unsafe_allow_html=True)

tab_latest, tab_scroll, tab_log = st.tabs(["π Latest Scene", "π Story Scroll", "✍️ Interaction Log"])

with tab_latest:
+ if ss.processing_scene and ss.latest_scene_image_pil is None:
+ st.image(create_placeholder_image_st("🎨 Conjuring visuals..."), use_container_width=True)
+ elif ss.latest_scene_image_pil:
+ st.image(ss.latest_scene_image_pil, use_container_width=True, caption="Latest Generated Image")
else:
+ st.image(create_placeholder_image_st("Describe a scene to begin!", size=(512,300), color="#1A1A2E"), use_container_width=True)
+ st.markdown(ss.latest_scene_narrative, unsafe_allow_html=True)

with tab_scroll:
+ if ss.story_object and ss.story_object.scenes:
+ st.subheader("Story Scroll")
+ num_cols_gallery = st.slider("Gallery Columns:", 1, 5, 3, key="gallery_cols_slider")
+ gallery_cols_list = st.columns(num_cols_gallery)
+ scenes_for_gallery_data = ss.story_object.get_all_scenes_for_gallery_display()
+ for i, (img, caption) in enumerate(scenes_for_gallery_data):
+ with gallery_cols_list[i % num_cols_gallery]:
+ display_img_gallery = img
+ if img is None: display_img_gallery = create_placeholder_image_st(f"S{i+1}\nNo Image", size=(180,180), color="#2A2A4A")
+ st.image(display_img_gallery, caption=caption if caption else f"Scene {i+1}", use_container_width=True, output_format="PNG")
else:
st.caption("Your story scroll is empty. Weave your first scene!")

with tab_log:
+ log_display_text = "\n\n---\n\n".join(ss.current_log[::-1][:50])
+ st.markdown(log_display_text, unsafe_allow_html=True)

# --- Logic for Form Submission ---
if submit_button_val:
if not scene_prompt_text_val.strip():
+ ss.status_message = {"text": "Scene prompt cannot be empty!", "type": "error"}
st.rerun()
else:
+ ss.processing_scene = True
+ ss.status_message = {"text": f"π Weaving Scene {ss.story_object.current_scene_number + 1}...", "type": "processing"}
+ ss.current_log.append(f"**π Scene {ss.story_object.current_scene_number + 1} - {time.strftime('%H:%M:%S')}**")
+ st.rerun() # Rerun to show "processing" and disable button

+ # ---- Main Generation Logic ----
_scene_prompt = scene_prompt_text_val
_image_style = image_style_val
_artist_style = artist_style_val
…
_narr_length = narrative_length_val
_img_quality = image_quality_val

current_narrative_text = f"Narrative Error: Init failed."
generated_image_pil = None
+ image_gen_error_msg = None # Specific to image generation part
+ final_scene_error_overall = None # Overall error for the scene

# 1. Generate Narrative
+ try:
+ with st.spinner("✍️ Crafting narrative... (This may take a moment)"):
+ text_model_info = TEXT_MODELS.get(_text_model)
+ if text_model_info and text_model_info["type"] != "none":
+ system_p = get_narrative_system_prompt("default"); prev_narr = ss.story_object.get_last_scene_narrative(); user_p = format_narrative_user_prompt(_scene_prompt, prev_narr)
+ ss.current_log.append(f" Narrative: Using {_text_model} ({text_model_info['id']}).")
+ text_resp = None
+ if text_model_info["type"] == "gemini" and AI_SERVICES_STATUS["gemini_text_ready"]: text_resp = generate_text_gemini(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if _narr_length.startswith("Detailed") else 400)
+ elif text_model_info["type"] == "hf_text" and AI_SERVICES_STATUS["hf_text_ready"]: text_resp = generate_text_hf(user_p, model_id=text_model_info["id"], system_prompt=system_p, max_tokens=768 if _narr_length.startswith("Detailed") else 400)
+ if text_resp and text_resp.success: current_narrative_text = basic_text_cleanup(text_resp.text); ss.current_log.append(" Narrative: Success.")
+ elif text_resp: current_narrative_text = f"**Narrative Error ({_text_model}):** {text_resp.error}"; ss.current_log.append(f" Narrative: FAILED - {text_resp.error}")
+ else: ss.current_log.append(f" Narrative: FAILED - No response.")
+ else: current_narrative_text = "**Narrative Error:** Model unavailable."; ss.current_log.append(f" Narrative: FAILED - Model '{_text_model}' unavailable.")
+ ss.latest_scene_narrative = f"## Scene Idea: {_scene_prompt}\n\n{current_narrative_text}"
+ except Exception as e_narr:
+ current_narrative_text = f"**Narrative Generation Exception:** {type(e_narr).__name__} - {str(e_narr)}"
+ ss.current_log.append(f" CRITICAL NARRATIVE ERROR: {traceback.format_exc()}")
+ ss.latest_scene_narrative = f"## Scene Idea: {_scene_prompt}\n\n{current_narrative_text}"
+ final_scene_error_overall = current_narrative_text

# 2. Generate Image
+ try:
+ with st.spinner("🎨 Conjuring visuals... (This may take a moment)"):
+ selected_img_prov_type = IMAGE_PROVIDERS.get(_image_provider)
+ img_content_prompt = current_narrative_text if current_narrative_text and "Error" not in current_narrative_text else _scene_prompt
+ quality_kw = "ultra detailed, " if _img_quality == "High Detail" else ("concept sketch, " if _img_quality.startswith("Sketch") else "") # the form above offers 'Sketch', not 'Sketch Concept'
+ full_img_prompt_for_gen = format_image_generation_prompt(quality_kw + img_content_prompt[:350], _image_style, _artist_style)
+ ss.current_log.append(f" Image: Using {_image_provider} (type '{selected_img_prov_type}').")
+ if selected_img_prov_type and selected_img_prov_type != "none":
+ img_resp = None
+ if selected_img_prov_type.startswith("dalle_") and AI_SERVICES_STATUS["dalle_image_ready"]:
+ dalle_model = "dall-e-3" if selected_img_prov_type == "dalle_3" else "dall-e-2"
+ img_resp = generate_image_dalle(full_img_prompt_for_gen, model=dalle_model)
+ elif selected_img_prov_type.startswith("hf_") and AI_SERVICES_STATUS["hf_image_ready"]:
+ hf_model_id = "stabilityai/stable-diffusion-xl-base-1.0"; iw,ih=768,768
+ if selected_img_prov_type == "hf_openjourney": hf_model_id="prompthero/openjourney"; iw,ih=512,512
+ img_resp = generate_image_hf_model(full_img_prompt_for_gen, model_id=hf_model_id, negative_prompt=_negative_prompt, width=iw, height=ih)
+ if img_resp and img_resp.success: generated_image_pil = img_resp.image; ss.current_log.append(" Image: Success.")
+ elif img_resp: image_gen_error_msg = f"**Image Error:** {img_resp.error}"; ss.current_log.append(f" Image: FAILED - {img_resp.error}")
+ else: image_gen_error_msg = "**Image Error:** No response."; ss.current_log.append(" Image: FAILED - No response.")
+ else: image_gen_error_msg = "**Image Error:** No provider configured."; ss.current_log.append(f" Image: FAILED - No provider.")
+ ss.latest_scene_image_pil = generated_image_pil if generated_image_pil else create_placeholder_image_st("Image Gen Failed", color="#401010")
+ except Exception as e_img:
+ image_gen_error_msg = f"**Image Generation Exception:** {type(e_img).__name__} - {str(e_img)}"
+ ss.current_log.append(f" CRITICAL IMAGE ERROR: {traceback.format_exc()}")
+ ss.latest_scene_image_pil = create_placeholder_image_st("Image Gen Exception", color="#401010")
+ if not final_scene_error_overall: final_scene_error_overall = image_gen_error_msg
+ else: final_scene_error_overall += f"\n{image_gen_error_msg}"

# 3. Add Scene
+ if not final_scene_error_overall: # If no major error from narrative already
+ if image_gen_error_msg and "**Narrative Error" in current_narrative_text: final_scene_error_overall = f"{current_narrative_text}\n{image_gen_error_msg}" # note: checking the prefix, since the error strings continue with "(" or ":"
+ elif "**Narrative Error" in current_narrative_text: final_scene_error_overall = current_narrative_text
+ elif image_gen_error_msg: final_scene_error_overall = image_gen_error_msg

+ ss.story_object.add_scene_from_elements(
user_prompt=_scene_prompt, narrative_text=current_narrative_text, image=generated_image_pil,
image_style_prompt=f"{_image_style}{f', by {_artist_style}' if _artist_style else ''}",
+ image_provider=_image_provider, error_message=final_scene_error_overall
)
+ ss.current_log.append(f" Scene {ss.story_object.current_scene_number} processed.")

# 4. Set final status message
+ if final_scene_error_overall:
+ ss.status_message = {"text": f"Scene {ss.story_object.current_scene_number} generated with errors. Check log.", "type": "error"}
else:
+ ss.status_message = {"text": f"π Scene {ss.story_object.current_scene_number} woven successfully!", "type": "success"}

+ ss.processing_scene = False
+ st.rerun()
|