Update app.py

app.py (CHANGED)

@@ -1,5 +1,5 @@
 # Copyright 2025 Google LLC. Based on work by Yousif Ahmed.
-# Concept: ChronoWeave
+# Concept: ChronoWeave - Branching Narrative Generation
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
@@ -14,19 +14,20 @@ import time
 import wave
 import contextlib
 import asyncio
-import uuid
-import shutil
-import logging
+import uuid # For unique identifiers
+import shutil # For directory operations
+import logging # For improved logging
 
 # Image handling
 from PIL import Image
+
 # Pydantic for data validation
 from pydantic import BaseModel, Field, ValidationError, field_validator, model_validator
 from typing import List, Optional, Literal, Dict, Any
 
 # Video and audio processing
 from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips
-# from moviepy.config import change_settings
+# from moviepy.config import change_settings # Potential for setting ImageMagick path if needed
 
 # Type hints
 import typing_extensions as typing
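
Note on the unchanged `moviepy.editor` import above: that module path exists on the moviepy 1.x line, while moviepy 2.x removed the `editor` module in favor of top-level imports. A small compatibility shim is sketched below; it is an assumption about the runtime environment, not part of this commit.

```python
# Hypothetical compatibility shim, not part of this commit.
try:
    from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips  # moviepy 1.x
except ModuleNotFoundError:
    from moviepy import ImageClip, AudioFileClip, concatenate_videoclips  # moviepy >= 2.0
```
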
@@ -36,7 +37,10 @@ import nest_asyncio
 nest_asyncio.apply()
 
 # --- Logging Setup ---
-logging.basicConfig(
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s'
+)
 logger = logging.getLogger(__name__)
 
 # --- Configuration ---
@@ -51,7 +55,7 @@ Generate multiple, branching story timelines from a single theme using AI, compl
 TEXT_MODEL_ID = "models/gemini-1.5-flash"
 AUDIO_MODEL_ID = "models/gemini-1.5-flash"
 AUDIO_SAMPLING_RATE = 24000
-IMAGE_MODEL_ID = "imagen-3"
+IMAGE_MODEL_ID = "imagen-3" # NOTE: Requires Vertex AI SDK access
 DEFAULT_ASPECT_RATIO = "1:1"
 VIDEO_FPS = 24
 VIDEO_CODEC = "libx264"
@@ -65,22 +69,38 @@ try:
     logger.info("Google API Key loaded from Streamlit secrets.")
 except KeyError:
     GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
-    if GOOGLE_API_KEY:
+    if GOOGLE_API_KEY:
+        logger.info("Google API Key loaded from environment variable.")
+    else:
+        st.error("🚨 **Google API Key Not Found!** Please configure it.", icon="🚨")
+        st.stop()
 
 # --- Initialize Google Clients ---
 try:
     genai.configure(api_key=GOOGLE_API_KEY)
     logger.info("Configured google-generativeai with API key.")
+
+    # Initialize text/JSON model
     client_standard = genai.GenerativeModel(TEXT_MODEL_ID)
     logger.info(f"Initialized text/JSON model handle: {TEXT_MODEL_ID}.")
+
+    # Initialize audio model
     live_model = genai.GenerativeModel(AUDIO_MODEL_ID)
     logger.info(f"Initialized audio model handle: {AUDIO_MODEL_ID}.")
+
+    # Initialize image model (placeholder for future Vertex AI SDK integration)
     image_model_genai = genai.GenerativeModel(IMAGE_MODEL_ID)
     logger.info(f"Initialized google-generativeai handle for image model: {IMAGE_MODEL_ID} (May require Vertex AI SDK).")
+
     # ---> TODO: Initialize Vertex AI client here if switching SDK <---
-except AttributeError as ae:
+except AttributeError as ae:
+    logger.exception("AttributeError during Client Init.")
+    st.error(f"🚨 Init Error: {ae}. Update library?", icon="🚨")
+    st.stop()
+except Exception as e:
+    logger.exception("Failed to initialize Google Clients/Models.")
+    st.error(f"🚨 Failed Init: {e}", icon="🚨")
+    st.stop()
 
 # --- Define Pydantic Schemas (Using V2 Syntax) ---
 class StorySegment(BaseModel):
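
For the `TODO` in the hunk above: switching the image path to the Vertex AI SDK starts with a one-time initialization at startup. A minimal sketch, assuming the `google-cloud-aiplatform` package and ambient GCP credentials; the project ID and region are placeholders, not values from this repo.

```python
# Hypothetical Vertex AI initialization for the TODO above; not part of this commit.
import vertexai

vertexai.init(project="your-gcp-project", location="us-central1")
```
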
@@ -89,90 +109,148 @@ class StorySegment(BaseModel):
     audio_text: str = Field(..., min_length=5, max_length=150)
     character_description: str = Field(..., max_length=250)
     timeline_visual_modifier: Optional[str] = Field(None, max_length=50)
+
     @field_validator('image_prompt')
     @classmethod
     def image_prompt_no_humans(cls, v: str) -> str:
-        if any(w in v.lower() for w in ["person", "people", "human", "man", "woman", "boy", "girl", "child"]):
+        if any(w in v.lower() for w in ["person", "people", "human", "man", "woman", "boy", "girl", "child"]):
+            logger.warning(f"Prompt '{v[:50]}...' may contain humans.")
         return v
+
 class Timeline(BaseModel):
     timeline_id: int = Field(..., ge=0)
     divergence_reason: str = Field(..., min_length=5)
     segments: List[StorySegment] = Field(..., min_items=1)
+
 class ChronoWeaveResponse(BaseModel):
     core_theme: str = Field(..., min_length=5)
     timelines: List[Timeline] = Field(..., min_items=1)
     total_scenes_per_timeline: int = Field(..., gt=0)
+
     @model_validator(mode='after')
     def check_timeline_segment_count(self) -> 'ChronoWeaveResponse':
         expected = self.total_scenes_per_timeline
         for i, t in enumerate(self.timelines):
-            if len(t.segments) != expected:
+            if len(t.segments) != expected:
+                raise ValueError(f"Timeline {i} ID {t.timeline_id}: Expected {expected}, found {len(t.segments)}.")
         return self
 
 # --- Helper Functions ---
 
-# CORRECTED wave_file_writer function with proper indentation
 @contextlib.contextmanager
 def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLING_RATE, sample_width: int = 2):
     """Context manager to safely write WAV files."""
     wf = None
     try:
-        # Indented correctly
         wf = wave.open(filename, "wb")
         wf.setnchannels(channels)
         wf.setsampwidth(sample_width)
         wf.setframerate(rate)
         yield wf
     except Exception as e:
-        logger.error(f"Error wave file {filename}: {e}")
+        logger.error(f"Error opening/configuring wave file {filename}: {e}")
         raise
     finally:
         if wf:
-            # Indented correctly
             try:
                 wf.close()
             except Exception as e_close:
                 logger.error(f"Error closing wave file {filename}: {e_close}")
 
-
 async def generate_audio_live_async(api_text: str, output_filename: str, voice: Optional[str] = None) -> Optional[str]:
-    """
+    """
+    Generates audio using Gemini Live API (async version) via the GenerativeModel.
+    """
+    collected_audio = bytearray()
+    task_id = os.path.basename(output_filename).split('.')[0]
     logger.info(f"🎙️ [{task_id}] Requesting audio: '{api_text[:60]}...'")
     try:
-        # Corrected config structure
-        config = {
+        # Corrected config structure for audio generation
+        config = {
+            "response_modalities": ["AUDIO"],
+            "audio_encoding": "LINEAR16",
+            "sample_rate_hertz": AUDIO_SAMPLING_RATE,
+        }
         directive_prompt = f"Narrate directly: \"{api_text}\""
         async with live_model.connect(config=config) as session:
             await session.send_request([directive_prompt])
             async for response in session.stream_content():
-                if response.audio_chunk and response.audio_chunk.data:
+                if response.audio_chunk and response.audio_chunk.data:
+                    collected_audio.extend(response.audio_chunk.data)
+                if hasattr(response, 'error') and response.error:
+                    logger.error(f"❌ [{task_id}] Audio stream error: {response.error}")
+                    st.error(f"Audio stream error {task_id}: {response.error}", icon="🔊")
+                    return None
+        if not collected_audio:
+            logger.warning(f"⚠️ [{task_id}] No audio data received.")
+            st.warning(f"No audio data for {task_id}.", icon="🔊")
+            return None
+        with wave_file_writer(output_filename, rate=AUDIO_SAMPLING_RATE) as wf:
+            wf.writeframes(bytes(collected_audio))
+        logger.info(f"✅ [{task_id}] Audio saved: {os.path.basename(output_filename)} ({len(collected_audio)} bytes)")
         return output_filename
-    except genai.types.generation_types.BlockedPromptException as bpe:
+    except genai.types.generation_types.BlockedPromptException as bpe:
+        logger.error(f"❌ [{task_id}] Audio blocked: {bpe}")
+        st.error(f"Audio blocked {task_id}.", icon="🔇")
+        return None
+    except TypeError as te:
+        logger.exception(f"❌ [{task_id}] Audio config TypeError: {te}")
+        st.error(f"Audio config error {task_id} (TypeError): {te}. Check library/config.", icon="⚙️")
+        return None
+    except Exception as e:
+        logger.exception(f"❌ [{task_id}] Audio failed: {e}")
+        st.error(f"Audio failed {task_id}: {e}", icon="💥")
+        return None
 
 def generate_story_sequence_chrono(theme: str, num_scenes: int, num_timelines: int, divergence_prompt: str = "") -> Optional[ChronoWeaveResponse]:
-    """
+    """
+    Generates branching story sequences using Gemini structured output and validates with Pydantic.
+    """
     st.info(f"📖 Generating {num_timelines} timeline(s) x {num_scenes} scenes for: '{theme}'...")
     logger.info(f"Requesting story structure: Theme='{theme}', Timelines={num_timelines}, Scenes={num_scenes}")
-    divergence_instruction = (
+    divergence_instruction = (
+        f"Introduce clear points of divergence between timelines, after first scene if possible. "
+        f"Hint: '{divergence_prompt}'. State divergence reason clearly. **For timeline_id 0, use 'Initial path' or 'Baseline scenario'.**"
+    )
+    prompt = f"""Act as narrative designer. Create story for theme: "{theme}". Instructions: 1. Exactly **{num_timelines}** timelines. 2. Each timeline exactly **{num_scenes}** scenes. 3. **NO humans/humanoids**. Focus: animals, fantasy creatures, animated objects, nature. 4. {divergence_instruction}. 5. Style: **'Simple, friendly kids animation, bright colors, rounded shapes'**, unless `timeline_visual_modifier` alters. 6. `audio_text`: single concise sentence (max 30 words). 7. `image_prompt`: descriptive, concise (target 15-35 words MAX). Focus on scene elements. **AVOID repeating general style**. 8. `character_description`: VERY brief (name, features). Target < 20 words. Output: ONLY valid JSON object adhering to schema. No text before/after. JSON Schema: ```json
+{json.dumps(ChronoWeaveResponse.model_json_schema(), indent=2)}
+```"""
     try:
-        response = client_standard.generate_content(
+        response = client_standard.generate_content(
+            contents=prompt,
+            generation_config=genai.types.GenerationConfig(
+                response_mime_type="application/json", temperature=0.7
+            )
+        )
+        try:
+            raw_data = json.loads(response.text)
+        except json.JSONDecodeError as json_err:
+            logger.error(f"Failed JSON decode: {json_err}\nResponse:\n{response.text}")
+            st.error(f"🚨 Failed parse story: {json_err}", icon="📄")
+            st.text_area("Problem Response:", response.text, height=150)
+            return None
+        except Exception as e:
+            logger.error(f"Error processing text: {e}")
+            st.error(f"🚨 Error processing AI response: {e}", icon="📄")
+            return None
+        try:
+            validated_data = ChronoWeaveResponse.model_validate(raw_data)
+            logger.info("✅ Story structure OK!")
+            st.success("✅ Story structure OK!")
+            return validated_data
+        except ValidationError as val_err:
+            logger.error(f"JSON validation failed: {val_err}\nData:\n{json.dumps(raw_data, indent=2)}")
+            st.error(f"🚨 Gen structure invalid: {val_err}", icon="🧬")
+            st.json(raw_data)
+            return None
+    except genai.types.generation_types.BlockedPromptException as bpe:
+        logger.error(f"Story gen blocked: {bpe}")
+        st.error("🚨 Story prompt blocked.", icon="🚫")
+        return None
+    except Exception as e:
+        logger.exception("Error during story gen:")
+        st.error(f"🚨 Story gen error: {e}", icon="💥")
+        return None
 
 def generate_image_imagen(prompt: str, aspect_ratio: str = "1:1", task_id: str = "IMG") -> Optional[Image.Image]:
     """
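
The validation path added in the hunk above is plain Pydantic v2: `model_json_schema()` is embedded in the prompt, then `model_validate()` checks the parsed JSON. A toy round-trip under those schemas is sketched below; it assumes the `StorySegment`/`Timeline`/`ChronoWeaveResponse` models defined in this file, the `scene_id` and `image_prompt` field names are inferred from how segments are used later in the diff, and the data itself is invented.

```python
# Toy data for illustration only; values are invented, models come from app.py above.
toy = {
    "core_theme": "A glowing acorn",
    "total_scenes_per_timeline": 1,
    "timelines": [{
        "timeline_id": 0,
        "divergence_reason": "Initial path",
        "segments": [{
            "scene_id": 0,
            "image_prompt": "A red squirrel inspecting a glowing acorn in a mossy clearing",
            "audio_text": "Pip the squirrel finds a glowing acorn.",
            "character_description": "Pip, a small red squirrel",
            "timeline_visual_modifier": None,
        }],
    }],
}

resp = ChronoWeaveResponse.model_validate(toy)  # raises ValidationError on bad data
print(resp.timelines[0].segments[0].audio_text)
```
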
@@ -181,15 +259,17 @@ def generate_image_imagen(prompt: str, aspect_ratio: str = "1:1", task_id: str =
     (google-cloud-aiplatform) to correctly call Imagen models. >>>
     """
     logger.info(f"🖼️ [{task_id}] Requesting image: '{prompt[:70]}...' (Aspect: {aspect_ratio})")
-    logger.error(f"
+    logger.error(f"❌ [{task_id}] Image generation skipped: Function needs update to use Vertex AI SDK for Imagen.")
     st.error(f"Image generation for {task_id} skipped: Requires Vertex AI SDK implementation.", icon="🖼️")
-    # Return None because the current method is known to fail based on previous logs
     return None
 
 # --- Streamlit UI Elements ---
 st.sidebar.header("⚙️ Configuration")
-if GOOGLE_API_KEY:
+if GOOGLE_API_KEY:
+    st.sidebar.success("Google API Key Loaded", icon="✅")
+else:
+    st.sidebar.error("Google API Key Missing!", icon="🚨")
+
 theme = st.sidebar.text_input("📖 Story Theme:", "A curious squirrel finds a mysterious, glowing acorn")
 num_scenes = st.sidebar.slider("🎬 Scenes per Timeline:", min_value=2, max_value=7, value=3)
 num_timelines = st.sidebar.slider("🌿 Number of Timelines:", min_value=1, max_value=4, value=2)
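
A hedged sketch of what `generate_image_imagen` might become once ported to the Vertex AI SDK, as its docstring suggests. The model name `imagegeneration@006` and the exact parameters are assumptions about that SDK, not code from this repo.

```python
# Hypothetical Imagen call via the Vertex AI SDK; not part of this commit.
from vertexai.preview.vision_models import ImageGenerationModel

def generate_image_vertex(prompt: str, aspect_ratio: str = "1:1"):
    model = ImageGenerationModel.from_pretrained("imagegeneration@006")
    result = model.generate_images(
        prompt=prompt,
        number_of_images=1,
        aspect_ratio=aspect_ratio,  # e.g. "1:1", "16:9", "9:16"
    )
    return result.images[0]  # a GeneratedImage; .save(location=...) writes it to disk
```
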
@@ -198,123 +278,229 @@ st.sidebar.subheader("🎨 Visual & Audio Settings")
 aspect_ratio = st.sidebar.selectbox("🖼️ Image Aspect Ratio:", ["1:1", "16:9", "9:16"], index=0)
 audio_voice = None
 generate_button = st.sidebar.button("✨ Generate ChronoWeave ✨", type="primary", disabled=(not GOOGLE_API_KEY), use_container_width=True)
-st.sidebar.markdown("---")
+st.sidebar.markdown("---")
+st.sidebar.info("⏳ Generation can take minutes.")
+st.sidebar.markdown(f"<small>Txt:{TEXT_MODEL_ID}, Img:{IMAGE_MODEL_ID}, Aud:{AUDIO_MODEL_ID}</small>", unsafe_allow_html=True)
 
 # --- Main Logic ---
 if generate_button:
-    if not theme:
+    if not theme:
+        st.error("Please enter a story theme.", icon="📝")
     else:
-        run_id = str(uuid.uuid4()).split('-')[0]
+        run_id = str(uuid.uuid4()).split('-')[0]
+        temp_dir = os.path.join(TEMP_DIR_BASE, f"run_{run_id}")
+        try:
+            os.makedirs(temp_dir, exist_ok=True)
+            logger.info(f"Created temp dir: {temp_dir}")
+        except OSError as e:
+            st.error(f"🚨 Failed create temp dir {temp_dir}: {e}", icon="📁")
+            st.stop()
        final_video_paths, generation_errors = {}, {}
 
        chrono_response: Optional[ChronoWeaveResponse] = None
-        with st.spinner("Generating narrative structure... 🤖"):
+        with st.spinner("Generating narrative structure... 🤖"):
+            chrono_response = generate_story_sequence_chrono(theme, num_scenes, num_timelines, divergence_prompt)
 
        if chrono_response:
-            overall_start_time = time.time()
+            overall_start_time = time.time()
+            all_timelines_successful = True
            with st.status("Generating assets and composing videos...", expanded=True) as status:
                for timeline_index, timeline in enumerate(chrono_response.timelines):
                    timeline_id, divergence, segments = timeline.timeline_id, timeline.divergence_reason, timeline.segments
-                    timeline_label = f"Timeline {timeline_id}"
+                    timeline_label = f"Timeline {timeline_id}"
+                    st.subheader(f"Processing {timeline_label}: {divergence}")
+                    logger.info(f"--- Processing {timeline_label} (Idx: {timeline_index}) ---")
+                    generation_errors[timeline_id] = []
                    temp_image_files, temp_audio_files, video_clips = {}, {}, []
-                    timeline_start_time = time.time()
+                    timeline_start_time = time.time()
+                    scene_success_count = 0
 
                    for scene_index, segment in enumerate(segments):
-                        scene_id = segment.scene_id
+                        scene_id = segment.scene_id
+                        task_id = f"T{timeline_id}_S{scene_id}"
                        status.update(label=f"Processing {timeline_label}, Scene {scene_id + 1}/{len(segments)}...")
                        st.markdown(f"--- **Scene {scene_id + 1} ({task_id})** ---")
                        logger.info(f"Processing {timeline_label}, Scene {scene_id + 1}/{len(segments)}...")
                        scene_has_error = False
-                        st.write(f"
+                        st.write(f"*Img Prompt:* {segment.image_prompt}" + (f" *(Mod: {segment.timeline_visual_modifier})*" if segment.timeline_visual_modifier else ""))
+                        st.write(f"*Audio Text:* {segment.audio_text}")
 
                        # --- 2a. Image Generation ---
                        generated_image: Optional[Image.Image] = None
                        with st.spinner(f"[{task_id}] Generating image... 🎨"):
                            combined_prompt = segment.image_prompt
-                            if segment.character_description:
+                            if segment.character_description:
+                                combined_prompt += f" Featuring: {segment.character_description}"
+                            if segment.timeline_visual_modifier:
+                                combined_prompt += f" Style hint: {segment.timeline_visual_modifier}."
+                            generated_image = generate_image_imagen(combined_prompt, aspect_ratio, task_id)
                        if generated_image:
                            image_path = os.path.join(temp_dir, f"{task_id}_image.png")
-                            try:
+                            try:
+                                generated_image.save(image_path)
+                                temp_image_files[scene_id] = image_path
+                                st.image(generated_image, width=180, caption=f"Scene {scene_id + 1}")
+                            except Exception as e:
+                                logger.error(f"❌ [{task_id}] Img save error: {e}")
+                                st.error(f"Save image {task_id} failed.", icon="💾")
+                                scene_has_error = True
+                                generation_errors[timeline_id].append(f"S{scene_id + 1}: Img save fail.")
+                        else:
+                            scene_has_error = True
+                            generation_errors[timeline_id].append(f"S{scene_id + 1}: Img gen fail.")
+                            continue
 
                        # --- 2b. Audio Generation ---
                        generated_audio_path: Optional[str] = None
-                        if not scene_has_error:
+                        if not scene_has_error:
                            with st.spinner(f"[{task_id}] Generating audio... 🔊"):
                                audio_path_temp = os.path.join(temp_dir, f"{task_id}_audio.wav")
-                                try:
-                                except
+                                try:
+                                    generated_audio_path = asyncio.run(generate_audio_live_async(segment.audio_text, audio_path_temp, audio_voice))
+                                except RuntimeError as e:
+                                    logger.error(f"❌ [{task_id}] Asyncio error: {e}")
+                                    st.error(f"Asyncio audio error {task_id}: {e}", icon="⚡")
+                                    scene_has_error = True
+                                    generation_errors[timeline_id].append(f"S{scene_id + 1}: Audio async err.")
+                                except Exception as e:
+                                    logger.exception(f"❌ [{task_id}] Audio error: {e}")
+                                    st.error(f"Audio error {task_id}: {e}", icon="💥")
+                                    scene_has_error = True
+                                    generation_errors[timeline_id].append(f"S{scene_id + 1}: Audio gen err.")
                        if generated_audio_path:
-                            temp_audio_files[scene_id] = generated_audio_path
+                            temp_audio_files[scene_id] = generated_audio_path
+                            try:
+                                with open(generated_audio_path, 'rb') as ap:
+                                    st.audio(ap.read(), format='audio/wav')
+                            except Exception as e:
+                                logger.warning(f"⚠️ [{task_id}] Audio preview error: {e}")
+                        else:
+                            scene_has_error = True
+                            generation_errors[timeline_id].append(f"S{scene_id + 1}: Audio gen fail.")
+                            continue
 
                        # --- 2c. Create Video Clip ---
-                        if not scene_has_error and scene_id in temp_image_files and scene_id in temp_audio_files:
-                            st.write(f"
+                        if not scene_has_error and scene_id in temp_image_files and scene_id in temp_audio_files:
+                            st.write(f"🎬 Creating clip S{scene_id + 1}...")
+                            img_path, aud_path = temp_image_files[scene_id], temp_audio_files[scene_id]
                            audio_clip_instance, image_clip_instance, composite_clip = None, None, None
                            try:
-                                if not os.path.exists(img_path):
+                                if not os.path.exists(img_path):
+                                    raise FileNotFoundError(f"Img missing: {img_path}")
+                                if not os.path.exists(aud_path):
+                                    raise FileNotFoundError(f"Aud missing: {aud_path}")
+                                audio_clip_instance = AudioFileClip(aud_path)
+                                np_image = np.array(Image.open(img_path))
                                image_clip_instance = ImageClip(np_image).set_duration(audio_clip_instance.duration)
-                                composite_clip = image_clip_instance.set_audio(audio_clip_instance)
+                                composite_clip = image_clip_instance.set_audio(audio_clip_instance)
+                                video_clips.append(composite_clip)
+                                logger.info(f"✅ [{task_id}] Clip created (Dur: {audio_clip_instance.duration:.2f}s).")
+                                st.write(f"✅ Clip created (Dur: {audio_clip_instance.duration:.2f}s).")
+                                scene_success_count += 1
+                            except Exception as e:
+                                logger.exception(f"❌ [{task_id}] Failed clip creation: {e}")
+                                st.error(f"Failed clip {task_id}: {e}", icon="🎬")
+                                scene_has_error = True
+                                generation_errors[timeline_id].append(f"S{scene_id + 1}: Clip fail.")
                            finally:
-                                if audio_clip_instance:
+                                if audio_clip_instance:
+                                    audio_clip_instance.close()
+                                if image_clip_instance:
+                                    image_clip_instance.close()
 
                    # --- 2d. Assemble Timeline Video ---
                    timeline_duration = time.time() - timeline_start_time
                    if video_clips and scene_success_count == len(segments):
+                        status.update(label=f"Composing video {timeline_label}...")
+                        st.write(f"🎞️ Assembling video {timeline_label}...")
+                        logger.info(f"🎞️ Assembling video {timeline_label}...")
+                        output_filename = os.path.join(temp_dir, f"timeline_{timeline_id}_final.mp4")
+                        final_timeline_video = None
+                        try:
+                            final_timeline_video = concatenate_videoclips(video_clips, method="compose")
+                            final_timeline_video.write_videofile(
+                                output_filename, fps=VIDEO_FPS, codec=VIDEO_CODEC, audio_codec=AUDIO_CODEC, logger=None
+                            )
+                            final_video_paths[timeline_id] = output_filename
+                            logger.info(f"✅ [{timeline_label}] Video saved: {os.path.basename(output_filename)}")
+                            st.success(f"✅ Video {timeline_label} completed in {timeline_duration:.2f}s.")
+                        except Exception as e:
+                            logger.exception(f"❌ [{timeline_label}] Video assembly failed: {e}")
+                            st.error(f"Assemble video {timeline_label} failed: {e}", icon="🎼")
+                            all_timelines_successful = False
+                            generation_errors[timeline_id].append(f"T{timeline_id}: Assembly fail.")
+                        finally:
+                            logger.debug(f"[{timeline_label}] Closing {len(video_clips)} clips...")
+                            for i, clip in enumerate(video_clips):
+                                try:
+                                    clip.close()
+                                except Exception as e_close:
+                                    logger.warning(f"⚠️ [{timeline_label}] Clip close err {i}: {e_close}")
+                            if final_timeline_video:
+                                try:
+                                    final_timeline_video.close()
+                                except Exception as e_close_final:
+                                    logger.warning(f"⚠️ [{timeline_label}] Final vid close err: {e_close_final}")
+                    elif not video_clips:
+                        logger.warning(f"[{timeline_label}] No clips. Skip assembly.")
+                        st.warning(f"No scenes for {timeline_label}. No video.", icon="🚫")
+                        all_timelines_successful = False
+                    else:
+                        error_count = len(generation_errors[timeline_id])
+                        logger.warning(f"[{timeline_label}] {error_count} scene err(s). Skip assembly.")
+                        st.warning(f"{timeline_label}: {error_count} err(s). Video not assembled.", icon="⚠️")
+                        all_timelines_successful = False
+                    if generation_errors[timeline_id]:
+                        logger.error(f"Errors {timeline_label}: {generation_errors[timeline_id]}")
 
                # --- End of Timelines Loop ---
                overall_duration = time.time() - overall_start_time
-                if all_timelines_successful and final_video_paths:
+                if all_timelines_successful and final_video_paths:
+                    status_msg = f"Complete! ({len(final_video_paths)} videos in {overall_duration:.2f}s)"
+                    status.update(label=status_msg, state="complete", expanded=False)
+                    logger.info(status_msg)
+                elif final_video_paths:
+                    status_msg = f"Partially Complete ({len(final_video_paths)} videos, errors). {overall_duration:.2f}s"
+                    status.update(label=status_msg, state="warning", expanded=True)
+                    logger.warning(status_msg)
+                else:
+                    status_msg = f"Failed. No videos. {overall_duration:.2f}s"
+                    status.update(label=status_msg, state="error", expanded=True)
+                    logger.error(status_msg)
 
            # --- 3. Display Results ---
            st.header("🎬 Generated Timelines")
            if final_video_paths:
-                sorted_timeline_ids = sorted(final_video_paths.keys())
+                sorted_timeline_ids = sorted(final_video_paths.keys())
+                num_cols = min(len(sorted_timeline_ids), 3)
+                cols = st.columns(num_cols)
                for idx, timeline_id in enumerate(sorted_timeline_ids):
-                    col = cols[idx % num_cols]
+                    col = cols[idx % num_cols]
+                    video_path = final_video_paths[timeline_id]
                    timeline_data = next((t for t in chrono_response.timelines if t.timeline_id == timeline_id), None)
                    reason = timeline_data.divergence_reason if timeline_data else "Unknown"
                    with col:
-                        st.subheader(f"Timeline {timeline_id}")
+                        st.subheader(f"Timeline {timeline_id}")
+                        st.caption(f"Divergence: {reason}")
                        try:
-                            with open(video_path, 'rb') as vf:
+                            with open(video_path, 'rb') as vf:
+                                video_bytes = vf.read()
+                            st.video(video_bytes)
+                            logger.info(f"Displaying T{timeline_id}")
                            st.download_button(f"Download T{timeline_id}", video_bytes, f"timeline_{timeline_id}.mp4", "video/mp4", key=f"dl_{timeline_id}")
                            if generation_errors.get(timeline_id):
                                scene_errors = [err for err in generation_errors[timeline_id] if not err.startswith(f"T{timeline_id}:")]
                                if scene_errors:
+                                    with st.expander(f"⚠️ View {len(scene_errors)} Scene Issues"):
+                                        for err in scene_errors:
+                                            st.warning(f"- {err}")
-                        except
+                        except FileNotFoundError:
+                            logger.error(f"Video missing: {video_path}")
+                            st.error(f"Error: Video missing T{timeline_id}.", icon="🚨")
+                        except Exception as e:
+                            logger.exception(f"Display error {video_path}: {e}")
+                            st.error(f"Display error T{timeline_id}: {e}", icon="🚨")
+            else:
                st.warning("No final videos were successfully generated.")
                st.subheader("Summary of Generation Issues")
                has_errors = any(generation_errors.values())
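
One detail in the hunk above: `asyncio.run(...)` is invoked from inside a Streamlit script, which may itself already be running under an event loop; that is the case `nest_asyncio.apply()` at the top of the file covers, and why a `RuntimeError` is caught separately. A minimal standalone illustration with a toy coroutine (not the app's code):

```python
import asyncio
import nest_asyncio

nest_asyncio.apply()  # patches the loop so asyncio.run() can nest

async def probe() -> str:
    await asyncio.sleep(0)
    return "ok"

# Without the patch, asyncio.run() raises RuntimeError inside a running loop.
print(asyncio.run(probe()))
```
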
@@ -323,15 +509,26 @@ if generate_button:
                for tid, errors in generation_errors.items():
                    if errors:
                        st.error(f"**Timeline {tid}:**")
-                        for msg in errors:
+                        for msg in errors:
+                            st.error(f" - {msg}")
+                else:
+                    st.info("No generation errors recorded.")
 
            # --- 4. Cleanup ---
            st.info(f"Attempting cleanup: {temp_dir}")
-            try:
+            try:
+                shutil.rmtree(temp_dir)
+                logger.info(f"✅ Temp dir removed: {temp_dir}")
+                st.success("✅ Temp files cleaned.")
+            except Exception as e:
+                logger.error(f"⚠️ Failed remove temp dir {temp_dir}: {e}")
+                st.warning(f"Could not remove temp files: {temp_dir}.", icon="⚠️")
+
+        elif not chrono_response:
+            logger.error("Story gen/validation failed.")
+        else:
+            st.error("Unexpected issue post-gen.", icon="🐛")
+            logger.error("Chrono_response truthy but invalid.")
+
+else:
+    st.info("Configure settings and click '✨ Generate ChronoWeave ✨' to start.")
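
The cleanup hunk pairs the earlier `os.makedirs(temp_dir, ...)` with `shutil.rmtree(temp_dir)`. For comparison only, the standard library offers the same lifecycle as a context manager; in this sketch the `run_` prefix mirrors the app's naming and the body is elided.

```python
import tempfile

# Hypothetical alternative to manual makedirs/rmtree; not part of this commit.
with tempfile.TemporaryDirectory(prefix="run_") as temp_dir:
    ...  # write per-scene images, audio, and videos under temp_dir
# the directory and its contents are deleted automatically on exit
```
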