# Copyright 2025 Google LLC. Based on work by Yousif Ahmed.
# Concept: ChronoWeave - Branching Narrative Generation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
import streamlit as st
import google.generativeai as genai
import os
import json
import re # Word-boundary checks in prompt validation
import numpy as np
from io import BytesIO
import time
import wave
import contextlib
import asyncio
import uuid # For unique identifiers
import shutil # For directory operations
import logging # For better logging
# Image handling
from PIL import Image
# Pydantic for data validation
from pydantic import BaseModel, Field, ValidationError, field_validator, model_validator
from typing import List, Optional, Literal, Dict, Any
# Video and audio processing
from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips
# from moviepy.config import change_settings # Potential for setting imagemagick path if needed
# Type hints
import typing_extensions as typing
# Async support for Streamlit/Google API
import nest_asyncio
nest_asyncio.apply() # Apply patch for asyncio in environments like Streamlit/Jupyter
# --- Logging Setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# --- Configuration ---
st.set_page_config(page_title="ChronoWeave", layout="wide", initial_sidebar_state="expanded")
st.title("πŸŒ€ ChronoWeave: Advanced Branching Narrative Generator")
st.markdown("""
Generate multiple, branching story timelines from a single theme using AI, complete with images and narration.
*Based on the work of Yousif Ahmed. Copyright 2025 Google LLC.*
""")
# --- Constants ---
# Text/JSON Model
TEXT_MODEL_ID = "models/gemini-1.5-flash" # Or "gemini-1.5-pro"
# Audio Model Config
AUDIO_MODEL_ID = "models/gemini-1.5-flash" # Model used for audio tasks
AUDIO_SAMPLING_RATE = 24000
# Image Model Config
IMAGE_MODEL_ID = "imagen-3"  # Image generation model (replace with your available Imagen model ID if different)
DEFAULT_ASPECT_RATIO = "1:1"
# Video Config
VIDEO_FPS = 24
VIDEO_CODEC = "libx264"
AUDIO_CODEC = "aac"
# File Management
TEMP_DIR_BASE = ".chrono_temp"
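# Each run writes intermediate assets under TEMP_DIR_BASE/run_<short-uuid>/ and
# removes that directory in the cleanup step at the end of the run (see main logic below).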
# --- API Key Handling ---
GOOGLE_API_KEY = None
try:
GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
logger.info("Google API Key loaded from Streamlit secrets.")
except KeyError:
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
if GOOGLE_API_KEY:
logger.info("Google API Key loaded from environment variable.")
else:
st.error("🚨 **Google API Key Not Found!** Please configure it.", icon="🚨")
st.stop()
# --- Initialize Google Clients ---
# Initialize handles for Text, Audio (using Text model), and Image models
try:
genai.configure(api_key=GOOGLE_API_KEY)
logger.info("Configured google-generativeai with API key.")
# Handle for Text/JSON Generation
client_standard = genai.GenerativeModel(TEXT_MODEL_ID)
logger.info(f"Initialized text/JSON model handle: {TEXT_MODEL_ID}.")
# Handle for Audio Generation (uses a text-capable model via connect)
live_model = genai.GenerativeModel(AUDIO_MODEL_ID)
logger.info(f"Initialized audio model handle: {AUDIO_MODEL_ID}.")
    # Handle for Image Generation
image_model = genai.GenerativeModel(IMAGE_MODEL_ID)
logger.info(f"Initialized image model handle: {IMAGE_MODEL_ID}.")
except AttributeError as ae:
logger.exception("AttributeError during Google AI Client Initialization.")
st.error(f"🚨 Initialization Error: {ae}. Ensure library is up-to-date.", icon="🚨")
st.stop()
except Exception as e:
# Catch potential errors if a model ID is invalid or inaccessible
logger.exception("Failed to initialize Google AI Clients/Models.")
st.error(f"🚨 Failed to initialize Google AI Clients/Models: {e}", icon="🚨")
st.stop()
# --- Define Pydantic Schemas (Using V2 Syntax) ---
class StorySegment(BaseModel):
scene_id: int = Field(..., ge=0)
image_prompt: str = Field(..., min_length=10, max_length=250)
audio_text: str = Field(..., min_length=5, max_length=150)
character_description: str = Field(..., max_length=250)
timeline_visual_modifier: Optional[str] = Field(None, max_length=50)
@field_validator('image_prompt')
@classmethod
    def image_prompt_no_humans(cls, v: str) -> str:
        # Word-boundary match avoids false positives such as "man" inside "mango" or "romance".
        if re.search(r"\b(person|people|human|man|woman|boy|girl|child)\b", v.lower()):
            logger.warning(f"Image prompt '{v[:50]}...' may contain human descriptions.")
        return v
class Timeline(BaseModel):
timeline_id: int = Field(..., ge=0)
divergence_reason: str = Field(..., min_length=5) # Relying on prompt for 1st timeline
    segments: List[StorySegment] = Field(..., min_length=1)  # Pydantic v2: min_length replaces min_items
class ChronoWeaveResponse(BaseModel):
core_theme: str = Field(..., min_length=5)
    timelines: List[Timeline] = Field(..., min_length=1)
total_scenes_per_timeline: int = Field(..., gt=0)
@model_validator(mode='after')
def check_timeline_segment_count(self) -> 'ChronoWeaveResponse':
expected_scenes = self.total_scenes_per_timeline
for i, timeline in enumerate(self.timelines):
if len(timeline.segments) != expected_scenes:
raise ValueError(f"Timeline {i} ID {timeline.timeline_id}: Expected {expected_scenes} segments, found {len(timeline.segments)}.")
return self
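# Illustrative shape of a valid response payload (abridged; the field constraints above still apply):
# {
#   "core_theme": "A curious squirrel finds a glowing acorn",
#   "total_scenes_per_timeline": 2,
#   "timelines": [
#     {"timeline_id": 0, "divergence_reason": "Initial path",
#      "segments": [{"scene_id": 0, "image_prompt": "...", "audio_text": "...",
#                    "character_description": "...", "timeline_visual_modifier": null}]}
#   ]
# }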
# --- Helper Functions ---
@contextlib.contextmanager
def wave_file_writer(filename: str, channels: int = 1, rate: int = AUDIO_SAMPLING_RATE, sample_width: int = 2):
"""Context manager to safely write WAV files."""
wf = None
    try:
        wf = wave.open(filename, "wb")
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(rate)
        yield wf
    except Exception as e:
        logger.error(f"Error opening/configuring wave file {filename}: {e}")
        raise
    finally:
        if wf:
            try:
                wf.close()
            except Exception as e_close:
                logger.error(f"Error closing wave file {filename}: {e_close}")
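# Usage sketch (illustrative; pcm_bytes is assumed to be 16-bit mono PCM at AUDIO_SAMPLING_RATE):
#   with wave_file_writer("narration.wav") as wf:
#       wf.writeframes(pcm_bytes)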
async def generate_audio_live_async(api_text: str, output_filename: str, voice: Optional[str] = None) -> Optional[str]:
"""Generates audio using Gemini Live API (async version) via the GenerativeModel."""
    collected_audio = bytearray()
    task_id = os.path.basename(output_filename).split('.')[0]
    logger.info(f"πŸŽ™οΈ [{task_id}] Requesting audio: '{api_text[:60]}...'")
    try:
        # Request raw LINEAR16 PCM; it is wrapped in a WAV container below.
        config = {"response_modalities": ["AUDIO"], "audio_config": {"audio_encoding": "LINEAR16", "sample_rate_hertz": AUDIO_SAMPLING_RATE}}
        directive_prompt = f"Narrate directly: \"{api_text}\""  # Short directive discourages added preamble.
        # NOTE: connect()/send_request()/stream_content() assume a library version that exposes the Live API surface.
        async with live_model.connect(config=config) as session:
            await session.send_request([directive_prompt])
            async for response in session.stream_content():
                if response.audio_chunk and response.audio_chunk.data:
                    collected_audio.extend(response.audio_chunk.data)
                if hasattr(response, 'error') and response.error:
                    logger.error(f" ❌ [{task_id}] Audio stream error: {response.error}")
                    st.error(f"Audio stream error {task_id}: {response.error}", icon="πŸ”Š")
                    return None
        if not collected_audio:
            logger.warning(f"⚠️ [{task_id}] No audio data received.")
            st.warning(f"No audio data for {task_id}.", icon="πŸ”Š")
            return None
        with wave_file_writer(output_filename, rate=AUDIO_SAMPLING_RATE) as wf:
            wf.writeframes(bytes(collected_audio))
        logger.info(f" βœ… [{task_id}] Audio saved: {os.path.basename(output_filename)} ({len(collected_audio)} bytes)")
        return output_filename
    except genai.types.generation_types.BlockedPromptException as bpe:
        logger.error(f" ❌ [{task_id}] Audio blocked: {bpe}")
        st.error(f"Audio blocked {task_id}.", icon="πŸ”‡")
        return None
    except Exception as e:
        logger.exception(f" ❌ [{task_id}] Audio failed: {e}")
        st.error(f"Audio failed {task_id}: {e}", icon="πŸ”Š")
        return None
def generate_story_sequence_chrono(theme: str, num_scenes: int, num_timelines: int, divergence_prompt: str = "") -> Optional[ChronoWeaveResponse]:
"""Generates branching story sequences using Gemini structured output and validates with Pydantic."""
st.info(f"πŸ“š Generating {num_timelines} timeline(s) x {num_scenes} scenes for: '{theme}'...")
logger.info(f"Requesting story structure: Theme='{theme}', Timelines={num_timelines}, Scenes={num_scenes}")
divergence_instruction = (
f"Introduce clear points of divergence between timelines, after the first scene if possible. "
f"Use hint if provided: '{divergence_prompt}'. "
f"State divergence reason clearly. **For timeline_id 0, use 'Initial path' or 'Baseline scenario'.**" # Explicit instruction for first timeline
)
prompt = f"""
Act as narrative designer. Create story based on theme: "{theme}".
**Instructions:**
1. Generate exactly **{num_timelines}** timelines.
2. Each timeline exactly **{num_scenes}** scenes.
3. **NO humans/humanoids**. Focus: animals, fantasy creatures, animated objects, nature.
4. {divergence_instruction}
5. Maintain consistent style: **'Simple, friendly kids animation, bright colors, rounded shapes'**, unless `timeline_visual_modifier` alters it.
6. `audio_text`: single concise sentence (max 30 words).
7. `image_prompt`: descriptive, concise (target 15-35 words MAX). Focus on scene elements. **AVOID repeating general style description**.
8. `character_description`: VERY brief description of characters in scene prompt (name, features). Target < 20 words total.
**Output Format:** ONLY valid JSON object adhering to schema. No text before/after.
**JSON Schema:** ```json\n{json.dumps(ChronoWeaveResponse.model_json_schema(), indent=2)}\n```"""
    try:
        response = client_standard.generate_content(contents=prompt, generation_config=genai.types.GenerationConfig(response_mime_type="application/json", temperature=0.7))
        try:
            raw_data = json.loads(response.text)
        except json.JSONDecodeError as json_err:
            logger.error(f"Failed JSON decode: {json_err}\nResponse:\n{response.text}")
            st.error(f"🚨 Failed to parse story JSON: {json_err}", icon="πŸ“„")
            st.text_area("Problem Response:", response.text, height=150)
            return None
        except Exception as e:
            logger.error(f"Error processing response text: {e}")
            st.error(f"🚨 Error processing AI response: {e}", icon="πŸ“„")
            return None
        try:
            validated_data = ChronoWeaveResponse.model_validate(raw_data)
        except ValidationError as val_err:
            logger.error(f"JSON validation failed: {val_err}\nData:\n{json.dumps(raw_data, indent=2)}")
            st.error(f"🚨 Generated structure invalid: {val_err}", icon="🧬")
            st.json(raw_data)
            return None
        logger.info("βœ… Story structure generated and validated successfully!")
        st.success("βœ… Story structure generated and validated!")
        return validated_data
    except genai.types.generation_types.BlockedPromptException as bpe:
        logger.error(f"Story gen blocked: {bpe}")
        st.error("🚨 Story prompt blocked.", icon="🚫")
        return None
    except Exception as e:
        logger.exception("Error during story generation:")
        st.error(f"🚨 Story generation error: {e}", icon="πŸ’₯")
        return None
def generate_image_imagen(prompt: str, aspect_ratio: str = "1:1", task_id: str = "IMG") -> Optional[Image.Image]:
"""Generates an image using the dedicated image model handle."""
logger.info(f"πŸ–ΌοΈ [{task_id}] Requesting image: '{prompt[:70]}...' (Aspect: {aspect_ratio})")
full_prompt = (f"Simple kids animation style, bright colors, rounded shapes. NO humans/humanoids. Aspect ratio {aspect_ratio}. Scene: {prompt}")
try:
        # Use the dedicated image_model handle for image generation.
        response = image_model.generate_content(
            full_prompt, generation_config=genai.types.GenerationConfig(candidate_count=1)
        )
image_bytes, safety_ratings, block_reason, finish_reason = None, [], None, None
if hasattr(response, 'candidates') and response.candidates:
candidate = response.candidates[0]
if hasattr(candidate, 'finish_reason'): finish_reason = getattr(candidate.finish_reason, 'name', str(candidate.finish_reason))
if hasattr(candidate, 'content') and candidate.content and hasattr(candidate.content, 'parts') and candidate.content.parts:
part = candidate.content.parts[0]
if hasattr(part, 'inline_data') and part.inline_data and hasattr(part.inline_data, 'data'): image_bytes = part.inline_data.data
if hasattr(candidate, 'safety_ratings'): safety_ratings = candidate.safety_ratings
if hasattr(response, 'prompt_feedback') and response.prompt_feedback:
if hasattr(response.prompt_feedback, 'block_reason') and response.prompt_feedback.block_reason.name != 'BLOCK_REASON_UNSPECIFIED': block_reason = response.prompt_feedback.block_reason.name
if hasattr(response.prompt_feedback, 'safety_ratings'): safety_ratings.extend(response.prompt_feedback.safety_ratings)
        if image_bytes:
            try:
                image = Image.open(BytesIO(image_bytes))
                logger.info(f" βœ… [{task_id}] Image generated.")
                filtered_ratings = [f"{r.category.name}: {r.probability.name}" for r in safety_ratings if hasattr(r, 'probability') and r.probability.name != 'NEGLIGIBLE']
                if filtered_ratings:
                    logger.warning(f" ⚠️ [{task_id}] Image flagged: {', '.join(filtered_ratings)}.")
                    st.warning(f"Image {task_id} flagged: {', '.join(filtered_ratings)}", icon="⚠️")
                return image
            except Exception as img_err:
                logger.error(f" ❌ [{task_id}] Img decode error: {img_err}")
                st.warning(f"Decode image data {task_id} failed.", icon="πŸ–ΌοΈ")
                return None
        else:
            fail_reason = "Unknown reason."
            if block_reason:
                fail_reason = f"Blocked ({block_reason})."
            elif finish_reason and finish_reason not in ['STOP', 'FINISH_REASON_UNSPECIFIED']:
                fail_reason = f"Finished early ({finish_reason})."
            else:
                filtered_ratings = [f"{r.category.name}: {r.probability.name}" for r in safety_ratings if hasattr(r, 'probability') and r.probability.name != 'NEGLIGIBLE']
                if filtered_ratings:
                    fail_reason = f"Safety filters: {', '.join(filtered_ratings)}."
            # Log the full response object only while the failure reason is still unknown.
            if fail_reason == "Unknown reason.":
                logger.warning(f" ⚠️ [{task_id}] Full API response object: {response}")
            logger.warning(f" ⚠️ [{task_id}] No image data. Reason: {fail_reason} Prompt: '{prompt[:70]}...'")
            st.warning(f"No image data {task_id}. Reason: {fail_reason}", icon="πŸ–ΌοΈ")
            return None
    except genai.types.generation_types.BlockedPromptException as bpe:
        logger.error(f" ❌ [{task_id}] Image blocked (exception): {bpe}")
        st.error(f"Image blocked {task_id} (exception).", icon="🚫")
        return None
    except Exception as e:
        logger.exception(f" ❌ [{task_id}] Image gen failed: {e}")
        st.error(f"Image gen failed {task_id}: {e}", icon="πŸ–ΌοΈ")
        return None
# --- Streamlit UI Elements ---
st.sidebar.header("βš™οΈ Configuration")
if GOOGLE_API_KEY: st.sidebar.success("Google API Key Loaded", icon="βœ…")
else: st.sidebar.error("Google API Key Missing!", icon="🚨")
theme = st.sidebar.text_input("πŸ“– Story Theme:", "A curious squirrel finds a mysterious, glowing acorn")
num_scenes = st.sidebar.slider("🎬 Scenes per Timeline:", min_value=2, max_value=7, value=3)
num_timelines = st.sidebar.slider("🌿 Number of Timelines:", min_value=1, max_value=4, value=2)
divergence_prompt = st.sidebar.text_input("↔️ Divergence Hint (Optional):", placeholder="e.g., What if a bird tried to steal it?")
st.sidebar.subheader("🎨 Visual & Audio Settings")
aspect_ratio = st.sidebar.selectbox("πŸ–ΌοΈ Image Aspect Ratio:", ["1:1", "16:9", "9:16"], index=0)
audio_voice = None  # Voice selection is not currently exposed in the UI.
generate_button = st.sidebar.button("✨ Generate ChronoWeave ✨", type="primary", disabled=(not GOOGLE_API_KEY), use_container_width=True)
st.sidebar.markdown("---")
st.sidebar.info("⏳ Generation can take several minutes.", icon="⏳")
st.sidebar.markdown(f"<small>Txt:{TEXT_MODEL_ID}, Img:{IMAGE_MODEL_ID}, Aud:{AUDIO_MODEL_ID}</small>", unsafe_allow_html=True)
# --- Main Logic ---
if generate_button:
if not theme: st.error("Please enter a story theme.", icon="πŸ‘ˆ")
else:
        run_id = str(uuid.uuid4()).split('-')[0]
        temp_dir = os.path.join(TEMP_DIR_BASE, f"run_{run_id}")
        try:
            os.makedirs(temp_dir, exist_ok=True)
            logger.info(f"Created temp dir: {temp_dir}")
        except OSError as e:
            st.error(f"🚨 Failed to create temp dir {temp_dir}: {e}", icon="πŸ“‚")
            st.stop()
        final_video_paths = {}
        generation_errors = {}
# --- 1. Generate Narrative Structure ---
chrono_response: Optional[ChronoWeaveResponse] = None
with st.spinner("Generating narrative structure... πŸ€”"): chrono_response = generate_story_sequence_chrono(theme, num_scenes, num_timelines, divergence_prompt)
if chrono_response:
# --- 2. Process Each Timeline ---
overall_start_time = time.time(); all_timelines_successful = True
with st.status("Generating assets and composing videos...", expanded=True) as status:
for timeline_index, timeline in enumerate(chrono_response.timelines):
                    timeline_id, divergence, segments = timeline.timeline_id, timeline.divergence_reason, timeline.segments
                    timeline_label = f"Timeline {timeline_id}"
                    st.subheader(f"Processing {timeline_label}: {divergence}")
                    logger.info(f"--- Processing {timeline_label} (Idx: {timeline_index}) ---")
                    generation_errors[timeline_id] = []
                    temp_image_files, temp_audio_files, video_clips = {}, {}, []
                    timeline_start_time = time.time()
                    scene_success_count = 0
for scene_index, segment in enumerate(segments):
scene_id = segment.scene_id; task_id = f"T{timeline_id}_S{scene_id}"
status.update(label=f"Processing {timeline_label}, Scene {scene_id + 1}/{len(segments)}...")
st.markdown(f"--- **Scene {scene_id + 1} ({task_id})** ---")
logger.info(f"Processing {timeline_label}, Scene {scene_id + 1}/{len(segments)}...")
scene_has_error = False
st.write(f" *Img Prompt:* {segment.image_prompt}" + (f" *(Mod: {segment.timeline_visual_modifier})*" if segment.timeline_visual_modifier else "")); st.write(f" *Audio Text:* {segment.audio_text}")
# --- 2a. Image Generation ---
generated_image: Optional[Image.Image] = None
with st.spinner(f"[{task_id}] Generating image... 🎨"):
combined_prompt = segment.image_prompt
if segment.character_description: combined_prompt += f" Featuring: {segment.character_description}"
if segment.timeline_visual_modifier: combined_prompt += f" Style hint: {segment.timeline_visual_modifier}."
generated_image = generate_image_imagen(combined_prompt, aspect_ratio, task_id)
if generated_image:
image_path = os.path.join(temp_dir, f"{task_id}_image.png")
                            try:
                                generated_image.save(image_path)
                                temp_image_files[scene_id] = image_path
                                st.image(generated_image, width=180, caption=f"Scene {scene_id+1}")
                            except Exception as e:
                                logger.error(f" ❌ [{task_id}] Img save error: {e}")
                                st.error(f"Save image {task_id} failed.", icon="πŸ’Ύ")
                                scene_has_error = True
                                generation_errors[timeline_id].append(f"S{scene_id+1}: Img save fail.")
                        else:
                            scene_has_error = True
                            generation_errors[timeline_id].append(f"S{scene_id+1}: Img gen fail.")
                            continue
# --- 2b. Audio Generation ---
generated_audio_path: Optional[str] = None
if not scene_has_error:
with st.spinner(f"[{task_id}] Generating audio... πŸ”Š"):
audio_path_temp = os.path.join(temp_dir, f"{task_id}_audio.wav")
                                try:
                                    generated_audio_path = asyncio.run(generate_audio_live_async(segment.audio_text, audio_path_temp, audio_voice))
                                except RuntimeError as e:
                                    logger.error(f" ❌ [{task_id}] Asyncio error: {e}")
                                    st.error(f"Asyncio audio error {task_id}: {e}", icon="⚑")
                                    scene_has_error = True
                                    generation_errors[timeline_id].append(f"S{scene_id+1}: Audio async err.")
                                except Exception as e:
                                    logger.exception(f" ❌ [{task_id}] Audio error: {e}")
                                    st.error(f"Audio error {task_id}: {e}", icon="πŸ’₯")
                                    scene_has_error = True
                                    generation_errors[timeline_id].append(f"S{scene_id+1}: Audio gen err.")
if generated_audio_path:
temp_audio_files[scene_id] = generated_audio_path
try:
with open(generated_audio_path, 'rb') as ap: st.audio(ap.read(), format='audio/wav')
except Exception as e: logger.warning(f" ⚠️ [{task_id}] Audio preview error: {e}")
else:
                                scene_has_error = True
                                generation_errors[timeline_id].append(f"S{scene_id+1}: Audio gen fail.")
                                if scene_id in temp_image_files and os.path.exists(temp_image_files[scene_id]):
                                    try:
                                        os.remove(temp_image_files[scene_id])
                                        logger.info(f" πŸ—‘οΈ [{task_id}] Removed img due to audio fail.")
                                        del temp_image_files[scene_id]
                                    except OSError as e:
                                        logger.warning(f" ⚠️ [{task_id}] Failed remove img after audio fail: {e}")
                                continue
# --- 2c. Create Video Clip ---
if not scene_has_error and scene_id in temp_image_files and scene_id in temp_audio_files:
st.write(f" 🎬 Creating clip S{scene_id+1}...")
img_path, aud_path = temp_image_files[scene_id], temp_audio_files[scene_id]
audio_clip_instance, image_clip_instance, composite_clip = None, None, None
try:
if not os.path.exists(img_path): raise FileNotFoundError(f"Img missing: {img_path}")
if not os.path.exists(aud_path): raise FileNotFoundError(f"Aud missing: {aud_path}")
                                audio_clip_instance = AudioFileClip(aud_path)
                                np_image = np.array(Image.open(img_path))
                                # Hold the still image for exactly the narration's duration.
                                image_clip_instance = ImageClip(np_image).set_duration(audio_clip_instance.duration)
                                composite_clip = image_clip_instance.set_audio(audio_clip_instance)
                                video_clips.append(composite_clip)
                                logger.info(f" βœ… [{task_id}] Clip created (Dur: {audio_clip_instance.duration:.2f}s).")
                                st.write(f" βœ… Clip created (Dur: {audio_clip_instance.duration:.2f}s).")
                                scene_success_count += 1
except Exception as e:
logger.exception(f" ❌ [{task_id}] Failed clip creation: {e}"); st.error(f"Failed clip {task_id}: {e}", icon="🎬")
scene_has_error = True; generation_errors[timeline_id].append(f"S{scene_id+1}: Clip fail.")
if audio_clip_instance: audio_clip_instance.close();
if image_clip_instance: image_clip_instance.close()
try:
if os.path.exists(img_path): os.remove(img_path)
if os.path.exists(aud_path): os.remove(aud_path)
except OSError as e_rem: logger.warning(f" ⚠️ [{task_id}] Failed remove files after clip err: {e_rem}")
# --- 2d. Assemble Timeline Video ---
timeline_duration = time.time() - timeline_start_time
if video_clips and scene_success_count == len(segments):
status.update(label=f"Composing video {timeline_label}...")
st.write(f"🎞️ Assembling video {timeline_label}..."); logger.info(f"🎞️ Assembling video {timeline_label}...")
output_filename = os.path.join(temp_dir, f"timeline_{timeline_id}_final.mp4"); final_timeline_video = None
try:
final_timeline_video = concatenate_videoclips(video_clips, method="compose")
final_timeline_video.write_videofile(output_filename, fps=VIDEO_FPS, codec=VIDEO_CODEC, audio_codec=AUDIO_CODEC, logger=None)
                            final_video_paths[timeline_id] = output_filename
                            logger.info(f" βœ… [{timeline_label}] Video saved: {os.path.basename(output_filename)}")
st.success(f"βœ… Video {timeline_label} completed in {timeline_duration:.2f}s.")
except Exception as e:
logger.exception(f" ❌ [{timeline_label}] Video assembly failed: {e}"); st.error(f"Assemble video {timeline_label} failed: {e}", icon="πŸ“Ό")
all_timelines_successful = False; generation_errors[timeline_id].append(f"T{timeline_id}: Assembly failed.")
finally:
                            logger.debug(f"[{timeline_label}] Closing clips...")
for i, clip in enumerate(video_clips):
try:
if clip:
if clip.audio: clip.audio.close()
clip.close()
except Exception as e_close: logger.warning(f" ⚠️ [{timeline_label}] Clip close err {i}: {e_close}")
if final_timeline_video:
try:
if final_timeline_video.audio: final_timeline_video.audio.close()
final_timeline_video.close()
except Exception as e_close_final: logger.warning(f" ⚠️ [{timeline_label}] Final vid close err: {e_close_final}")
                    elif not video_clips:
                        logger.warning(f"[{timeline_label}] No clips. Skip assembly.")
                        st.warning(f"No scenes for {timeline_label}. No video.", icon="🚫")
                        all_timelines_successful = False
                    else:
                        error_count = len(segments) - scene_success_count
                        logger.warning(f"[{timeline_label}] {error_count} scene err(s). Skip assembly.")
                        st.warning(f"{timeline_label}: {error_count} err(s). Video not assembled.", icon="⚠️")
                        all_timelines_successful = False
if generation_errors[timeline_id]: logger.error(f"Errors {timeline_label}: {generation_errors[timeline_id]}")
# --- End of Timelines Loop ---
overall_duration = time.time() - overall_start_time
                if all_timelines_successful and final_video_paths:
                    status_msg = f"Complete! ({len(final_video_paths)} videos in {overall_duration:.2f}s)"
                    status.update(label=status_msg, state="complete", expanded=False)
                    logger.info(status_msg)
                elif final_video_paths:
                    status_msg = f"Partially Complete ({len(final_video_paths)} videos, errors). {overall_duration:.2f}s"
                    # st.status supports only "running" / "complete" / "error" states, so flag partial runs as "error".
                    status.update(label=status_msg, state="error", expanded=True)
                    logger.warning(status_msg)
                else:
                    status_msg = f"Failed. No videos. {overall_duration:.2f}s"
                    status.update(label=status_msg, state="error", expanded=True)
                    logger.error(status_msg)
# --- 3. Display Results ---
st.header("🎬 Generated Timelines")
if final_video_paths:
                sorted_timeline_ids = sorted(final_video_paths.keys())
                num_cols = min(len(sorted_timeline_ids), 3)
                cols = st.columns(num_cols)
for idx, timeline_id in enumerate(sorted_timeline_ids):
                    col = cols[idx % num_cols]
                    video_path = final_video_paths[timeline_id]
timeline_data = next((t for t in chrono_response.timelines if t.timeline_id == timeline_id), None)
reason = timeline_data.divergence_reason if timeline_data else "Unknown"
with col:
st.subheader(f"Timeline {timeline_id}"); st.caption(f"Divergence: {reason}")
try:
with open(video_path, 'rb') as vf: video_bytes = vf.read()
                            st.video(video_bytes)
                            logger.info(f"Displaying T{timeline_id}")
st.download_button(f"Download T{timeline_id}", video_bytes, f"timeline_{timeline_id}.mp4", "video/mp4", key=f"dl_{timeline_id}")
if generation_errors.get(timeline_id):
with st.expander(f"⚠️ View {len(generation_errors[timeline_id])} Issues"): [st.warning(f"- {err}") for err in generation_errors[timeline_id]]
                        except FileNotFoundError:
                            logger.error(f"Video missing: {video_path}")
                            st.error(f"Error: Video missing T{timeline_id}.", icon="🚨")
                        except Exception as e:
                            logger.exception(f"Display error {video_path}: {e}")
                            st.error(f"Display error T{timeline_id}: {e}", icon="🚨")
else:
st.warning("No final videos were successfully generated.")
all_errors = [msg for err_list in generation_errors.values() for msg in err_list]
if all_errors:
st.subheader("Summary of Generation Issues");
with st.expander("View All Errors", expanded=True):
for tid, errors in generation_errors.items():
if errors: st.error(f"T{tid}:"); [st.error(f" - {msg}") for msg in errors]
# --- 4. Cleanup ---
st.info(f"Attempting cleanup: {temp_dir}")
try: shutil.rmtree(temp_dir); logger.info(f"βœ… Temp dir removed: {temp_dir}"); st.success("βœ… Temp files cleaned.")
except Exception as e: logger.error(f"⚠️ Failed remove temp dir {temp_dir}: {e}"); st.warning(f"Could not remove temp files: {temp_dir}.", icon="⚠️")
        else:
            # generate_story_sequence_chrono has already surfaced error details in the UI.
            logger.error("Story generation/validation failed; nothing to render.")
else: st.info("Configure settings and click '✨ Generate ChronoWeave ✨' to start.")