Update app.py
Browse files
app.py
CHANGED
|
@@ -1,378 +1,444 @@
|
|
| 1 |
-
#
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
|
| 15 |
-
CompositeVideoClip, AudioFileClip)
|
| 16 |
-
import moviepy.video.fx.all as vfx
|
| 17 |
-
import numpy as np
|
| 18 |
import os
|
| 19 |
-
import openai
|
| 20 |
-
import requests
|
| 21 |
-
import io
|
| 22 |
-
import time
|
| 23 |
-
import random
|
| 24 |
import logging
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
try:
|
| 32 |
-
from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
|
| 33 |
-
from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
|
| 34 |
-
ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
|
| 35 |
-
ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
|
| 36 |
-
except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
|
| 37 |
-
|
| 38 |
-
# --- RunwayML Client Import (Placeholder) ---
|
| 39 |
-
RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
|
| 40 |
-
try:
|
| 41 |
-
logger.info("RunwayML SDK import is a placeholder.")
|
| 42 |
-
except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
|
| 43 |
-
except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
class VisualEngine:
|
| 47 |
-
def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
|
| 48 |
-
self.output_dir = output_dir
|
| 49 |
-
os.makedirs(self.output_dir, exist_ok=True)
|
| 50 |
-
self.font_filename = "DejaVuSans-Bold.ttf"
|
| 51 |
-
font_paths_to_try = [
|
| 52 |
-
self.font_filename,
|
| 53 |
-
f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
|
| 54 |
-
f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
|
| 55 |
-
f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf",
|
| 56 |
-
f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"
|
| 57 |
-
]
|
| 58 |
-
self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
|
| 59 |
-
self.font_size_pil = 20
|
| 60 |
-
self.video_overlay_font_size = 30
|
| 61 |
-
self.video_overlay_font_color = 'white'
|
| 62 |
-
self.video_overlay_font = 'DejaVu-Sans-Bold'
|
| 63 |
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
return
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
tc.write_videofile(fp,fps=24,codec='libx264',preset='ultrafast',logger=None,threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
|
| 211 |
-
except Exception as e: logger.error(f"Generic placeholder error {fp}: {e}",exc_info=True); return None
|
| 212 |
-
finally:
|
| 213 |
-
if tc and hasattr(tc,'close'): tc.close()
|
| 214 |
-
|
| 215 |
-
def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
|
| 216 |
-
scene_data, scene_identifier_filename_base, # This is base_name, no ext
|
| 217 |
-
generate_as_video_clip=False, runway_target_duration=5):
|
| 218 |
-
base_name = scene_identifier_filename_base # Already a base name
|
| 219 |
-
asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted'}
|
| 220 |
-
input_image_for_runway_path = None
|
| 221 |
-
image_filename_for_base = base_name + "_base_image.png" # Specific name for base image file
|
| 222 |
-
temp_image_asset_info = {'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted'}
|
| 223 |
-
|
| 224 |
-
if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
|
| 225 |
-
max_r, att_n = 2, 0
|
| 226 |
-
for att_n in range(max_r):
|
| 227 |
try:
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
for
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
try:
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
if
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 353 |
try:
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
else:final_clip=final_clip.fx(vfx.fadein,min(td,final_clip.duration/2.0))
|
| 362 |
-
if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration>0:
|
| 363 |
-
try:narration_clip=AudioFileClip(overall_narration_path);final_clip=final_clip.set_audio(narration_clip);logger.info("Narration added.")
|
| 364 |
-
except Exception as e:logger.error(f"Narration add error:{e}",exc_info=True)
|
| 365 |
-
elif final_clip.duration<=0:logger.warning("Video no duration. No audio.")
|
| 366 |
-
if final_clip and final_clip.duration>0:
|
| 367 |
-
op=os.path.join(self.output_dir,output_filename);logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)")
|
| 368 |
-
final_clip.write_videofile(op,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count()or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"])
|
| 369 |
-
logger.info(f"Video created:{op}");return op
|
| 370 |
-
else:logger.error("Final clip invalid. No write.");return None
|
| 371 |
-
except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
|
| 372 |
-
finally:
|
| 373 |
-
logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
|
| 374 |
-
clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
|
| 375 |
-
for clip_obj in clips_to_close:
|
| 376 |
-
if clip_obj and hasattr(clip_obj, 'close'):
|
| 377 |
-
try: clip_obj.close()
|
| 378 |
-
except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
from core.gemini_handler import GeminiHandler
|
| 4 |
+
from core.visual_engine import VisualEngine
|
| 5 |
+
from core.prompt_engineering import (
|
| 6 |
+
create_cinematic_treatment_prompt,
|
| 7 |
+
construct_dalle_prompt,
|
| 8 |
+
construct_text_to_video_prompt_for_gen4, # <<< USE THIS FOR RUNWAY
|
| 9 |
+
create_narration_script_prompt_enhanced,
|
| 10 |
+
create_scene_regeneration_prompt,
|
| 11 |
+
create_visual_regeneration_prompt # This is for DALL-E image prompt refinement
|
| 12 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
import logging
|
| 15 |
|
| 16 |
+
# --- Configuration & Initialization ---
|
| 17 |
+
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
|
| 18 |
+
# Configure logging to be more verbose for debugging if needed
|
| 19 |
+
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
| 20 |
+
logger = logging.getLogger(__name__) # Get logger for this module
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
+
# --- Global Definitions ---
|
| 23 |
+
SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
|
| 24 |
+
DEFAULT_SCENE_DURATION_SECS = 5
|
| 25 |
+
DEFAULT_SHOT_TYPE = "Director's Choice"
|
| 26 |
+
ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# --- Global State Variables & API Key Setup ---
|
| 30 |
+
def load_api_key(key_name_streamlit, key_name_env, service_name):
|
| 31 |
+
# (Keep this function as it was - robust)
|
| 32 |
+
key = None; secrets_available = hasattr(st, 'secrets')
|
| 33 |
+
try:
|
| 34 |
+
if secrets_available and key_name_streamlit in st.secrets:
|
| 35 |
+
key = st.secrets[key_name_streamlit]
|
| 36 |
+
if key: logger.info(f"{service_name} API Key found in Streamlit secrets.")
|
| 37 |
+
except Exception as e: logger.warning(f"Could not access st.secrets for {key_name_streamlit}: {e}")
|
| 38 |
+
if not key and key_name_env in os.environ:
|
| 39 |
+
key = os.environ[key_name_env]
|
| 40 |
+
if key: logger.info(f"{service_name} API Key found in environment variable.")
|
| 41 |
+
if not key: logger.warning(f"{service_name} API Key NOT FOUND. Related features may be disabled or use fallbacks.")
|
| 42 |
+
return key
|
| 43 |
+
|
| 44 |
+
if 'services_initialized' not in st.session_state:
|
| 45 |
+
logger.info("Initializing services and API keys for the first time this session...")
|
| 46 |
+
st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
|
| 47 |
+
st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
|
| 48 |
+
st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
|
| 49 |
+
st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
|
| 50 |
+
st.session_state.ELEVENLABS_VOICE_ID_CONFIG = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
|
| 51 |
+
st.session_state.RUNWAY_API_KEY = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")
|
| 52 |
+
|
| 53 |
+
if not st.session_state.GEMINI_API_KEY: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
|
| 54 |
+
|
| 55 |
+
try: st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY); logger.info("GeminiHandler initialized.")
|
| 56 |
+
except Exception as e: st.error(f"Failed to init GeminiHandler: {e}"); logger.critical(f"GeminiHandler init failed: {e}", exc_info=True); st.stop()
|
| 57 |
+
|
| 58 |
+
try:
|
| 59 |
+
default_voice_id_el = "Rachel" # Fallback
|
| 60 |
+
configured_voice_id_el = st.session_state.ELEVENLABS_VOICE_ID_CONFIG or default_voice_id_el
|
| 61 |
+
st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=configured_voice_id_el)
|
| 62 |
+
st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
|
| 63 |
+
st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY, voice_id_from_secret=st.session_state.ELEVENLABS_VOICE_ID_CONFIG)
|
| 64 |
+
st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
|
| 65 |
+
st.session_state.visual_engine.set_runway_api_key(st.session_state.RUNWAY_API_KEY) # Pass Runway key
|
| 66 |
+
logger.info("VisualEngine initialized and API keys set.")
|
| 67 |
+
except Exception as e: st.error(f"Failed to init VisualEngine: {e}"); logger.critical(f"VisualEngine init/key setting failed: {e}", exc_info=True); st.warning("VisualEngine critical setup issue.")
|
| 68 |
+
st.session_state.services_initialized = True; logger.info("Service initialization complete.")
|
| 69 |
+
|
| 70 |
+
# Initialize other session state variables
|
| 71 |
+
for key_ss, default_val_ss in [ # Renamed loop vars
|
| 72 |
+
('story_treatment_scenes', []), ('scene_generation_prompts', []), ('generated_scene_assets_info', []), # Stores full asset info dicts
|
| 73 |
+
('video_path', None), ('character_definitions', {}), ('global_style_additions', ""),
|
| 74 |
+
('overall_narration_audio_path', None), ('narration_script_display', "")
|
| 75 |
+
]:
|
| 76 |
+
if key_ss not in st.session_state: st.session_state[key_ss] = default_val_ss
|
| 77 |
+
|
| 78 |
+
def initialize_new_project_state(): # Renamed
|
| 79 |
+
st.session_state.story_treatment_scenes = []
|
| 80 |
+
st.session_state.scene_generation_prompts = [] # Stores the prompt used for DALL-E or Runway
|
| 81 |
+
st.session_state.generated_scene_assets_info = [] # Stores dicts {'path':..., 'type':..., 'error':..., 'prompt_used':...}
|
| 82 |
+
st.session_state.video_path, st.session_state.overall_narration_audio_path, st.session_state.narration_script_display = None, None, ""
|
| 83 |
+
logger.info("New project state initialized.")
|
| 84 |
+
|
| 85 |
+
def generate_asset_for_scene_wrapper(scene_idx, scene_dict_data, version_num=1, user_selected_asset_type_override="Auto (Director's Choice)"): # Renamed
|
| 86 |
+
# Determine if video clip is desired based on user override or Gemini's suggestion
|
| 87 |
+
generate_as_video_clip_final = False
|
| 88 |
+
gemini_suggested_asset_type = scene_dict_data.get('suggested_asset_type_κ°λ
', 'image').lower()
|
| 89 |
+
|
| 90 |
+
if user_selected_asset_type_override == "Image":
|
| 91 |
+
generate_as_video_clip_final = False
|
| 92 |
+
elif user_selected_asset_type_override == "Video Clip":
|
| 93 |
+
generate_as_video_clip_final = True
|
| 94 |
+
elif user_selected_asset_type_override == "Auto (Director's Choice)": # Default
|
| 95 |
+
generate_as_video_clip_final = (gemini_suggested_asset_type == "video_clip")
|
| 96 |
+
|
| 97 |
+
# Prompt for base image generation (DALL-E or Pexels fallback)
|
| 98 |
+
image_gen_prompt_text = construct_dalle_prompt(scene_dict_data, st.session_state.character_definitions, st.session_state.global_style_additions)
|
| 99 |
+
|
| 100 |
+
# Prompt for video motion (Runway Gen-4) - only if generating video
|
| 101 |
+
motion_gen_prompt_text = ""
|
| 102 |
+
if generate_as_video_clip_final:
|
| 103 |
+
motion_gen_prompt_text = construct_text_to_video_prompt_for_gen4(scene_dict_data, st.session_state.global_style_additions)
|
| 104 |
+
if not motion_gen_prompt_text: # Fallback if specific motion prompt is empty
|
| 105 |
+
logger.warning(f"S{scene_dict_data.get('scene_number', scene_idx+1)}: Motion prompt empty, using generic for Runway.")
|
| 106 |
+
motion_gen_prompt_text = scene_dict_data.get('video_clip_motion_description_κ°λ
', "subtle ambient motion")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
if not image_gen_prompt_text: # Base image prompt is always needed
|
| 110 |
+
logger.error(f"Base image prompt construction failed for S{scene_dict_data.get('scene_number', scene_idx+1)}"); return False
|
| 111 |
+
|
| 112 |
+
# Ensure session state lists are adequate
|
| 113 |
+
while len(st.session_state.scene_generation_prompts) <= scene_idx: st.session_state.scene_generation_prompts.append("")
|
| 114 |
+
while len(st.session_state.generated_scene_assets_info) <= scene_idx: st.session_state.generated_scene_assets_info.append(None)
|
| 115 |
+
|
| 116 |
+
# Store the relevant prompt (DALL-E for image, motion for video)
|
| 117 |
+
# The generate_scene_asset method will return the actual prompt it used if different internally.
|
| 118 |
+
st.session_state.scene_generation_prompts[scene_idx] = motion_gen_prompt_text if generate_as_video_clip_final else image_gen_prompt_text
|
| 119 |
+
|
| 120 |
+
filename_base_for_asset = f"scene_{scene_dict_data.get('scene_number', scene_idx+1)}_asset_v{version_num}" # Renamed
|
| 121 |
+
runway_dur_for_scene = scene_dict_data.get('video_clip_duration_estimate_secs_κ°λ
', scene_dict_data.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS))
|
| 122 |
+
if runway_dur_for_scene <= 0 : runway_dur_for_scene = DEFAULT_SCENE_DURATION_SECS
|
| 123 |
+
|
| 124 |
+
asset_result_dict = st.session_state.visual_engine.generate_scene_asset(
|
| 125 |
+
image_generation_prompt_text=image_gen_prompt_text, # For base DALL-E/Pexels image
|
| 126 |
+
motion_prompt_text_for_video=motion_gen_prompt_text, # For Runway motion
|
| 127 |
+
scene_data=scene_dict_data,
|
| 128 |
+
scene_identifier_filename_base=filename_base_for_asset,
|
| 129 |
+
generate_as_video_clip=generate_as_video_clip_final,
|
| 130 |
+
runway_target_duration=runway_dur_for_scene
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
st.session_state.generated_scene_assets_info[scene_idx] = asset_result_dict
|
| 134 |
+
# Update the stored prompt with what was actually used by the engine, if available from result
|
| 135 |
+
if asset_result_dict and asset_result_dict.get('prompt_used'):
|
| 136 |
+
st.session_state.scene_generation_prompts[scene_idx] = asset_result_dict['prompt_used']
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
if asset_result_dict and not asset_result_dict['error'] and asset_result_dict.get('path') and os.path.exists(asset_result_dict['path']):
|
| 140 |
+
logger.info(f"Asset ({asset_result_dict.get('type')}) generated for S{scene_dict_data.get('scene_number', scene_idx+1)}: {os.path.basename(asset_result_dict['path'])}")
|
| 141 |
+
return True
|
| 142 |
+
else:
|
| 143 |
+
err_msg_asset = asset_result_dict.get('error_message', 'Unknown error') if asset_result_dict else 'Asset result is None'
|
| 144 |
+
logger.warning(f"Asset gen FAILED for S{scene_dict_data.get('scene_number', scene_idx+1)}. Type attempted: {'Video' if generate_as_video_clip_final else 'Image'}. Error: {err_msg_asset}")
|
| 145 |
+
# Store a more detailed failure state if not already
|
| 146 |
+
if not st.session_state.generated_scene_assets_info[scene_idx] or not st.session_state.generated_scene_assets_info[scene_idx]['error']:
|
| 147 |
+
st.session_state.generated_scene_assets_info[scene_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg_asset, 'prompt_used': st.session_state.scene_generation_prompts[scene_idx]}
|
| 148 |
+
return False
|
| 149 |
+
|
| 150 |
+
# --- UI Sidebar ---
|
| 151 |
+
with st.sidebar:
|
| 152 |
+
# ... (Sidebar UI code as before, no changes needed for this fix) ...
|
| 153 |
+
st.title("π¬ CineGen AI Ultra+")
|
| 154 |
+
st.markdown("### Creative Seed")
|
| 155 |
+
user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main_v5")
|
| 156 |
+
genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main_v5")
|
| 157 |
+
mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main_v5")
|
| 158 |
+
num_scenes = st.slider("Number of Key Scenes:", 1, 10, 2, key="num_scenes_main_v5")
|
| 159 |
+
creative_guidance_options = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
|
| 160 |
+
selected_creative_guidance_key = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options.keys()), key="creative_guidance_select_v5")
|
| 161 |
+
actual_creative_guidance = creative_guidance_options[selected_creative_guidance_key]
|
| 162 |
+
|
| 163 |
+
if st.button("π Generate Cinematic Treatment", type="primary", key="generate_treatment_btn_v5", use_container_width=True):
|
| 164 |
+
initialize_new_project_state() # Use renamed function
|
| 165 |
+
if not user_idea.strip(): st.warning("Please provide a story idea.")
|
| 166 |
+
else:
|
| 167 |
+
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status_op: # Renamed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 168 |
try:
|
| 169 |
+
status_op.write("Phase 1: Gemini crafting cinematic treatment... π"); logger.info("Phase 1: Cinematic Treatment Gen.")
|
| 170 |
+
treatment_gen_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, actual_creative_guidance) # Renamed
|
| 171 |
+
raw_treatment_result = st.session_state.gemini_handler.generate_story_breakdown(treatment_gen_prompt) # Renamed
|
| 172 |
+
if not isinstance(raw_treatment_result, list) or not raw_treatment_result: raise ValueError("Gemini returned invalid scene list format.")
|
| 173 |
+
|
| 174 |
+
processed_scene_list = [] # Renamed
|
| 175 |
+
for scene_from_gemini in raw_treatment_result: # Renamed
|
| 176 |
+
scene_from_gemini['user_shot_type'] = scene_from_gemini.get('PROACTIVE_camera_work_κ°λ
', DEFAULT_SHOT_TYPE)
|
| 177 |
+
# Use Gemini's video duration estimate if available for video clips, else default scene duration
|
| 178 |
+
gemini_dur_est = scene_from_gemini.get('video_clip_duration_estimate_secs_κ°λ
', 0)
|
| 179 |
+
scene_from_gemini['user_scene_duration_secs'] = gemini_dur_est if gemini_dur_est > 0 else DEFAULT_SCENE_DURATION_SECS
|
| 180 |
+
scene_from_gemini['user_selected_asset_type'] = "Auto (Director's Choice)" # UI default
|
| 181 |
+
processed_scene_list.append(scene_from_gemini)
|
| 182 |
+
st.session_state.story_treatment_scenes = processed_scene_list
|
| 183 |
+
|
| 184 |
+
num_generated_scenes = len(st.session_state.story_treatment_scenes) # Renamed
|
| 185 |
+
st.session_state.scene_generation_prompts = [""]*num_generated_scenes
|
| 186 |
+
st.session_state.generated_scene_assets_info = [None]*num_generated_scenes
|
| 187 |
+
logger.info(f"Phase 1 complete. {num_generated_scenes} scenes."); status_op.update(label="Treatment complete! β
Generating visuals...", state="running")
|
| 188 |
+
|
| 189 |
+
status_op.write("Phase 2: Creating visual assets (Image/Video)... πΌοΈπ¬"); logger.info("Phase 2: Visual Asset Gen.")
|
| 190 |
+
successful_asset_count = 0 # Renamed
|
| 191 |
+
for i_scene, scene_data_item in enumerate(st.session_state.story_treatment_scenes): # Renamed
|
| 192 |
+
scene_num_display = scene_data_item.get('scene_number', i_scene+1) # Renamed
|
| 193 |
+
status_op.write(f" Asset for Scene {scene_num_display}..."); logger.info(f" Processing asset for Scene {scene_num_display}.")
|
| 194 |
+
if generate_asset_for_scene_wrapper(i_scene, scene_data_item, version_num=1): # Pass default 'Auto' for initial gen
|
| 195 |
+
successful_asset_count += 1
|
| 196 |
+
|
| 197 |
+
status_label_phase2 = "Visual assets ready! " # Renamed
|
| 198 |
+
next_op_state = "running" # Renamed
|
| 199 |
+
if successful_asset_count == 0 and num_generated_scenes > 0:
|
| 200 |
+
logger.error("Asset gen failed for all scenes."); status_label_phase2 = "Asset gen FAILED for all scenes."; next_op_state="error";
|
| 201 |
+
status_op.update(label=status_label_phase2, state=next_op_state, expanded=True); st.stop()
|
| 202 |
+
elif successful_asset_count < num_generated_scenes:
|
| 203 |
+
logger.warning(f"Assets partially generated ({successful_asset_count}/{num_generated_scenes})."); status_label_phase2 = f"Assets partially generated ({successful_asset_count}/{num_generated_scenes}). "
|
| 204 |
+
status_op.update(label=f"{status_label_phase2}Generating narration script...", state=next_op_state)
|
| 205 |
+
if next_op_state == "error": st.stop()
|
| 206 |
+
|
| 207 |
+
status_op.write("Phase 3: Generating narration script..."); logger.info("Phase 3: Narration Script Gen.")
|
| 208 |
+
voice_style_for_narration_prompt = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer") # Renamed
|
| 209 |
+
narration_gen_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, voice_style_for_narration_prompt) # Renamed
|
| 210 |
+
st.session_state.narration_script_display = st.session_state.gemini_handler.generate_image_prompt(narration_gen_prompt) # generate_image_prompt returns string
|
| 211 |
+
logger.info("Narration script generated."); status_op.update(label="Narration script ready! Synthesizing voice...", state="running")
|
| 212 |
+
|
| 213 |
+
status_op.write("Phase 4: Synthesizing voice (ElevenLabs)... π"); logger.info("Phase 4: Voice Synthesis.")
|
| 214 |
+
st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(st.session_state.narration_script_display)
|
| 215 |
+
|
| 216 |
+
final_status_label = "All components ready! Storyboard below. π" # Renamed
|
| 217 |
+
final_op_state = "complete" # Renamed
|
| 218 |
+
if not st.session_state.overall_narration_audio_path:
|
| 219 |
+
final_status_label = f"{status_label_phase2}Storyboard ready (Voiceover skipped or failed)."
|
| 220 |
+
logger.warning("Voiceover generation was skipped or failed.")
|
| 221 |
+
else: logger.info("Voiceover generated successfully.")
|
| 222 |
+
status_op.update(label=final_status_label, state=final_op_state, expanded=False)
|
| 223 |
+
|
| 224 |
+
except ValueError as ve_err: logger.error(f"ValueError in main generation: {ve_err}", exc_info=True); status_op.update(label=f"Input or Gemini response error: {ve_err}", state="error", expanded=True); # Renamed
|
| 225 |
+
except Exception as e_unhandled: logger.error(f"Unhandled Exception in main generation: {e_unhandled}", exc_info=True); status_op.update(label=f"An unexpected error: {e_unhandled}", state="error", expanded=True); # Renamed
|
| 226 |
+
|
| 227 |
+
# --- Sidebar Fine-Tuning Options (Characters, Global Style, Voice) ---
|
| 228 |
+
# (Keep these sections as they were in the previous correct version)
|
| 229 |
+
with st.expander("Define Characters", expanded=False):
|
| 230 |
+
char_name_input = st.text_input("Character Name", key="char_name_adv_ultra_v5_sb"); char_desc_input = st.text_area("Visual Description", key="char_desc_adv_ultra_v5_sb", height=100, placeholder="e.g., Jax: rugged male astronaut...")
|
| 231 |
+
if st.button("Save Character", key="add_char_adv_ultra_v5_sb"):
|
| 232 |
+
if char_name_input and char_desc_input: st.session_state.character_definitions[char_name_input.strip().lower()] = char_desc_input.strip(); st.success(f"Char '{char_name_input.strip()}' saved.")
|
| 233 |
+
else: st.warning("Name and description needed.")
|
| 234 |
+
if st.session_state.character_definitions: st.caption("Current Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.character_definitions.items()]
|
| 235 |
+
|
| 236 |
+
with st.expander("Global Style Overrides", expanded=False):
|
| 237 |
+
style_presets_dict = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi..."} # Truncated for brevity
|
| 238 |
+
selected_style_preset_key = st.selectbox("Base Style Preset:", options=list(style_presets_dict.keys()), key="style_preset_adv_ultra_v5_sb")
|
| 239 |
+
custom_style_keywords_input = st.text_area("Additional Custom Style Keywords:", key="custom_style_adv_ultra_v5_sb", height=80, placeholder="e.g., 'Dutch angle'")
|
| 240 |
+
current_global_style = st.session_state.global_style_additions
|
| 241 |
+
if st.button("Apply Global Styles", key="apply_styles_adv_ultra_v5_sb"):
|
| 242 |
+
final_style_str = style_presets_dict[selected_style_preset_key];
|
| 243 |
+
if custom_style_keywords_input.strip(): final_style_str = f"{final_style_str}, {custom_style_keywords_input.strip()}" if final_style_str else custom_style_keywords_input.strip()
|
| 244 |
+
st.session_state.global_style_additions = final_style_str.strip(); current_global_style = final_style_str.strip() # Update local var for immediate display
|
| 245 |
+
if current_global_style: st.success("Global styles applied!")
|
| 246 |
+
else: st.info("Global style additions cleared.")
|
| 247 |
+
if current_global_style: st.caption(f"Active global styles: \"{current_global_style}\"")
|
| 248 |
+
|
| 249 |
+
# --- Sidebar: narrator voice and narration-script style controls ---
# Diff-rendering artifacts stripped and indentation restored.
with st.expander("Voice & Narration Style", expanded=False):
    # Default to the engine's currently configured ElevenLabs voice when available.
    engine_default_voice = "Rachel"
    if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine:
        engine_default_voice = st.session_state.visual_engine.elevenlabs_voice_id
    user_voice_id_input = st.text_input("ElevenLabs Voice ID (override):", value=engine_default_voice, key="el_voice_id_override_v5_sb", help=f"Defaulting to '{engine_default_voice}'.")
    narration_prompt_styles_dict = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
    selected_narration_style_key = st.selectbox("Narration Script Style:", list(narration_prompt_styles_dict.keys()), key="narr_style_sel_v5_sb", index=0)
    if st.button("Set Narrator Voice & Style", key="set_voice_btn_ultra_v5_sb"):
        # Empty override falls back to configured voice ID, then to "Rachel".
        final_voice_id_to_use_el = user_voice_id_input.strip() or st.session_state.get("ELEVENLABS_VOICE_ID_CONFIG", "Rachel")
        if hasattr(st.session_state, 'visual_engine'):
            st.session_state.visual_engine.elevenlabs_voice_id = final_voice_id_to_use_el
        st.session_state.selected_voice_style_for_generation = narration_prompt_styles_dict[selected_narration_style_key]
        st.success(f"Narrator Voice ID: {final_voice_id_to_use_el}. Script Style: {selected_narration_style_key}")
        logger.info(f"User updated 11L Voice ID: {final_voice_id_to_use_el}, Script Style: {selected_narration_style_key}")
| 263 |
+
# --- Main Content Area: storyboard display and per-scene editing ---
# Diff-rendering artifacts stripped, indentation restored, and the two bare
# `except:` clauses narrowed to `except Exception:`.
# NOTE(review): several emoji and the Korean "_감독" ("director") dict-key
# suffixes were mojibake-garbled in the source diff — confirm exact glyphs and
# key spellings against the Gemini handler / git history before merging.
st.header("🎬 Cinematic Storyboard & Treatment")

if st.session_state.narration_script_display:
    with st.expander("📜 View Full Narration Script", expanded=False):
        st.markdown(f"> _{st.session_state.narration_script_display}_")

if not st.session_state.story_treatment_scenes:
    st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main_loop, scene_content_item in enumerate(st.session_state.story_treatment_scenes):
        scene_num_val = scene_content_item.get('scene_number', i_main_loop + 1)
        scene_title_val = scene_content_item.get('scene_title', 'Untitled Scene')
        # Unique widget-key base per scene so Streamlit widgets don't collide.
        key_base_for_scene = f"s{scene_num_val}_{''.join(filter(str.isalnum, scene_title_val[:10]))}_main_{i_main_loop}"

        if "director_note" in scene_content_item and scene_content_item['director_note']:
            st.info(f"🎬 Director Note S{scene_num_val}: {scene_content_item['director_note']}")
        st.subheader(f"SCENE {scene_num_val}: {scene_title_val.upper()}")
        col_treatment, col_visual = st.columns([0.45, 0.55])

        with col_treatment:  # treatment text and per-scene controls
            with st.expander("📝 Scene Treatment & Controls", expanded=True):
                st.markdown(f"**Beat:** {scene_content_item.get('emotional_beat', 'N/A')}")
                st.markdown(f"**Setting:** {scene_content_item.get('setting_description', 'N/A')}")
                st.markdown(f"**Chars:** {', '.join(scene_content_item.get('characters_involved', ['N/A']))}")
                st.markdown(f"**Focus Moment:** _{scene_content_item.get('character_focus_moment', 'N/A')}_")
                st.markdown(f"**Plot Beat:** {scene_content_item.get('key_plot_beat', 'N/A')}")
                st.markdown(f"**Dialogue Hook:** `\"{scene_content_item.get('suggested_dialogue_hook', '...')}\"`")
                st.markdown("---")
                # Director-proactive fields; missing keys degrade gracefully to 'N/A'.
                st.markdown(f"**Dir. Visual Style:** _{scene_content_item.get('PROACTIVE_visual_style_감독', 'N/A')}_")
                st.markdown(f"**Dir. Camera:** _{scene_content_item.get('PROACTIVE_camera_work_감독', 'N/A')}_")
                st.markdown(f"**Dir. Sound:** _{scene_content_item.get('PROACTIVE_sound_design_감독', 'N/A')}_")
                st.markdown("---")

                st.markdown("##### Shot, Pacing & Asset Controls")
                # Dominant shot type (camera angle); fall back to the default on unknown values.
                current_ui_shot_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_shot_type', DEFAULT_SHOT_TYPE)
                try:
                    shot_type_idx_val = SHOT_TYPES_OPTIONS.index(current_ui_shot_type)
                except ValueError:
                    shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
                new_ui_shot_type = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=shot_type_idx_val, key=f"shot_type_widget_{key_base_for_scene}")
                if new_ui_shot_type != current_ui_shot_type:
                    st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type'] = new_ui_shot_type

                # User scene duration in seconds.
                current_ui_duration = st.session_state.story_treatment_scenes[i_main_loop].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
                new_ui_duration = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=current_ui_duration, step=1, key=f"duration_widget_{key_base_for_scene}")
                if new_ui_duration != current_ui_duration:
                    st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs'] = new_ui_duration

                # User asset-type override (image vs. video clip vs. auto).
                current_ui_asset_type = st.session_state.story_treatment_scenes[i_main_loop].get('user_selected_asset_type', "Auto (Director's Choice)")
                try:
                    asset_type_idx_val = ASSET_TYPE_OPTIONS.index(current_ui_asset_type)
                except ValueError:
                    asset_type_idx_val = 0
                new_ui_asset_type = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=asset_type_idx_val, key=f"asset_type_sel_{key_base_for_scene}", help="Choose 'Image' or 'Video Clip'. 'Auto' uses Gemini's suggestion.")
                if new_ui_asset_type != current_ui_asset_type:
                    st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type'] = new_ui_asset_type
                st.markdown("---")

                # Show the prompt used to generate the current asset, if one exists.
                prompt_for_current_asset = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else None
                if prompt_for_current_asset:
                    with st.popover("👁️ View Asset Generation Prompt"):
                        st.markdown(f"**Prompt used for current asset:**")
                        st.code(prompt_for_current_asset, language='text')

                pexels_query_val = scene_content_item.get('pexels_search_query_감독', None)
                if pexels_query_val:
                    st.caption(f"Pexels Fallback Query: `{pexels_query_val}`")

        with col_visual:  # generated asset preview column
            asset_info_for_scene = st.session_state.generated_scene_assets_info[i_main_loop] if i_main_loop < len(st.session_state.generated_scene_assets_info) else None
            if asset_info_for_scene and not asset_info_for_scene.get('error') and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
                path_to_asset_file = asset_info_for_scene['path']
                type_of_asset_file = asset_info_for_scene.get('type', 'image')
                if type_of_asset_file == 'image':
                    st.image(path_to_asset_file, caption=f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
                elif type_of_asset_file == 'video':
                    try:
                        with open(path_to_asset_file, 'rb') as vf_read:
                            video_bytes_data = vf_read.read()
                        st.video(video_bytes_data, format="video/mp4", start_time=0)
                        st.caption(f"Scene {scene_num_val} ({type_of_asset_file}): {scene_title_val}")
                    except Exception as e_vid_display:
                        st.error(f"Error displaying video {path_to_asset_file}: {e_vid_display}")
                        logger.error(f"Error displaying video: {e_vid_display}", exc_info=True)
                else:
                    st.warning(f"Unknown asset type '{type_of_asset_file}' for Scene {scene_num_val}.")
            else:
                if st.session_state.story_treatment_scenes:
                    error_message_display = asset_info_for_scene.get('error_message', 'Visual pending or failed.') if asset_info_for_scene else 'Visual pending or failed.'
                    st.caption(error_message_display)

            # --- Popover: edit scene treatment (regenerates text then asset) ---
            with st.popover(f"✏️ Edit S{scene_num_val} Treatment"):
                feedback_for_treatment = st.text_area("Changes to treatment:", key=f"treat_fb_{key_base_for_scene}", height=150)
                if st.button(f"🔄 Update S{scene_num_val} Treatment", key=f"regen_treat_btn_{key_base_for_scene}"):
                    if feedback_for_treatment:
                        with st.status(f"Updating S{scene_num_val} Treatment & Asset...", expanded=True) as status_treatment_regen:
                            # Preserve user preferences that Gemini's regeneration won't know about.
                            user_shot_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_shot_type']
                            user_duration_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_scene_duration_secs']
                            user_asset_type_pref = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type']

                            regen_prompt_for_gemini = create_scene_regeneration_prompt(scene_content_item, feedback_for_treatment, st.session_state.story_treatment_scenes)
                            try:
                                updated_scene_data_gemini = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_for_gemini)
                                final_updated_scene_data = {**updated_scene_data_gemini}
                                final_updated_scene_data['user_shot_type'] = user_shot_type_pref
                                final_updated_scene_data['user_scene_duration_secs'] = user_duration_pref
                                final_updated_scene_data['user_selected_asset_type'] = user_asset_type_pref
                                st.session_state.story_treatment_scenes[i_main_loop] = final_updated_scene_data
                                status_treatment_regen.update(label="Treatment updated! Regenerating asset...", state="running")

                                # Bump the asset version number parsed from the old filename ("..._vN").
                                version_num_asset = 1
                                if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
                                    try:
                                        base_fn_asset, _ = os.path.splitext(os.path.basename(asset_info_for_scene['path']))
                                        version_num_asset = int(base_fn_asset.split('_v')[-1]) + 1 if '_v' in base_fn_asset else 2
                                    except Exception:  # was bare `except:` — narrowed; any parse failure means "version 2"
                                        version_num_asset = 2

                                if generate_asset_for_scene_wrapper(i_main_loop, final_updated_scene_data, version_num=version_num_asset, user_selected_asset_type_override=user_asset_type_pref):
                                    status_treatment_regen.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
                                else:
                                    status_treatment_regen.update(label="Treatment updated, asset failed.", state="complete", expanded=False)
                                st.rerun()
                            except Exception as e_treat_regen_main:
                                status_treatment_regen.update(label=f"Error: {e_treat_regen_main}", state="error")
                                logger.error(f"Scene treatment regen error: {e_treat_regen_main}", exc_info=True)
                    else:
                        st.warning("Please provide feedback for treatment.")

            # --- Popover: edit visual prompt / regenerate asset only ---
            with st.popover(f"🎨 Edit S{scene_num_val} Visual Prompt/Asset"):
                current_gen_prompt_display = st.session_state.scene_generation_prompts[i_main_loop] if i_main_loop < len(st.session_state.scene_generation_prompts) else "No prompt generated yet."
                st.caption("Current Asset Generation Prompt:")
                st.code(current_gen_prompt_display, language='text')
                feedback_for_visual = st.text_area("Describe changes for the visual asset:", key=f"visual_fb_{key_base_for_scene}", height=150)
                if st.button(f"🔄 Update S{scene_num_val} Asset", key=f"regen_visual_btn_{key_base_for_scene}"):
                    if feedback_for_visual:
                        with st.status(f"Refining prompt & regenerating asset for S{scene_num_val}...", expanded=True) as status_visual_regen:
                            user_asset_type_choice_visual = st.session_state.story_treatment_scenes[i_main_loop]['user_selected_asset_type']
                            is_video_asset_type = (user_asset_type_choice_visual == "Video Clip") or \
                                (user_asset_type_choice_visual == "Auto (Director's Choice)" and scene_content_item.get('suggested_asset_type_감독') == 'video_clip')

                            newly_constructed_asset_prompt = ""
                            if not is_video_asset_type:  # refining an IMAGE prompt via Gemini
                                gemini_refinement_prompt = create_visual_regeneration_prompt(current_gen_prompt_display, feedback_for_visual, scene_content_item, st.session_state.character_definitions, st.session_state.global_style_additions)
                                try:
                                    newly_constructed_asset_prompt = st.session_state.gemini_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt)
                                    st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
                                    status_visual_regen.update(label="Image prompt refined by Gemini! Regenerating asset...", state="running")
                                except Exception as e_gemini_prompt_refine:
                                    status_visual_regen.update(label=f"Error refining prompt: {e_gemini_prompt_refine}", state="error")
                                    logger.error(f"Visual prompt refinement error: {e_gemini_prompt_refine}", exc_info=True)
                                    continue  # skip asset generation for this scene
                            else:
                                # For VIDEO, reconstruct the motion prompt from current scene data;
                                # user feedback influences it only indirectly via scene_data for now.
                                logger.info(f"Reconstructing video motion prompt for S{scene_num_val} based on feedback (indirectly via scene_data). Feedback was: {feedback_for_visual}")
                                newly_constructed_asset_prompt = construct_text_to_video_prompt_for_gen4(scene_content_item, st.session_state.global_style_additions)
                                st.session_state.scene_generation_prompts[i_main_loop] = newly_constructed_asset_prompt
                                status_visual_regen.update(label="Video motion prompt reconstructed! Regenerating asset...", state="running")

                            if not newly_constructed_asset_prompt:
                                status_visual_regen.update(label="Prompt construction failed.", state="error")
                                continue

                            # Bump the visual-asset version number parsed from the old filename.
                            version_num_visual_asset = 1
                            if asset_info_for_scene and asset_info_for_scene.get('path') and os.path.exists(asset_info_for_scene['path']):
                                try:
                                    base_fn_viz_asset, _ = os.path.splitext(os.path.basename(asset_info_for_scene['path']))
                                    version_num_visual_asset = int(base_fn_viz_asset.split('_v')[-1]) + 1 if '_v' in base_fn_viz_asset else 2
                                except Exception:  # was bare `except:` — narrowed
                                    version_num_visual_asset = 2

                            if generate_asset_for_scene_wrapper(i_main_loop, st.session_state.story_treatment_scenes[i_main_loop], version_num=version_num_visual_asset, user_selected_asset_type_override=user_asset_type_choice_visual):
                                status_visual_regen.update(label="Asset Updated! 🎉", state="complete", expanded=False)
                            else:
                                status_visual_regen.update(label="Prompt updated, asset regeneration failed.", state="complete", expanded=False)
                            st.rerun()
                    else:
                        st.warning("Please provide feedback for visual asset.")
        st.markdown("---")
+
# --- Video Assembly: collect valid per-scene assets and call the engine ---
# Diff-rendering artifacts stripped and indentation restored.
if st.session_state.story_treatment_scenes and any(asset_info_item_loop and not asset_info_item_loop.get('error') and asset_info_item_loop.get('path') for asset_info_item_loop in st.session_state.generated_scene_assets_info if asset_info_item_loop is not None):
    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn_v5_main", type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly:
            assets_for_final_video = []
            for i_vid_assembly, scene_data_for_vid in enumerate(st.session_state.story_treatment_scenes):
                asset_info_current_scene = st.session_state.generated_scene_assets_info[i_vid_assembly] if i_vid_assembly < len(st.session_state.generated_scene_assets_info) else None
                # Only scenes with an error-free, existing asset file are included.
                if asset_info_current_scene and not asset_info_current_scene.get('error') and asset_info_current_scene.get('path') and os.path.exists(asset_info_current_scene['path']):
                    assets_for_final_video.append({
                        'path': asset_info_current_scene['path'],
                        'type': asset_info_current_scene.get('type', 'image'),
                        'scene_num': scene_data_for_vid.get('scene_number', i_vid_assembly + 1),
                        'key_action': scene_data_for_vid.get('key_plot_beat', ''),
                        'duration': scene_data_for_vid.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
                    })
                    status_video_assembly.write(f"Adding S{scene_data_for_vid.get('scene_number', i_vid_assembly + 1)} ({asset_info_current_scene.get('type')}).")
                else:
                    logger.warning(f"Skipping S{scene_data_for_vid.get('scene_number', i_vid_assembly+1)} for video: No valid asset.")

            if assets_for_final_video:
                status_video_assembly.write("Calling video engine...")
                st.session_state.video_path = st.session_state.visual_engine.assemble_animatic_from_assets(
                    asset_data_list=assets_for_final_video,
                    overall_narration_path=st.session_state.overall_narration_audio_path,
                    output_filename="cinegen_ultra_animatic.mp4", fps=24
                )
                if st.session_state.video_path and os.path.exists(st.session_state.video_path):
                    status_video_assembly.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False)
                    st.balloons()
                else:
                    status_video_assembly.update(label="Video assembly failed. Check logs.", state="error", expanded=False)
                    logger.error("Video assembly returned None or file does not exist.")
            else:
                status_video_assembly.update(label="No valid assets for video assembly.", state="error", expanded=False)
                logger.warning("No valid assets found for video assembly.")
elif st.session_state.story_treatment_scenes:
    st.info("Generate visual assets before assembling the animatic.")
| 435 |
+
# --- Final video playback & download ---
# Diff-rendering artifacts stripped and indentation restored.
if st.session_state.video_path and os.path.exists(st.session_state.video_path):
    st.header("🎬 Generated Cinematic Animatic")
    try:
        # Read the whole file once so the same bytes feed both the player and the download button.
        with open(st.session_state.video_path, 'rb') as vf_obj_read:
            video_bytes_content = vf_obj_read.read()
        st.video(video_bytes_content, format="video/mp4")
        st.download_button(label="Download Ultra Animatic", data=video_bytes_content, file_name=os.path.basename(st.session_state.video_path), mime="video/mp4", use_container_width=True, key="download_ultra_video_btn_v5_main_dl")
    except Exception as e_vid_final_display:
        st.error(f"Error displaying final video: {e_vid_final_display}")
        logger.error(f"Error displaying final video: {e_vid_final_display}", exc_info=True)
| 443 |
+
# --- Footer ---
st.sidebar.markdown("---")
st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|