# app.py
import streamlit as st
import os
import logging

# --- Streamlit PermissionError Mitigation Attempts ---
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ:
    os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ:
    os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
streamlit_home_path_app = "/app/.streamlit_cai_config_v2"
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
    os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
    try: os.makedirs(streamlit_home_path_app, exist_ok=True)
    except Exception: pass # Ignore if fails, Dockerfile ENV is primary

from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
    create_cinematic_treatment_prompt, construct_dalle_prompt,
    construct_text_to_video_prompt_for_gen4, create_narration_script_prompt_enhanced,
    create_scene_regeneration_prompt, create_visual_regeneration_prompt
)

st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)')
logger = logging.getLogger(__name__)

SHOT_TYPES_OPTIONS = [ "Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"; ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]

def load_api_key(key_name_st, key_name_e, service_n):
    """Look up an API key for service_n, preferring st.secrets and falling back to environment variables."""
    key_val = None
    try:
        if hasattr(st, 'secrets') and key_name_st in st.secrets:
            key_val = st.secrets.get(key_name_st)
            if key_val: logger.info(f"API Key for {service_n} found in st.secrets.")
    except Exception as e: logger.warning(f"Could not read st.secrets for {key_name_st} ({service_n}): {e}")
    if not key_val and key_name_e in os.environ:
        key_val = os.environ.get(key_name_e)
        if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
    if not key_val: logger.warning(f"API Key for {service_n} (Key: {key_name_st}/{key_name_e}) NOT FOUND.")
    return key_val

if 'services_initialized_flag' not in st.session_state:
    logger.info("APP_INIT: Initializing services and API keys...")
    # (API Key Loading as before)
    st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
    st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
    st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
    st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
    st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
    st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")

    if not st.session_state.API_KEY_GEMINI: st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
    try: st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI); logger.info("GeminiHandler initialized.")
    except Exception as e: st.error(f"CRITICAL: GeminiHandler init fail: {e}"); logger.critical(f"GeminiHandler init fail: {e}", exc_info=True); st.stop()
    try:
        el_def_voice = "Rachel"; el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
        st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
        st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
        st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
        st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
        st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
        logger.info("VisualEngine initialized and keys set.")
    except Exception as e: st.error(f"CRITICAL: VisualEngine init fail: {e}"); logger.critical(f"VisualEngine init fail: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
    st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")

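# Per-project session-state defaults: scene treatments, per-scene generation prompts, generated
# asset records, the assembled video path, character definitions, global style keywords, and narration.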
PROJECT_SS_DEFAULTS = {'project_story_treatment_scenes_list':[],'project_scene_generation_prompts_list':[],'project_generated_assets_info_list':[],'project_final_video_path':None,'project_character_definitions_map':{},'project_global_style_keywords_str':"",'project_overall_narration_audio_path':None,'project_narration_script_text':""}
for k_ss, def_v_ss in PROJECT_SS_DEFAULTS.items():
    if k_ss not in st.session_state: st.session_state[k_ss] = def_v_ss

def initialize_new_project_data_in_session():
    st.session_state.project_story_treatment_scenes_list = []; st.session_state.project_scene_generation_prompts_list = []; st.session_state.project_generated_assets_info_list = []
    st.session_state.project_final_video_path = None; st.session_state.project_overall_narration_audio_path = None; st.session_state.project_narration_script_text = ""
    logger.info("PROJECT_DATA: New project data re-initialized.")

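# Generates (or regenerates) the visual asset for one scene: decides image vs. video clip from the
# user override or the AI suggestion, builds the matching prompt, calls the VisualEngine, and stores
# the prompt and the result (or an error record) in session state. Returns True on success.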
def generate_asset_for_scene_in_app(sc_idx, sc_data, asset_v=1, user_asset_type_ui="Auto (Director's Choice)"):
    # (Logic as in previous app.py, ensuring it uses scene_data_dict for visual_engine call)
    logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")
    gen_as_vid_final = False; gemini_sugg_type = sc_data.get('suggested_asset_type_감독', 'image').lower()
    if user_asset_type_ui=="Image": gen_as_vid_final=False
    elif user_asset_type_ui=="Video Clip": gen_as_vid_final=True
    elif user_asset_type_ui=="Auto (Director's Choice)": gen_as_vid_final=(gemini_sugg_type=="video_clip")
    logger.debug(f"APP: Final asset type: {'Video' if gen_as_vid_final else 'Image'}")
    prompt_base_img = construct_dalle_prompt(sc_data,st.session_state.project_character_definitions_map,st.session_state.project_global_style_keywords_str)
    prompt_motion_vid = ""
    if gen_as_vid_final: prompt_motion_vid=construct_text_to_video_prompt_for_gen4(sc_data,st.session_state.project_global_style_keywords_str) or sc_data.get('video_clip_motion_description_감독',"subtle motion")
    if not prompt_base_img: logger.error(f"Base image prompt construction failed for S{sc_data.get('scene_number',sc_idx+1)}"); return False
    while len(st.session_state.project_scene_generation_prompts_list)<=sc_idx:st.session_state.project_scene_generation_prompts_list.append("")
    while len(st.session_state.project_generated_assets_info_list)<=sc_idx:st.session_state.project_generated_assets_info_list.append(None)
    st.session_state.project_scene_generation_prompts_list[sc_idx]=prompt_motion_vid if gen_as_vid_final else prompt_base_img
    fn_base_asset=f"scene_{sc_data.get('scene_number',sc_idx+1)}_asset_v{asset_v}"
    rwy_dur=sc_data.get('video_clip_duration_estimate_secs_감독',sc_data.get('user_scene_duration_secs',DEFAULT_SCENE_DURATION_SECS));rwy_dur=max(1,rwy_dur)
    asset_res_dict=st.session_state.visual_content_engine.generate_scene_asset(image_generation_prompt_text=prompt_base_img,motion_prompt_text_for_video=prompt_motion_vid,scene_data_dict=sc_data,scene_identifier_fn_base=fn_base_asset,generate_as_video_clip_flag=gen_as_vid_final,runway_target_dur_val=rwy_dur) # Uses scene_data_dict
    st.session_state.project_generated_assets_info_list[sc_idx]=asset_res_dict
    if asset_res_dict and asset_res_dict.get('prompt_used')and st.session_state.project_scene_generation_prompts_list[sc_idx]!=asset_res_dict['prompt_used']:st.session_state.project_scene_generation_prompts_list[sc_idx]=asset_res_dict['prompt_used']
    if asset_res_dict and not asset_res_dict['error']and asset_res_dict.get('path')and os.path.exists(asset_res_dict['path']):logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data.get('scene_number',sc_idx+1)}:{os.path.basename(asset_res_dict['path'])}");return True
    else:err_msg=asset_res_dict.get('error_message','Unk err')if asset_res_dict else 'Asset res None';logger.warning(f"APP: Asset gen FAIL S{sc_data.get('scene_number',sc_idx+1)}. Type:{'Vid'if gen_as_vid_final else 'Img'}. Err:{err_msg}");curr_p=st.session_state.project_scene_generation_prompts_list[sc_idx];st.session_state.project_generated_assets_info_list[sc_idx]={'path':None,'type':'none','error':True,'error_message':err_msg,'prompt_used':curr_p};return False

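# --- Sidebar: creative seed inputs and the four-phase generation pipeline ---
# Phase 1: cinematic treatment (Gemini) -> Phase 2: per-scene assets ->
# Phase 3: narration script -> Phase 4: voice synthesis.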
with st.sidebar: # Sidebar UI
    if os.path.exists("assets/logo.png"): st.image("assets/logo.png", width=150)
    else: st.sidebar.markdown("## 🎬 CineGen AI Ultra+"); logger.warning("assets/logo.png not found.")
    st.markdown("### Creative Seed")
    sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis, post-apocalyptic desert, mirages, mechanical scavengers.", height=100, key="sb_user_idea_unique")
    sb_genre = st.selectbox("Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="sb_genre_unique")
    sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical"], index=0, key="sb_mood_unique")
    sb_num_scenes = st.slider("Key Scenes:", 1, 10, 1, key="sb_num_scenes_unique")
    sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
    sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="sb_guidance_unique")
    sb_actual_guidance = sb_guidance_opts[sb_guidance_key]

    if st.button("🌌 Generate Cinematic Treatment", type="primary", key="sb_btn_gen_treat_unique", use_container_width=True):
        initialize_new_project_data_in_session()
        if not sb_user_idea.strip(): st.warning("Please provide a story idea.")
        else:
            with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_op: # Renamed this for clarity
                try:
                    main_status_op.write("Phase 1: Crafting treatment... πŸ“œ"); logger.info("APP: P1 - Treatment Gen.")
                    prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
                    raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
                    if not isinstance(raw_treat_list, list) or not raw_treat_list: raise ValueError("Gemini invalid scene list.")
                    init_scenes = []
                    for scene_gemini in raw_treat_list:
                        gem_dur = scene_gemini.get('video_clip_duration_estimate_secs_감독', 0); scene_gemini['user_scene_duration_secs'] = gem_dur if gem_dur > 0 else DEFAULT_SCENE_DURATION_SECS
                        scene_gemini['user_shot_type'] = scene_gemini.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE); scene_gemini['user_selected_asset_type'] = "Auto (Director's Choice)"; init_scenes.append(scene_gemini)
                    st.session_state.project_story_treatment_scenes_list = init_scenes
                    num_gen_sc = len(init_scenes); st.session_state.project_scene_generation_prompts_list = [""]*num_gen_sc; st.session_state.project_generated_assets_info_list = [None]*num_gen_sc
                    logger.info(f"APP: P1 done. {num_gen_sc} scenes."); main_status_op.update(label="Treatment complete! βœ… Generating assets...", state="running")
                    main_status_op.write("Phase 2: Creating assets..."); logger.info("APP: P2 - Asset Gen.")
                    success_assets = 0
                    for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
                        sc_num_log = scene_item.get('scene_number', i+1); main_status_op.write(f"  Asset for Scene {sc_num_log}..."); logger.info(f"  APP: Asset S{sc_num_log}.")
                        if generate_asset_for_scene_in_app(i, scene_item, asset_v=1): success_assets += 1
                    lbl_p2 = "Assets generated! "; next_state = "running"
                    if success_assets == 0 and num_gen_sc > 0: logger.error("APP: Asset gen FAIL all."); lbl_p2 = "Asset gen FAIL all."; next_state="error"; main_status_op.update(label=lbl_p2, state=next_state, expanded=True); st.stop()
                    elif success_assets < num_gen_sc: logger.warning(f"APP: Assets partial ({success_assets}/{num_gen_sc})."); lbl_p2 = f"Assets partial ({success_assets}/{num_gen_sc}). "
                    main_status_op.update(label=f"{lbl_p2}Generating narration...", state=next_state)
                    if next_state == "error": st.stop()
                    main_status_op.write("Phase 3: Narration script..."); logger.info("APP: P3 - Narration Script.")
                    voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer")
                    prompt_narr = create_narration_script_prompt_enhanced(st.session_state.project_story_treatment_scenes_list, sb_mood, sb_genre, voice_style)
                    st.session_state.project_narration_script_text = st.session_state.gemini_service_handler.generate_image_prompt(prompt_narr)
                    logger.info("APP: Narration script OK."); main_status_op.update(label="Narration ready! Synthesizing voice...", state="running")
                    main_status_op.write("Phase 4: Synthesizing voice..."); logger.info("APP: P4 - Voice Synth.")
                    st.session_state.project_overall_narration_audio_path = st.session_state.visual_content_engine.generate_narration_audio(st.session_state.project_narration_script_text)
                    final_lbl = "All components ready! Review storyboard. πŸš€"; final_state = "complete"
                    if not st.session_state.project_overall_narration_audio_path: final_lbl = f"{lbl_p2}Storyboard ready (Voiceover failed)."; logger.warning("APP: Narration audio fail.")
                    else: logger.info("APP: Narration audio OK.")
                    main_status_op.update(label=final_lbl, state=final_state, expanded=False)
                except ValueError as e_val_main: logger.error(f"APP: ValueError: {e_val_main}", exc_info=True); main_status_op.update(label=f"Data/Response Error: {e_val_main}", state="error", expanded=True);
                except TypeError as e_type_main: logger.error(f"APP: TypeError: {e_type_main}", exc_info=True); main_status_op.update(label=f"Type Error: {e_type_main}", state="error", expanded=True);
                except Exception as e_unhandled_main_flow: logger.error(f"APP_MAIN_FLOW: Unhandled Exception: {e_unhandled_main_flow}", exc_info=True); main_status_op.update(label=f"Unexpected Error: {e_unhandled_main_flow}", state="error", expanded=True);
    
    with st.expander("Define Characters", expanded=False):
        sb_char_name = st.text_input("Character Name", key="sb_char_name_unique_char_main"); sb_char_desc = st.text_area("Visual Description", key="sb_char_desc_unique_char_main", height=100)
        if st.button("Save Character", key="sb_add_char_unique_char_main"):
            if sb_char_name and sb_char_desc: st.session_state.project_character_definitions_map[sb_char_name.strip().lower()] = sb_char_desc.strip(); st.success(f"Char '{sb_char_name.strip()}' saved.")
            else: st.warning("Name and description needed.")
        if st.session_state.project_character_definitions_map: st.caption("Defined Characters:"); [st.markdown(f"**{k.title()}:** _{v}_") for k,v in st.session_state.project_character_definitions_map.items()]
    with st.expander("Global Style Overrides", expanded=False):
        sb_style_presets = { "Default": "", "Noir": "gritty neo-noir...", "Fantasy": "epic fantasy...", "Sci-Fi": "analog sci-fi..."}
        sb_selected_preset = st.selectbox("Base Style Preset:", list(sb_style_presets.keys()), key="sb_style_preset_unique_global_main")
        sb_custom_keywords = st.text_area("Additional Custom Keywords:", key="sb_custom_style_unique_global_main", height=80)
        sb_current_global_style = st.session_state.project_global_style_keywords_str
        if st.button("Apply Global Styles", key="sb_apply_styles_unique_global_main"):
            final_style = sb_style_presets[sb_selected_preset]; 
            if sb_custom_keywords.strip(): final_style = f"{final_style}, {sb_custom_keywords.strip()}" if final_style else sb_custom_keywords.strip()
            st.session_state.project_global_style_keywords_str = final_style.strip(); sb_current_global_style = final_style.strip()
            if sb_current_global_style: st.success("Global styles applied!")
            else: st.info("Global styles cleared.")
        if sb_current_global_style: st.caption(f"Active: \"{sb_current_global_style}\"")
    with st.expander("Voice & Narration Style", expanded=False):
        sb_engine_default_voice = "Rachel"
        if hasattr(st.session_state, 'visual_content_engine'): sb_engine_default_voice = st.session_state.visual_content_engine.elevenlabs_voice_id
        sb_user_voice_id = st.text_input("11L Voice ID (override):", value=sb_engine_default_voice, key="sb_el_voice_id_override_unique_global_main")
        sb_narration_styles = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
        sb_selected_narr_style = st.selectbox("Narration Script Style:", list(sb_narration_styles.keys()), key="sb_narr_style_sel_unique_global_main", index=0)
        if st.button("Set Narrator Voice & Style", key="sb_set_voice_btn_unique_global_main"):
            final_el_voice_id = sb_user_voice_id.strip() or st.session_state.get("CONFIG_ELEVENLABS_VOICE_ID") or "Rachel"
            if hasattr(st.session_state, 'visual_content_engine'): st.session_state.visual_content_engine.elevenlabs_voice_id = final_el_voice_id
            st.session_state.selected_voice_style_for_generation = sb_narration_styles[sb_selected_narr_style]
            st.success(f"Narrator Voice: {final_el_voice_id}. Script Style: {sb_selected_narr_style}")
            logger.info(f"User updated 11L Voice ID: {final_el_voice_id}, Narr Style: {sb_selected_narr_style}")

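# --- Main area: storyboard display with per-scene treatment details, shot/duration/asset controls,
# and popovers for regenerating either the scene treatment or the visual asset. ---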
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.project_narration_script_text:
    with st.expander("πŸ“œ View Full Narration Script", expanded=False): st.markdown(f"> _{st.session_state.project_narration_script_text}_")

if not st.session_state.project_story_treatment_scenes_list: st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main_loop_content, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
        scene_num_for_display = scene_content_item_display.get('scene_number', i_main_loop_content + 1)
        scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
        key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_v2_{i_main_loop_content}" # Ensure unique keys

        if "director_note" in scene_content_item_display and scene_content_item_display['director_note']: st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
        st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}")
        
        treatment_display_col, visual_display_col = st.columns([0.45, 0.55])

        with treatment_display_col:
            with st.expander("πŸ“ Scene Treatment & Controls", expanded=True):
                # (Display textual scene details)
                st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}"); st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}"); st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}"); st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_"); st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}"); st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`"); st.markdown("---"); st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_"); st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_"); st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_"); st.markdown("---")
                st.markdown("##### Shot, Pacing & Asset Controls")
                ui_shot_type_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_shot_type', DEFAULT_SHOT_TYPE)
                try: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(ui_shot_type_current)
                except ValueError: ui_shot_type_idx_val = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
                ui_shot_type_new = st.selectbox("Dominant Shot Type:", options=SHOT_TYPES_OPTIONS, index=ui_shot_type_idx_val, key=f"shot_type_{key_base_main_area_widgets}")
                if ui_shot_type_new != ui_shot_type_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type'] = ui_shot_type_new
                ui_duration_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
                ui_duration_new = st.number_input("Scene Duration (seconds):", min_value=1, max_value=300, value=ui_duration_current, step=1, key=f"duration_{key_base_main_area_widgets}")
                if ui_duration_new != ui_duration_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs'] = ui_duration_new
                ui_asset_type_override_current = st.session_state.project_story_treatment_scenes_list[i_main_loop_content].get('user_selected_asset_type', "Auto (Director's Choice)")
                try: ui_asset_type_idx_val = ASSET_TYPE_OPTIONS.index(ui_asset_type_override_current)
                except ValueError: ui_asset_type_idx_val = 0
                ui_asset_type_override_new = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_type_idx_val, key=f"asset_type_{key_base_main_area_widgets}", help="Choose asset type. 'Auto' uses AI suggestion.")
                if ui_asset_type_override_new != ui_asset_type_override_current: st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type'] = ui_asset_type_override_new
                st.markdown("---")
                prompt_for_asset_to_display = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else None
                if prompt_for_asset_to_display:
                     with st.popover("πŸ‘οΈ View Asset Generation Prompt"): st.markdown(f"**Prompt used for current asset:**"); st.code(prompt_for_asset_to_display, language='text')
                pexels_query_to_display = scene_content_item_display.get('pexels_search_query_감독', None)
                if pexels_query_to_display: st.caption(f"Pexels Fallback: `{pexels_query_to_display}`")

        with visual_display_col:
            current_asset_info_to_display = st.session_state.project_generated_assets_info_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_generated_assets_info_list) else None
            if current_asset_info_to_display and not current_asset_info_to_display.get('error') and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                path_of_asset_for_display = current_asset_info_to_display['path']; type_of_asset_for_display = current_asset_info_to_display.get('type', 'image')
                if type_of_asset_for_display == 'image': st.image(path_of_asset_for_display, caption=f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
                elif type_of_asset_for_display == 'video':
                    try:
                        with open(path_of_asset_for_display, 'rb') as vid_file_obj_read: video_bytes_for_st_video = vid_file_obj_read.read()
                        st.video(video_bytes_for_st_video, format="video/mp4", start_time=0); st.caption(f"S{scene_num_for_display} ({type_of_asset_for_display}): {scene_title_for_display_main}")
                    except Exception as e_vid_display_main_loop: st.error(f"Error displaying video {path_of_asset_for_display}: {e_vid_display_main_loop}"); logger.error(f"Error displaying video: {e_vid_display_main_loop}", exc_info=True)
                else: st.warning(f"Unknown asset type '{type_of_asset_for_display}' for S{scene_num_for_display}.")
            else:
                if st.session_state.project_story_treatment_scenes_list:
                    error_msg_for_asset_display = current_asset_info_to_display.get('error_message', 'Visual pending or failed.') if current_asset_info_to_display else 'Visual pending or failed.'
                    st.caption(error_msg_for_asset_display)
            
            with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
                feedback_input_for_treatment_regen = st.text_area("Changes to treatment:", key=f"treat_fb_input_pop_{key_base_main_area_widgets}", height=150)
                if st.button(f"πŸ”„ Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_{key_base_main_area_widgets}"):
                    if feedback_input_for_treatment_regen:
                        with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treatment_update_op_pop:
                            preserved_user_shot_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_shot_type']
                            preserved_user_duration = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_scene_duration_secs']
                            preserved_user_asset_type = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
                            prompt_for_gemini_scene_regen_pop = create_scene_regeneration_prompt(scene_content_item_display, feedback_input_for_treatment_regen, st.session_state.project_story_treatment_scenes_list)
                            try:
                                updated_scene_data_from_gemini_pop = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_for_gemini_scene_regen_pop)
                                final_merged_updated_scene_data_pop = {**updated_scene_data_from_gemini_pop}
                                final_merged_updated_scene_data_pop['user_shot_type'] = preserved_user_shot_type; final_merged_updated_scene_data_pop['user_scene_duration_secs'] = preserved_user_duration; final_merged_updated_scene_data_pop['user_selected_asset_type'] = preserved_user_asset_type
                                st.session_state.project_story_treatment_scenes_list[i_main_loop_content] = final_merged_updated_scene_data_pop
                                status_treatment_update_op_pop.update(label="Treatment updated! Regenerating asset...", state="running")
                                version_for_regenerated_asset_pop = 1
                                if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                                    try: base_fn_asset_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_asset_pop = int(base_fn_asset_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_asset_regen_pop else 2
                                    except Exception: version_for_regenerated_asset_pop = 2
                                if generate_asset_for_scene_in_app(i_main_loop_content, final_merged_updated_scene_data_pop, asset_v=version_for_regenerated_asset_pop, user_asset_type_ui=preserved_user_asset_type): status_treatment_update_op_pop.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
                                else: status_treatment_update_op_pop.update(label="Treatment updated, but asset regeneration failed.", state="complete", expanded=False)
                                st.rerun()
                            except Exception as e_treatment_regen_op_pop: status_treatment_update_op_pop.update(label=f"Error during treatment regen: {e_treatment_regen_op_pop}", state="error"); logger.error(f"Scene treatment regeneration error: {e_treatment_regen_op_pop}", exc_info=True)
                    else: st.warning("Please provide feedback to update the treatment.")

            with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
                prompt_to_edit_display_pop = st.session_state.project_scene_generation_prompts_list[i_main_loop_content] if i_main_loop_content < len(st.session_state.project_scene_generation_prompts_list) else "No prompt."
                st.caption("Current Asset Generation Prompt:"); st.code(prompt_to_edit_display_pop, language='text')
                feedback_for_visual_asset_regen_input = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_{key_base_main_area_widgets}", height=150)
                if st.button(f"πŸ”„ Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_{key_base_main_area_widgets}"):
                    if feedback_for_visual_asset_regen_input:
                        with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_visual_asset_regen_op_pop:
                            user_selected_asset_type_for_regen_pop = st.session_state.project_story_treatment_scenes_list[i_main_loop_content]['user_selected_asset_type']
                            is_video_type_for_regen_pop = (user_selected_asset_type_for_regen_pop == "Video Clip") or (user_selected_asset_type_for_regen_pop == "Auto (Director's Choice)" and scene_content_item_display.get('suggested_asset_type_감독') == 'video_clip')
                            newly_constructed_asset_prompt_regen_pop = ""
                            if not is_video_type_for_regen_pop:
                                gemini_refinement_prompt_viz_pop = create_visual_regeneration_prompt(prompt_to_edit_display_pop, feedback_for_visual_asset_regen_input, scene_content_item_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
                                try: newly_constructed_asset_prompt_regen_pop = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop); st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Image prompt refined! Regenerating asset...", state="running")
                                except Exception as e_gemini_refine_viz_pop: status_visual_asset_regen_op_pop.update(label=f"Error refining prompt: {e_gemini_refine_viz_pop}", state="error"); logger.error(f"Visual prompt refinement error: {e_gemini_refine_viz_pop}", exc_info=True); continue
                            else: 
                                newly_constructed_asset_prompt_regen_pop = construct_text_to_video_prompt_for_gen4(scene_content_item_display, st.session_state.project_global_style_keywords_str); st.session_state.project_scene_generation_prompts_list[i_main_loop_content] = newly_constructed_asset_prompt_regen_pop; status_visual_asset_regen_op_pop.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
                            if not newly_constructed_asset_prompt_regen_pop: status_visual_asset_regen_op_pop.update(label="Prompt construction failed.", state="error"); continue
                            version_for_regenerated_visual_asset_pop = 1
                            if current_asset_info_to_display and current_asset_info_to_display.get('path') and os.path.exists(current_asset_info_to_display['path']):
                                try: base_fn_viz_regen_pop,_=os.path.splitext(os.path.basename(current_asset_info_to_display['path'])); version_for_regenerated_visual_asset_pop = int(base_fn_viz_regen_pop.split('_v')[-1])+1 if '_v' in base_fn_viz_regen_pop else 2
                                except Exception: version_for_regenerated_visual_asset_pop = 2
                            if generate_asset_for_scene_in_app(i_main_loop_content, st.session_state.project_story_treatment_scenes_list[i_main_loop_content], asset_v=version_for_regenerated_visual_asset_pop, user_asset_type_ui=user_selected_asset_type_for_regen_pop): status_visual_asset_regen_op_pop.update(label="Asset Updated! 🎉", state="complete", expanded=False)
                            else: status_visual_asset_regen_op_pop.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
                            st.rerun()
                    else: st.warning("Please provide feedback for visual asset regeneration.")
        st.markdown("---")
    
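    # Animatic assembly: collect every scene that has a valid generated asset, then pass the list
    # (with durations and key actions) plus the narration audio to the VisualEngine assembler.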
    if st.session_state.project_story_treatment_scenes_list and any(asset_info_item_vid_assembly and not asset_info_item_vid_assembly.get('error') and asset_info_item_vid_assembly.get('path') for asset_info_item_vid_assembly in st.session_state.project_generated_assets_info_list if asset_info_item_vid_assembly is not None):
        if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_3", type="primary", use_container_width=True):
            with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
                assets_for_final_video_assembly_list_main = []
                for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes_list):
                    asset_info_current_scene_for_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None
                    if asset_info_current_scene_for_vid_main and not asset_info_current_scene_for_vid_main.get('error') and asset_info_current_scene_for_vid_main.get('path') and os.path.exists(asset_info_current_scene_for_vid_main['path']):
                        assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid_main['path'], 'type': asset_info_current_scene_for_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
                        status_video_assembly_final_op_main.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid_main.get('type')}).")
                    else: logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
                if assets_for_final_video_assembly_list_main:
                    status_video_assembly_final_op_main.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
                    st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
                    if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path): status_video_assembly_final_op_main.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
                    else: status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True); logger.error("APP: Video assembly returned None or file does not exist.")
                else: status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True); logger.warning("APP: No valid assets found for video assembly.")
    elif st.session_state.project_story_treatment_scenes_list: st.info("Generate visual assets for your scenes before attempting to assemble the animatic.")

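    # Final animatic playback and download, shown once assembly has produced a file on disk.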
    if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
        st.header("🎬 Generated Cinematic Animatic"); 
        try:
            with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display_main: final_video_bytes_for_display_main = final_video_file_obj_display_main.read()
            st.video(final_video_bytes_for_display_main, format="video/mp4") 
            st.download_button(label="Download Ultra Animatic", data=final_video_bytes_for_display_main, file_name=os.path.basename(st.session_state.project_final_video_path), mime="video/mp4", use_container_width=True, key="download_video_main_area_btn_final_unique_3" )
        except Exception as e_final_video_display_op_main_area: st.error(f"Error displaying final animatic video: {e_final_video_display_op_main_area}"); logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main_area}", exc_info=True)

st.sidebar.markdown("---"); st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")