# app.py
import streamlit as st
import os
import logging

# --- Streamlit PermissionError Mitigation Attempts ---
if "STREAMLIT_CLIENT_GATHER_USAGE_STATS" not in os.environ: os.environ["STREAMLIT_CLIENT_GATHER_USAGE_STATS"] = "false"
if "STREAMLIT_BROWSER_GATHERUSAGESTATS" not in os.environ: os.environ["STREAMLIT_BROWSER_GATHERUSAGESTATS"] = "false"
streamlit_home_path_app = "/app/.streamlit_cai_config_v3"
if "STREAMLIT_HOME" not in os.environ and os.getcwd().startswith("/app"):
    os.environ["STREAMLIT_HOME"] = streamlit_home_path_app
    try: os.makedirs(streamlit_home_path_app, exist_ok=True)
    except Exception: pass

from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
    create_cinematic_treatment_prompt, construct_dalle_prompt,
    construct_text_to_video_prompt_for_gen4, create_narration_script_prompt_enhanced,
    create_scene_regeneration_prompt, create_visual_regeneration_prompt
)

st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s [%(levelname)s] - %(message)s (%(module)s.%(funcName)s:%(lineno)d)')
logger = logging.getLogger(__name__)

SHOT_TYPES_OPTIONS = ["Director's Choice", "Establishing Shot", "Long Shot", "Full Shot", "Medium Long Shot (Cowboy)", "Medium Shot", "Medium Close-up", "Close-up", "Extreme Close-up", "Point of View (POV)", "Over the Shoulder", "Tracking Shot", "Dolly Zoom", "Crane Shot", "Aerial Shot", "Static Shot", "Dutch Angle", "Whip Pan"]
DEFAULT_SCENE_DURATION_SECS = 5; DEFAULT_SHOT_TYPE = "Director's Choice"
ASSET_TYPE_OPTIONS = ["Auto (Director's Choice)", "Image", "Video Clip"]

def load_api_key(key_name_st, key_name_e, service_n):
    key_val = None; secrets_avail = hasattr(st, 'secrets')
    try:
        if secrets_avail and key_name_st in st.secrets:
            key_val = st.secrets.get(key_name_st)
            if key_val: logger.info(f"API Key for {service_n} found in Streamlit secrets.")
    except Exception as e:
        logger.warning(f"No st.secrets for {key_name_st} ({service_n}): {e}")
    if not key_val and key_name_e in os.environ:
        key_val = os.environ.get(key_name_e)
        if key_val: logger.info(f"API Key for {service_n} found in env var '{key_name_e}'.")
    if not key_val: logger.warning(f"API Key for {service_n} (Key: {key_name_st}/{key_name_e}) NOT FOUND.")
    return key_val

if 'services_initialized_flag' not in st.session_state:
    logger.info("APP_INIT: Initializing services and API keys...")
    st.session_state.API_KEY_GEMINI = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY", "Gemini")
    st.session_state.API_KEY_OPENAI = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY", "OpenAI/DALL-E")
    st.session_state.API_KEY_ELEVENLABS = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY", "ElevenLabs")
    st.session_state.API_KEY_PEXELS = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY", "Pexels")
    st.session_state.CONFIG_ELEVENLABS_VOICE_ID = load_api_key("ELEVENLABS_VOICE_ID", "ELEVENLABS_VOICE_ID", "ElevenLabs Voice ID")
    st.session_state.API_KEY_RUNWAYML = load_api_key("RUNWAY_API_KEY", "RUNWAY_API_KEY", "RunwayML")

    if not st.session_state.API_KEY_GEMINI:
        st.error("CRITICAL: Gemini API Key missing!"); logger.critical("Gemini API Key missing."); st.stop()
    try:
        st.session_state.gemini_service_handler = GeminiHandler(api_key=st.session_state.API_KEY_GEMINI)
        logger.info("GeminiHandler initialized.")
    except Exception as e:
        st.error(f"CRITICAL: GeminiHandler init fail: {e}"); logger.critical(f"GeminiHandler init fail: {e}", exc_info=True); st.stop()
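    # Assumption: the VisualEngine setters below tolerate None keys (load_api_key may return None
    # for optional services); if core.visual_engine raises on a missing key, guard these calls.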
    try:
        el_def_voice = "Rachel"
        el_res_voice_id = st.session_state.CONFIG_ELEVENLABS_VOICE_ID or el_def_voice
        st.session_state.visual_content_engine = VisualEngine(output_dir="temp_cinegen_media", default_elevenlabs_voice_id=el_res_voice_id)
        st.session_state.visual_content_engine.set_openai_api_key(st.session_state.API_KEY_OPENAI)
        st.session_state.visual_content_engine.set_elevenlabs_api_key(st.session_state.API_KEY_ELEVENLABS, voice_id_from_secret=st.session_state.CONFIG_ELEVENLABS_VOICE_ID)
        st.session_state.visual_content_engine.set_pexels_api_key(st.session_state.API_KEY_PEXELS)
        st.session_state.visual_content_engine.set_runway_api_key(st.session_state.API_KEY_RUNWAYML)
        logger.info("VisualEngine initialized and keys set.")
    except Exception as e:
        st.error(f"CRITICAL: VisualEngine init fail: {e}"); logger.critical(f"VisualEngine init fail: {e}", exc_info=True); st.warning("VisualEngine critical setup issue."); st.stop()
    st.session_state.services_initialized_flag = True; logger.info("APP_INIT: Service initialization complete.")

PROJECT_SS_DEFAULTS = {
    'project_story_treatment_scenes_list': [], 'project_scene_generation_prompts_list': [],
    'project_generated_assets_info_list': [], 'project_final_video_path': None,
    'project_character_definitions_map': {}, 'project_global_style_keywords_str': "",
    'project_overall_narration_audio_path': None, 'project_narration_script_text': ""
}
for k_ss, def_v_ss in PROJECT_SS_DEFAULTS.items():
    if k_ss not in st.session_state: st.session_state[k_ss] = def_v_ss

def initialize_new_project_data_in_session():
    st.session_state.project_story_treatment_scenes_list = []; st.session_state.project_scene_generation_prompts_list = []; st.session_state.project_generated_assets_info_list = []
    st.session_state.project_final_video_path = None; st.session_state.project_overall_narration_audio_path = None; st.session_state.project_narration_script_text = ""
    logger.info("PROJECT_DATA: New project data re-initialized.")

def generate_asset_for_scene_in_app(sc_idx, sc_data_dict, asset_v=1, user_asset_type_ui="Auto (Director's Choice)"):
    logger.debug(f"APP: generate_asset_for_scene_in_app for S_idx {sc_idx}, ver {asset_v}, user_type: {user_asset_type_ui}")
    gen_as_vid_final = False; gemini_sugg_type = sc_data_dict.get('suggested_asset_type_감독', 'image').lower()
    if user_asset_type_ui == "Image": gen_as_vid_final = False
    elif user_asset_type_ui == "Video Clip": gen_as_vid_final = True
    elif user_asset_type_ui == "Auto (Director's Choice)": gen_as_vid_final = (gemini_sugg_type == "video_clip")
    logger.debug(f"APP: Final asset type: {'Video' if gen_as_vid_final else 'Image'}")
    prompt_base_img = construct_dalle_prompt(sc_data_dict, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
    prompt_motion_vid = ""
    if gen_as_vid_final:
        prompt_motion_vid = construct_text_to_video_prompt_for_gen4(sc_data_dict, st.session_state.project_global_style_keywords_str) or sc_data_dict.get('video_clip_motion_description_감독', "subtle motion")
    if not prompt_base_img:
        logger.error(f"Base image prompt construction failed for S{sc_data_dict.get('scene_number', sc_idx+1)}"); return False
    while len(st.session_state.project_scene_generation_prompts_list) <= sc_idx: st.session_state.project_scene_generation_prompts_list.append("")
    while len(st.session_state.project_generated_assets_info_list) <= sc_idx: st.session_state.project_generated_assets_info_list.append(None)
    st.session_state.project_scene_generation_prompts_list[sc_idx] = prompt_motion_vid if gen_as_vid_final else prompt_base_img
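    # Inferred from how the result is consumed below (not from core.visual_engine itself):
    # generate_scene_asset is expected to return a dict roughly of the form
    # {'path': str|None, 'type': 'image'|'video'|'none', 'error': bool, 'error_message': str, 'prompt_used': str}.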
    fn_base_asset = f"scene_{sc_data_dict.get('scene_number', sc_idx+1)}_asset_v{asset_v}"
    rwy_dur = sc_data_dict.get('video_clip_duration_estimate_secs_감독', sc_data_dict.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)); rwy_dur = max(1, rwy_dur)
    asset_res_dict = st.session_state.visual_content_engine.generate_scene_asset(image_generation_prompt_text=prompt_base_img, motion_prompt_text_for_video=prompt_motion_vid, scene_data_dict=sc_data_dict, scene_identifier_fn_base=fn_base_asset, generate_as_video_clip_flag=gen_as_vid_final, runway_target_dur_val=rwy_dur)
    st.session_state.project_generated_assets_info_list[sc_idx] = asset_res_dict
    if asset_res_dict and asset_res_dict.get('prompt_used') and st.session_state.project_scene_generation_prompts_list[sc_idx] != asset_res_dict['prompt_used']:
        st.session_state.project_scene_generation_prompts_list[sc_idx] = asset_res_dict['prompt_used']
    if asset_res_dict and not asset_res_dict['error'] and asset_res_dict.get('path') and os.path.exists(asset_res_dict['path']):
        logger.info(f"APP: Asset ({asset_res_dict.get('type')}) OK S{sc_data_dict.get('scene_number', sc_idx+1)}: {os.path.basename(asset_res_dict['path'])}")
        return True
    else:
        err_msg = asset_res_dict.get('error_message', 'Unknown error') if asset_res_dict else 'Asset result was None'
        logger.warning(f"APP: Asset gen FAIL S{sc_data_dict.get('scene_number', sc_idx+1)}. Type: {'Vid' if gen_as_vid_final else 'Img'}. Err: {err_msg}")
        curr_p = st.session_state.project_scene_generation_prompts_list[sc_idx]
        st.session_state.project_generated_assets_info_list[sc_idx] = {'path': None, 'type': 'none', 'error': True, 'error_message': err_msg, 'prompt_used': curr_p}
        return False

# --- Sidebar: Creative Seed & Generation ---
with st.sidebar:
    if os.path.exists("assets/logo.png"): st.image("assets/logo.png", width=150)
    else: st.sidebar.markdown("## 🎬 CineGen AI Ultra+"); logger.warning("assets/logo.png not found.")
    st.markdown("### Creative Seed")
    sb_user_idea = st.text_area("Core Idea:", "Lone wanderer, mythical oasis...", height=100, key="sb_user_idea_u")
    sb_genre = st.selectbox("Genre:", ["Post-Apocalyptic", "Sci-Fi"], index=0, key="sb_genre_u")
    sb_mood = st.selectbox("Mood:", ["Hopeful yet Desperate", "Mysterious"], index=0, key="sb_mood_u")
    sb_num_scenes = st.slider("Key Scenes:", 1, 3, 1, key="sb_num_scenes_u")
    sb_guidance_opts = {"Standard": "standard", "Artistic": "more_artistic", "Experimental": "experimental_narrative"}
    sb_guidance_key = st.selectbox("AI Director Style:", list(sb_guidance_opts.keys()), key="sb_guidance_u")
    sb_actual_guidance = sb_guidance_opts[sb_guidance_key]

    if st.button("🌌 Generate Cinematic Treatment", type="primary", key="sb_btn_gen_treat_u", use_container_width=True):
        initialize_new_project_data_in_session()
        if not sb_user_idea.strip(): st.warning("Please provide a story idea.")
        else:
            with st.status("AI Director is envisioning your masterpiece...", expanded=True) as main_status_op:
                try:
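                    # Phase 1 asks Gemini for the scene-by-scene treatment; Phase 2 turns each
                    # scene into an image or video asset via VisualEngine.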
                    main_status_op.write("P1: Crafting treatment... 📜"); logger.info("APP: P1 - Treatment Gen.")
                    prompt_treat = create_cinematic_treatment_prompt(sb_user_idea, sb_genre, sb_mood, sb_num_scenes, sb_actual_guidance)
                    raw_treat_list = st.session_state.gemini_service_handler.generate_story_breakdown(prompt_treat)
                    if not isinstance(raw_treat_list, list) or not raw_treat_list: raise ValueError("Gemini returned an invalid scene list.")
                    init_scenes = []
                    for scene_g in raw_treat_list:
                        gem_dur = scene_g.get('video_clip_duration_estimate_secs_감독', 0)
                        scene_g['user_scene_duration_secs'] = gem_dur if gem_dur > 0 else DEFAULT_SCENE_DURATION_SECS
                        scene_g['user_shot_type'] = scene_g.get('PROACTIVE_camera_work_감독', DEFAULT_SHOT_TYPE)
                        scene_g['user_selected_asset_type'] = "Auto (Director's Choice)"
                        init_scenes.append(scene_g)
                    st.session_state.project_story_treatment_scenes_list = init_scenes
                    num_gen_sc = len(init_scenes)
                    st.session_state.project_scene_generation_prompts_list = [""] * num_gen_sc
                    st.session_state.project_generated_assets_info_list = [None] * num_gen_sc
                    logger.info(f"APP: P1 done. {num_gen_sc} scenes.")
                    main_status_op.update(label="Treatment OK! ✅ Assets...", state="running")

                    main_status_op.write("P2: Creating assets..."); logger.info("APP: P2 - Asset Gen.")
                    success_assets = 0
                    for i, scene_item in enumerate(st.session_state.project_story_treatment_scenes_list):
                        sc_n_log = scene_item.get('scene_number', i+1)
                        main_status_op.write(f"  Asset S{sc_n_log}..."); logger.info(f"  APP: Asset S{sc_n_log}.")
                        if generate_asset_for_scene_in_app(i, scene_item, asset_v=1): success_assets += 1
                    lbl_p2 = "Assets OK! "; nxt_st = "running"
                    if success_assets == 0 and num_gen_sc > 0:
                        logger.error("APP: Asset FAIL all."); lbl_p2 = "Asset FAIL all."; nxt_st = "error"
                        main_status_op.update(label=lbl_p2, state=nxt_st, expanded=True); st.stop()
                    elif success_assets < num_gen_sc:
                        # The body of this branch is missing from the source; a minimal partial-success
                        # label is assumed here so the flow stays runnable.
                        lbl_p2 = f"Assets partially generated ({success_assets}/{num_gen_sc})."
                    # NOTE: a chunk of the original file is missing at this point -- most likely the
                    # narration-script phase (create_narration_script_prompt_enhanced is imported but
                    # never used here) and the sidebar fine-tuning controls.
                except Exception as e_main_gen_flow:
                    # Reconstructed handler: the original except clause for this try block is not in
                    # the source provided; this minimal equivalent keeps the block syntactically valid.
                    main_status_op.update(label=f"Error: {e_main_gen_flow}", state="error", expanded=True)
                    logger.error(f"Treatment/asset generation flow error: {e_main_gen_flow}", exc_info=True)

# --- Main Area: Storyboard & Treatment Display ---
if st.session_state.project_narration_script_text:
    # Only this markdown call survives from the missing section above; the guard is assumed.
    st.markdown(f"_{st.session_state.project_narration_script_text}_")

if not st.session_state.project_story_treatment_scenes_list:
    st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main_display, scene_content_item_display in enumerate(st.session_state.project_story_treatment_scenes_list):
        scene_num_for_display = scene_content_item_display.get('scene_number', i_main_display + 1)
        scene_title_for_display_main = scene_content_item_display.get('scene_title', 'Untitled Scene')
        key_base_main_area_widgets = f"s{scene_num_for_display}_main_widgets_loop_v3_{i_main_display}"
        if "director_note" in scene_content_item_display and scene_content_item_display['director_note']:
            st.info(f"🎬 Director Note S{scene_num_for_display}: {scene_content_item_display['director_note']}")
        st.subheader(f"SCENE {scene_num_for_display}: {scene_title_for_display_main.upper()}")
        treatment_display_column, visual_display_column = st.columns([0.45, 0.55])

        with treatment_display_column:
            with st.expander("📝 Scene Treatment & Controls", expanded=True):
                st.markdown(f"**Beat:** {scene_content_item_display.get('emotional_beat', 'N/A')}")
                st.markdown(f"**Setting:** {scene_content_item_display.get('setting_description', 'N/A')}")
                st.markdown(f"**Chars:** {', '.join(scene_content_item_display.get('characters_involved', ['N/A']))}")
                st.markdown(f"**Focus Moment:** _{scene_content_item_display.get('character_focus_moment', 'N/A')}_")
                st.markdown(f"**Plot Beat:** {scene_content_item_display.get('key_plot_beat', 'N/A')}")
                st.markdown(f"**Dialogue Hook:** `\"{scene_content_item_display.get('suggested_dialogue_hook', '...')}\"`")
                st.markdown("---")
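                # Keys suffixed with '_감독' ("director" in Korean) come from the Gemini treatment
                # JSON and carry the AI director's proactive suggestions for the scene.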
                st.markdown(f"**Dir. Visual Style:** _{scene_content_item_display.get('PROACTIVE_visual_style_감독', 'N/A')}_")
                st.markdown(f"**Dir. Camera:** _{scene_content_item_display.get('PROACTIVE_camera_work_감독', 'N/A')}_")
                st.markdown(f"**Dir. Sound:** _{scene_content_item_display.get('PROACTIVE_sound_design_감독', 'N/A')}_")
                st.markdown("---")
                st.markdown("##### Shot, Pacing & Asset Controls")
                ui_shot_type = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_shot_type', DEFAULT_SHOT_TYPE)
                try: ui_shot_idx = SHOT_TYPES_OPTIONS.index(ui_shot_type)
                except ValueError: ui_shot_idx = SHOT_TYPES_OPTIONS.index(DEFAULT_SHOT_TYPE)
                new_ui_shot = st.selectbox("Dominant Shot Type:", SHOT_TYPES_OPTIONS, index=ui_shot_idx, key=f"shot_type_{key_base_main_area_widgets}")
                if new_ui_shot != ui_shot_type: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_shot_type'] = new_ui_shot
                ui_dur = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)
                new_ui_dur = st.number_input("Scene Duration (s):", 1, 300, ui_dur, 1, key=f"duration_{key_base_main_area_widgets}")
                if new_ui_dur != ui_dur: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_scene_duration_secs'] = new_ui_dur
                ui_asset_type = st.session_state.project_story_treatment_scenes_list[i_main_display].get('user_selected_asset_type', "Auto (Director's Choice)")
                try: ui_asset_idx = ASSET_TYPE_OPTIONS.index(ui_asset_type)
                except ValueError: ui_asset_idx = 0
                new_ui_asset = st.selectbox("Asset Type Override:", ASSET_TYPE_OPTIONS, index=ui_asset_idx, key=f"asset_type_{key_base_main_area_widgets}")
                if new_ui_asset != ui_asset_type: st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type'] = new_ui_asset
                st.markdown("---")
                prompt_asset_disp_main = st.session_state.project_scene_generation_prompts_list[i_main_display] if i_main_display < len(st.session_state.project_scene_generation_prompts_list) else None
                if prompt_asset_disp_main:
                    with st.popover("👁️ View Asset Gen Prompt"):
                        st.markdown("**Prompt used:**"); st.code(prompt_asset_disp_main, language='text')
                px_q_disp_main = scene_content_item_display.get('pexels_search_query_감독', None)
                if px_q_disp_main: st.caption(f"Pexels Fallback: `{px_q_disp_main}`")

        with visual_display_column:
            asset_info_main_disp = st.session_state.project_generated_assets_info_list[i_main_display] if i_main_display < len(st.session_state.project_generated_assets_info_list) else None
            if asset_info_main_disp and not asset_info_main_disp.get('error') and asset_info_main_disp.get('path') and os.path.exists(asset_info_main_disp['path']):
                path_asset_main = asset_info_main_disp['path']; type_asset_main = asset_info_main_disp.get('type', 'image')
                if type_asset_main == 'image':
                    st.image(path_asset_main, caption=f"S{scene_num_for_display} ({type_asset_main}): {scene_title_for_display_main}")
                elif type_asset_main == 'video':
                    try:
                        with open(path_asset_main, 'rb') as vid_f_main: vid_b_main = vid_f_main.read()
                        st.video(vid_b_main, format="video/mp4", start_time=0)
                        st.caption(f"S{scene_num_for_display} ({type_asset_main}): {scene_title_for_display_main}")
                    except Exception as e_vid_disp_main_area:
                        st.error(f"Error displaying video {path_asset_main}: {e_vid_disp_main_area}"); logger.error(f"Display video error: {e_vid_disp_main_area}", exc_info=True)
                else:
                    st.warning(f"Unknown asset type '{type_asset_main}' S{scene_num_for_display}.")
            else:
                if st.session_state.project_story_treatment_scenes_list:
                    err_msg_disp_main_area = asset_info_main_disp.get('error_message', 'Visual pending/failed.') if asset_info_main_disp else 'Visual pending/failed.'
                    st.caption(err_msg_disp_main_area)

            with st.popover(f"✏️ Edit S{scene_num_for_display} Treatment"):
                feedback_treat_regen_in = st.text_area("Changes to treatment:", key=f"treat_fb_pop_main_{key_base_main_area_widgets}", height=150)
                if st.button(f"🔄 Update S{scene_num_for_display} Treatment", key=f"regen_treat_btn_pop_main_{key_base_main_area_widgets}"):
                    if feedback_treat_regen_in:
                        with st.status(f"Updating S{scene_num_for_display} Treatment & Asset...", expanded=True) as status_treat_upd_pop_main:
                            user_shot_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_shot_type']
                            user_dur_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_scene_duration_secs']
                            user_asset_pref = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type']
                            prompt_gemini_regen = create_scene_regeneration_prompt(scene_content_item_display, feedback_treat_regen_in, st.session_state.project_story_treatment_scenes_list)
                            try:
                                updated_scene_gemini = st.session_state.gemini_service_handler.regenerate_scene_script_details(prompt_gemini_regen)
                                final_updated_scene = {**updated_scene_gemini}
                                final_updated_scene['user_shot_type'] = user_shot_pref
                                final_updated_scene['user_scene_duration_secs'] = user_dur_pref
                                final_updated_scene['user_selected_asset_type'] = user_asset_pref
                                st.session_state.project_story_treatment_scenes_list[i_main_display] = final_updated_scene
                                status_treat_upd_pop_main.update(label="Treatment updated! Regenerating asset...", state="running")
                                ver_asset_regen = 1
                                if asset_info_main_disp and asset_info_main_disp.get('path') and os.path.exists(asset_info_main_disp['path']):
                                    try:
                                        base_fn_regen, _ = os.path.splitext(os.path.basename(asset_info_main_disp['path']))
                                        ver_asset_regen = int(base_fn_regen.split('_v')[-1]) + 1 if '_v' in base_fn_regen else 2
                                    except: ver_asset_regen = 2
                                if generate_asset_for_scene_in_app(i_main_display, final_updated_scene, asset_v=ver_asset_regen, user_asset_type_ui=user_asset_pref):
                                    status_treat_upd_pop_main.update(label="Treatment & Asset Updated! 🎉", state="complete", expanded=False)
                                else:
                                    status_treat_upd_pop_main.update(label="Treatment updated, asset regen failed.", state="complete", expanded=False)
                                st.rerun()
                            except Exception as e_treat_regen_main_loop_pop:
                                status_treat_upd_pop_main.update(label=f"Error: {e_treat_regen_main_loop_pop}", state="error")
                                logger.error(f"Scene treatment regen error: {e_treat_regen_main_loop_pop}", exc_info=True)
                    else: st.warning("Please provide feedback for treatment.")

            with st.popover(f"🎨 Edit S{scene_num_for_display} Visual Prompt/Asset"):
                prompt_edit_disp_visual_pop = st.session_state.project_scene_generation_prompts_list[i_main_display] if i_main_display < len(st.session_state.project_scene_generation_prompts_list) else "No prompt."
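                # Regeneration path differs by asset type: image prompts are refined by Gemini from
                # the user's feedback, while video-clip prompts are simply rebuilt from the scene data.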
                st.caption("Current Asset Generation Prompt:"); st.code(prompt_edit_disp_visual_pop, language='text')
                feedback_vis_asset_regen_input_pop = st.text_area("Describe changes for visual asset:", key=f"visual_fb_input_pop_main_{key_base_main_area_widgets}", height=150)
                if st.button(f"🔄 Update S{scene_num_for_display} Asset", key=f"regen_visual_btn_pop_main_{key_base_main_area_widgets}"):
                    if feedback_vis_asset_regen_input_pop:
                        with st.status(f"Refining prompt & asset for S{scene_num_for_display}...", expanded=True) as status_vis_asset_regen_op_pop_main:
                            user_asset_type_choice_pop_viz = st.session_state.project_story_treatment_scenes_list[i_main_display]['user_selected_asset_type']
                            is_video_for_regen_pop_viz = (user_asset_type_choice_pop_viz == "Video Clip") or (user_asset_type_choice_pop_viz == "Auto (Director's Choice)" and scene_content_item_display.get('suggested_asset_type_감독') == 'video_clip')
                            newly_constructed_asset_prompt_regen_pop_viz = ""
                            if not is_video_for_regen_pop_viz:
                                gemini_refinement_prompt_viz_pop_final = create_visual_regeneration_prompt(prompt_edit_disp_visual_pop, feedback_vis_asset_regen_input_pop, scene_content_item_display, st.session_state.project_character_definitions_map, st.session_state.project_global_style_keywords_str)
                                try:
                                    newly_constructed_asset_prompt_regen_pop_viz = st.session_state.gemini_service_handler.refine_image_prompt_from_feedback(gemini_refinement_prompt_viz_pop_final)
                                    st.session_state.project_scene_generation_prompts_list[i_main_display] = newly_constructed_asset_prompt_regen_pop_viz
                                    status_vis_asset_regen_op_pop_main.update(label="Image prompt refined! Regenerating asset...", state="running")
                                except Exception as e_gem_refine_pop_main:
                                    status_vis_asset_regen_op_pop_main.update(label=f"Error refining prompt: {e_gem_refine_pop_main}", state="error")
                                    logger.error(f"Visual prompt refinement error: {e_gem_refine_pop_main}", exc_info=True)
                                    continue  # skip the rest of this scene's block and move to the next scene
                            else:
                                newly_constructed_asset_prompt_regen_pop_viz = construct_text_to_video_prompt_for_gen4(scene_content_item_display, st.session_state.project_global_style_keywords_str)
                                st.session_state.project_scene_generation_prompts_list[i_main_display] = newly_constructed_asset_prompt_regen_pop_viz
                                status_vis_asset_regen_op_pop_main.update(label="Video prompt reconstructed! Regenerating asset...", state="running")
                            if not newly_constructed_asset_prompt_regen_pop_viz:
                                status_vis_asset_regen_op_pop_main.update(label="Prompt construction failed.", state="error")
                                continue
                            ver_vis_asset_regen_pop_main = 1
                            if asset_info_main_disp and asset_info_main_disp.get('path') and os.path.exists(asset_info_main_disp['path']):
                                try:
                                    base_fn_viz_pop_main, _ = os.path.splitext(os.path.basename(asset_info_main_disp['path']))
                                    ver_vis_asset_regen_pop_main = int(base_fn_viz_pop_main.split('_v')[-1]) + 1 if '_v' in base_fn_viz_pop_main else 2
                                except: ver_vis_asset_regen_pop_main = 2
                            if generate_asset_for_scene_in_app(i_main_display, st.session_state.project_story_treatment_scenes_list[i_main_display], asset_v=ver_vis_asset_regen_pop_main, user_asset_type_ui=user_asset_type_choice_pop_viz):
                                status_vis_asset_regen_op_pop_main.update(label="Asset Updated! 🎉", state="complete", expanded=False)
                            else:
                                status_vis_asset_regen_op_pop_main.update(label="Prompt updated, asset regen failed.", state="complete", expanded=False)
                            st.rerun()
                    else: st.warning("Please provide feedback for visual asset regeneration.")

        st.markdown("---")

# --- Final Animatic Assembly ---
if st.session_state.project_story_treatment_scenes_list and any(asset_info_item_vid and not asset_info_item_vid.get('error') and asset_info_item_vid.get('path') for asset_info_item_vid in st.session_state.project_generated_assets_info_list if asset_info_item_vid is not None):
    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_video_main_area_btn_final_unique_4", type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_video_assembly_final_op_main:
            assets_for_final_video_assembly_list_main = []
            for i_vid_assembly_main_loop, scene_data_for_vid_assembly_main in enumerate(st.session_state.project_story_treatment_scenes_list):
                asset_info_current_scene_for_vid_main = st.session_state.project_generated_assets_info_list[i_vid_assembly_main_loop] if i_vid_assembly_main_loop < len(st.session_state.project_generated_assets_info_list) else None
                if asset_info_current_scene_for_vid_main and not asset_info_current_scene_for_vid_main.get('error') and asset_info_current_scene_for_vid_main.get('path') and os.path.exists(asset_info_current_scene_for_vid_main['path']):
                    assets_for_final_video_assembly_list_main.append({'path': asset_info_current_scene_for_vid_main['path'], 'type': asset_info_current_scene_for_vid_main.get('type', 'image'), 'scene_num': scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1), 'key_action': scene_data_for_vid_assembly_main.get('key_plot_beat', ''), 'duration': scene_data_for_vid_assembly_main.get('user_scene_duration_secs', DEFAULT_SCENE_DURATION_SECS)})
                    status_video_assembly_final_op_main.write(f"Adding S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop + 1)} ({asset_info_current_scene_for_vid_main.get('type')}).")
                else:
                    logger.warning(f"Skipping S{scene_data_for_vid_assembly_main.get('scene_number', i_vid_assembly_main_loop+1)} for video: No valid asset.")
            if assets_for_final_video_assembly_list_main:
                status_video_assembly_final_op_main.write("Calling video engine..."); logger.info("APP: Calling visual_engine.assemble_animatic_from_assets")
                st.session_state.project_final_video_path = st.session_state.visual_content_engine.assemble_animatic_from_assets(asset_data_list=assets_for_final_video_assembly_list_main, overall_narration_path=st.session_state.project_overall_narration_audio_path, output_filename="cinegen_ultra_animatic.mp4", fps=24)
                if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
                    status_video_assembly_final_op_main.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False); st.balloons()
                else:
                    status_video_assembly_final_op_main.update(label="Video assembly failed. Check logs.", state="error", expanded=True)
                    logger.error("APP: Video assembly returned None or file does not exist.")
            else:
                status_video_assembly_final_op_main.update(label="No valid assets for video assembly.", state="error", expanded=True)
                logger.warning("APP: No valid assets found for video assembly.")
elif st.session_state.project_story_treatment_scenes_list:
    st.info("Generate visual assets for your scenes before attempting to assemble the animatic.")

if st.session_state.project_final_video_path and os.path.exists(st.session_state.project_final_video_path):
    st.header("🎬 Generated Cinematic Animatic")
    try:
        with open(st.session_state.project_final_video_path, 'rb') as final_video_file_obj_display_main:
            final_video_bytes_for_display_main = final_video_file_obj_display_main.read()
        st.video(final_video_bytes_for_display_main, format="video/mp4")
        st.download_button(label="Download Ultra Animatic", data=final_video_bytes_for_display_main, file_name=os.path.basename(st.session_state.project_final_video_path), mime="video/mp4", use_container_width=True, key="download_video_main_area_btn_final_unique_4")
    except Exception as e_final_video_display_op_main_area:
        st.error(f"Error displaying final animatic video: {e_final_video_display_op_main_area}"); logger.error(f"Error displaying final animatic video: {e_final_video_display_op_main_area}", exc_info=True)

st.sidebar.markdown("---")
st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")