# app.py
import streamlit as st
from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
    create_cinematic_treatment_prompt,  # Using the new "Ultra" prompt
    construct_dalle_prompt,
    create_narration_script_prompt_enhanced,
    create_scene_regeneration_prompt,
    create_visual_regeneration_prompt
)
import os
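# NOTE: the core.* modules are not part of this file. Based on how they are called below,
# GeminiHandler is assumed to provide generate_story_breakdown(), generate_image_prompt() and
# regenerate_scene_script_details(); VisualEngine is assumed to provide the set_*_api_key() setters,
# generate_image_visual(), generate_narration_audio() and create_video_from_images().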
# --- Configuration & Initialization ---
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
# --- Global State Variables & API Key Setup ---
def load_api_key(key_name_streamlit, key_name_env):
    key = None
    secrets_available = hasattr(st, 'secrets')  # Check if st.secrets exists
    try:
        if secrets_available and key_name_streamlit in st.secrets:
            key = st.secrets[key_name_streamlit]
    except Exception as e:  # Catch any error from accessing st.secrets
        print(f"Note: Could not access st.secrets for {key_name_streamlit} (may be local dev): {e}")
    if not key and key_name_env in os.environ:  # Fallback to environment variable
        key = os.environ[key_name_env]
    return key
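# Lookup order: Streamlit secrets first, then the process environment. A missing key returns None,
# which (except for Gemini, checked below) presumably just leaves the corresponding service unavailable.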
# Initialize API Keys and handlers once using session state
if 'keys_loaded' not in st.session_state:
    st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY")
    st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY")
    st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY")
    st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY")

    if not st.session_state.GEMINI_API_KEY:
        st.error("Gemini API Key is essential and missing! Please set it in secrets or environment variables.")
        st.stop()

    st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY)

    # Initialize VisualEngine and set API keys
    if 'visual_engine' not in st.session_state:  # Ensure VE is also session-scoped if needed elsewhere before full init
        st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media")
    st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
    st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY)
    st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
    st.session_state.keys_loaded = True  # Mark keys as loaded
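# Streamlit re-executes this script from top to bottom on every user interaction, so the
# 'keys_loaded' guard above ensures keys are read and handler objects built only once per session.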
# Initialize other session state variables
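# story_treatment_scenes holds the list of scene dicts returned by Gemini; scene_dalle_prompts and
# generated_visual_paths are parallel lists indexed by scene; character_definitions maps a lowercased
# name to its visual description; global_style_additions is free-text styling passed into every
# DALL-E prompt build.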
for key, default_val in [
    ('story_treatment_scenes', []), ('scene_dalle_prompts', []), ('generated_visual_paths', []),
    ('video_path', None), ('character_definitions', {}), ('global_style_additions', ""),
    ('overall_narration_audio_path', None), ('narration_script_display', "")
]:
    if key not in st.session_state:
        st.session_state[key] = default_val
# --- End State & API Key Setup ---
def initialize_new_project():
    st.session_state.story_treatment_scenes = []
    st.session_state.scene_dalle_prompts = []
    st.session_state.generated_visual_paths = []
    st.session_state.video_path = None
    st.session_state.overall_narration_audio_path = None
    st.session_state.narration_script_display = ""
    # Clean up old media files (optional, good for development)
    # output_dir = st.session_state.visual_engine.output_dir
    # if os.path.exists(output_dir):
    #     for f_name in os.listdir(output_dir):
    #         try: os.remove(os.path.join(output_dir, f_name))
    #         except Exception as e: print(f"Could not remove old file {f_name}: {e}")
def generate_visual_for_scene_core(scene_index, scene_data, version=1, dalle_prompt_override=None):
    # scene_data here is one scene from story_treatment_scenes.
    # dalle_prompt_override lets callers (e.g., the visual-prompt editor below) supply an
    # already-refined DALL-E prompt instead of rebuilding it from scene_data.
    dalle_prompt = dalle_prompt_override or construct_dalle_prompt(  # Use the new prompt constructor
        scene_data,
        st.session_state.character_definitions,
        st.session_state.global_style_additions
    )
    if not dalle_prompt:
        print(f"ERROR: DALL-E prompt construction failed for scene {scene_data.get('scene_number', scene_index + 1)}")
        return False

    # Ensure lists are long enough (should be pre-initialized)
    while len(st.session_state.scene_dalle_prompts) <= scene_index:
        st.session_state.scene_dalle_prompts.append("")
    while len(st.session_state.generated_visual_paths) <= scene_index:
        st.session_state.generated_visual_paths.append(None)

    st.session_state.scene_dalle_prompts[scene_index] = dalle_prompt  # Store the DALL-E prompt actually used
    filename = f"scene_{scene_data.get('scene_number', scene_index + 1)}_visual_v{version}.png"
    # Pass the full scene_data to visual_engine for Pexels query construction if DALL-E fails
    img_path = st.session_state.visual_engine.generate_image_visual(dalle_prompt, scene_data, filename)
    if img_path and os.path.exists(img_path):
        st.session_state.generated_visual_paths[scene_index] = img_path
        return True
    else:
        st.session_state.generated_visual_paths[scene_index] = None
        print(f"WARNING: Visual generation ultimately failed for scene {scene_data.get('scene_number', scene_index + 1)}")
        return False
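# generate_visual_for_scene_core returns True only when an image file actually lands on disk; the
# '_v{version}' suffix in the filename keeps regenerated visuals from overwriting earlier versions.
# generate_image_visual is presumed to fall back to a Pexels stock search when DALL-E generation
# fails, which is why the full scene_data (with its suggested Pexels query) is passed alongside
# the prompt.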
# --- UI Sidebar ---
with st.sidebar:
    st.title("🎬 CineGen AI Ultra+")
    st.markdown("### Creative Seed")
    user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main")
    genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic", "Historical Drama", "Surreal"], index=6, key="genre_main")
    mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective", "Whimsical & Lighthearted"], index=0, key="mood_main")
    num_scenes = st.slider("Number of Key Scenes:", 1, 3, 2, key="num_scenes_main")  # Max 3 for API cost/time
    creative_guidance_options = {"Standard Director": "standard", "Artistic Visionary": "more_artistic", "Experimental Storyteller": "experimental_narrative"}
    selected_creative_guidance_key = st.selectbox("AI Creative Director Style:", options=list(creative_guidance_options.keys()), key="creative_guidance_select")
    actual_creative_guidance = creative_guidance_options[selected_creative_guidance_key]
if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn", use_container_width=True):
initialize_new_project()
if not user_idea.strip(): st.warning("Please provide a story idea.")
else:
with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status:
status.write("Phase 1: Gemini crafting cinematic treatment (scenes, style, camera, sound)... πŸ“œ")
treatment_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, actual_creative_guidance)
try:
treatment_result_json = st.session_state.gemini_handler.generate_story_breakdown(treatment_prompt) # Re-use for JSON list
if not isinstance(treatment_result_json, list): # Basic validation
raise ValueError("Gemini did not return a valid list of scenes for the treatment.")
st.session_state.story_treatment_scenes = treatment_result_json
num_gen_scenes = len(st.session_state.story_treatment_scenes)
if num_gen_scenes == 0: raise ValueError("Gemini returned an empty scene list.")
st.session_state.scene_dalle_prompts = [""] * num_gen_scenes
st.session_state.generated_visual_paths = [None] * num_gen_scenes
status.update(label="Treatment complete! βœ… Generating visuals...", state="running", expanded=True)
visual_successes = 0
for i_scene, scene_content in enumerate(st.session_state.story_treatment_scenes):
status.write(f" Creating visual for Scene {scene_content.get('scene_number', i_scene+1)}: {scene_content.get('scene_title','Untitled')}...")
if generate_visual_for_scene_core(i_scene, scene_content, version=1): visual_successes += 1
if visual_successes == 0 and num_gen_scenes > 0:
status.update(label="Visual generation failed for all scenes. Check DALL-E/Pexels API keys, quota, or try different prompts.", state="error", expanded=True); st.stop()
elif visual_successes < num_gen_scenes:
status.update(label=f"Visuals ready ({visual_successes}/{num_gen_scenes} succeeded). Generating narration script...", state="running", expanded=True)
else:
status.update(label="Visuals ready! Generating narration script...", state="running", expanded=True)
# Narration Generation
selected_voice_style = st.session_state.get("selected_voice_style_for_generation", "cinematic_trailer") # Get from session state if set by UI
narration_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, selected_voice_style)
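                    # generate_image_prompt appears to be a general-purpose text-generation call on
                    # GeminiHandler; it is reused here to produce the narration script from the prompt above.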
                    narr_script = st.session_state.gemini_handler.generate_image_prompt(narration_prompt)
                    st.session_state.narration_script_display = narr_script
                    status.update(label="Narration script ready! Synthesizing voice...", state="running")

                    st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(narr_script)
                    if st.session_state.overall_narration_audio_path:
                        status.update(label="Voiceover ready! ✨", state="running")
                    else:
                        status.update(label="Voiceover failed or skipped. Video will be silent.", state="warning")

                    status.update(label="All components ready! View storyboard below. 🚀", state="complete", expanded=False)
                except ValueError as ve:  # Catch our own validation errors
                    status.update(label=f"Treatment generation error: {ve}", state="error", expanded=True)
                    st.stop()
                except Exception as e:
                    status.update(label=f"Error during generation: {e}", state="error", expanded=True)
                    st.stop()
st.markdown("---")
st.markdown("### Fine-Tuning Options")
with st.expander("Define Characters", expanded=False):
char_name_input = st.text_input("Character Name", key="char_name_adv_input_ultra")
char_desc_input = st.text_area("Detailed Visual Description", key="char_desc_adv_input_ultra", height=100, placeholder="e.g., Jax: rugged male astronaut, mid-40s, salt-and-pepper short hair...")
if st.button("Save Character", key="add_char_adv_btn_ultra"):
if char_name_input and char_desc_input: st.session_state.character_definitions[char_name_input.strip().lower()] = char_desc_input.strip(); st.success(f"Character '{char_name_input.strip()}' saved.")
else: st.warning("Provide name and description.")
if st.session_state.character_definitions:
st.caption("Current Characters:")
for k,v in st.session_state.character_definitions.items(): st.markdown(f"**{k.title()}:** _{v}_")
with st.expander("Global Style Overrides", expanded=False):
predefined_styles = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir...", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy elements...", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi film aesthetic..."} # Shortened for brevity
selected_preset = st.selectbox("Base Style Preset:", options=list(predefined_styles.keys()), key="style_preset_select_adv_ultra")
custom_keywords = st.text_area("Additional Custom Style Keywords:", key="custom_style_keywords_adv_input_ultra", height=80, placeholder="e.g., 'shot with Arri Alexa, shallow depth of field'")
current_style_desc = st.session_state.global_style_additions
if st.button("Apply Global Styles", key="apply_styles_adv_btn_ultra"):
final_desc = predefined_styles[selected_preset];
if custom_keywords.strip(): final_desc = f"{final_desc}, {custom_keywords.strip()}" if final_desc else custom_keywords.strip()
st.session_state.global_style_additions = final_desc.strip(); current_style_desc = final_desc.strip()
if current_style_desc: st.success("Global styles applied!")
else: st.info("Global style additions cleared.")
if current_style_desc: st.caption(f"Active global style additions: \"{current_style_desc}\"")
with st.expander("Voice Customization (ElevenLabs)", expanded=False):
elevenlabs_voices_conceptual = ["Rachel", "Adam", "Bella", "Antoni", "Elli", "Josh", "Arnold", "Domi", "Fin", "Sarah", "Charlie", "Clyde", "Dorothy", "George"] # More options
# Get current voice from visual_engine, default if not set
engine_voice_id = "Rachel"
if hasattr(st.session_state, 'visual_engine') and st.session_state.visual_engine:
engine_voice_id = st.session_state.visual_engine.elevenlabs_voice_id
try:
current_voice_index = elevenlabs_voices_conceptual.index(engine_voice_id)
except ValueError:
current_voice_index = 0 # Default to Rachel if current ID not in list
selected_el_voice = st.selectbox("Narrator Voice:", elevenlabs_voices_conceptual,
index=current_voice_index,
key="el_voice_select_ultra")
# Store voice style for narration prompt
voice_styles_for_prompt = {"Cinematic Trailer": "cinematic_trailer", "Neutral Documentary": "documentary_neutral", "Character Introspection": "introspective_character"}
selected_prompt_voice_style_key = st.selectbox("Narration Script Style:", list(voice_styles_for_prompt.keys()), key="narration_style_select")
st.session_state.selected_voice_style_for_generation = voice_styles_for_prompt[selected_prompt_voice_style_key]
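        # These are display names; visual_engine.elevenlabs_voice_id is assumed to accept a voice name
        # (or resolve it to an ElevenLabs voice ID) rather than requiring the raw ID string.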
if st.button("Set Narrator Voice & Style", key="set_voice_btn_ultra"):
if hasattr(st.session_state, 'visual_engine'):
st.session_state.visual_engine.elevenlabs_voice_id = selected_el_voice
st.success(f"Narrator voice set to: {selected_el_voice}. Script style: {selected_prompt_voice_style_key}")
# --- Main Content Area ---
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.narration_script_display:
    with st.expander("📜 View Full Narration Script", expanded=False):
        st.markdown(f"> _{st.session_state.narration_script_display}_")

if not st.session_state.story_treatment_scenes:
    st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main, scene_content_display in enumerate(st.session_state.story_treatment_scenes):
        scene_num = scene_content_display.get('scene_number', i_main + 1)
        scene_title = scene_content_display.get('scene_title', 'Untitled Scene')
        unique_key_base = f"scene_{scene_num}_{''.join(filter(str.isalnum, scene_title[:10]))}"
if "director_note" in scene_content_display and scene_content_display['director_note']:
st.info(f"🎬 Director's Note for Scene {scene_num}: {scene_content_display['director_note']}")
st.subheader(f"SCENE {scene_num}: {scene_title.upper()}")
col_details, col_visual = st.columns([0.45, 0.55])
with col_details:
with st.expander("πŸ“ Scene Treatment Details", expanded=True):
st.markdown(f"**Emotional Beat:** {scene_content_display.get('emotional_beat', 'N/A')}")
st.markdown(f"**Setting:** {scene_content_display.get('setting_description', 'N/A')}")
st.markdown(f"**Characters:** {', '.join(scene_content_display.get('characters_involved', ['N/A']))}")
st.markdown(f"**Character Focus Moment:** _{scene_content_display.get('character_focus_moment', 'N/A')}_")
st.markdown(f"**Key Plot Beat:** {scene_content_display.get('key_plot_beat', 'N/A')}")
st.markdown(f"**Dialogue Hook:** `\"{scene_content_display.get('suggested_dialogue_hook', '...')}\"`")
st.markdown("---")
st.markdown(f"**🎬 Director's Visual Style:** _{scene_content_display.get('PROACTIVE_visual_style_감독', 'N/A')}_")
st.markdown(f"**πŸŽ₯ Director's Camera Work:** _{scene_content_display.get('PROACTIVE_camera_work_감독', 'N/A')}_")
st.markdown(f"**πŸ”Š Director's Sound Design:** _{scene_content_display.get('PROACTIVE_sound_design_감독', 'N/A')}_")
current_dalle_prompt = st.session_state.scene_dalle_prompts[i_main] if i_main < len(st.session_state.scene_dalle_prompts) else None
if current_dalle_prompt:
with st.popover("πŸ‘οΈ View DALL-E Prompt"):
st.markdown(f"**Full DALL-E Prompt:**"); st.code(current_dalle_prompt, language='text')
pexels_query_display = scene_content_display.get('pexels_search_query_감독', None)
if pexels_query_display:
st.caption(f"Suggested Pexels Query for fallback: `{pexels_query_display}`")
with col_visual:
current_img_path = st.session_state.generated_visual_paths[i_main] if i_main < len(st.session_state.generated_visual_paths) else None
if current_img_path and os.path.exists(current_img_path):
st.image(current_img_path, caption=f"Visual Concept for Scene {scene_num}: {scene_title}", use_column_width='always')
else:
if st.session_state.story_treatment_scenes: st.caption("Visual concept for this scene is pending or failed.")
with st.popover(f"✏️ Edit Scene {scene_num} Treatment"):
feedback_script_edit = st.text_area("Describe changes to treatment details:", key=f"treat_feed_{unique_key_base}", height=150)
if st.button(f"πŸ”„ Update Scene {scene_num} Treatment", key=f"regen_treat_btn_{unique_key_base}"):
if feedback_script_edit:
with st.status(f"Updating Scene {scene_num} Treatment...", expanded=True) as status_treat_regen:
regen_prompt_text = create_scene_regeneration_prompt(scene_content_display, feedback_script_edit, st.session_state.story_treatment_scenes)
try:
updated_scene_data = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_text)
st.session_state.story_treatment_scenes[i_main] = updated_scene_data
status_treat_regen.update(label="Treatment updated! Regenerating visual...", state="running")
version_num = 1
if current_img_path:
try: base,_=os.path.splitext(os.path.basename(current_img_path)); version_num = int(base.split('_v')[-1])+1 if '_v' in base else 2
except: version_num=2
if generate_visual_for_scene_core(i_main, updated_scene_data, version=version_num):
status_treat_regen.update(label="Scene Treatment & Visual Updated! πŸŽ‰", state="complete", expanded=False)
else: status_treat_regen.update(label="Treatment updated, visual failed.", state="warning", expanded=False)
st.rerun()
except Exception as e: status_treat_regen.update(label=f"Error: {e}", state="error")
else: st.warning("Please provide feedback for treatment regeneration.")
with st.popover(f"🎨 Edit Scene {scene_num} Visual Prompt"):
dalle_prompt_to_edit = st.session_state.scene_dalle_prompts[i_main] if i_main < len(st.session_state.scene_dalle_prompts) else "No DALL-E prompt."
st.caption("Current DALL-E Prompt:"); st.code(dalle_prompt_to_edit, language='text')
feedback_visual_edit = st.text_area("Describe changes for the DALL-E prompt:", key=f"visual_feed_{unique_key_base}", height=150)
if st.button(f"πŸ”„ Update Scene {scene_num} Visual Prompt & Image", key=f"regen_visual_btn_{unique_key_base}"):
if feedback_visual_edit:
with st.status(f"Refining DALL-E prompt & regenerating visual...", expanded=True) as status_visual_edit_regen:
refinement_req_prompt = create_visual_regeneration_prompt(
dalle_prompt_to_edit, feedback_visual_edit, scene_content_display,
st.session_state.character_definitions, st.session_state.global_style_additions
)
try:
refined_dalle_prompt = st.session_state.gemini_handler.generate_image_prompt(refinement_req_prompt)
st.session_state.scene_dalle_prompts[i_main] = refined_dalle_prompt
status_visual_edit_regen.update(label="DALL-E prompt refined! Regenerating visual...", state="running")
version_num = 1
if current_img_path:
try: base,_=os.path.splitext(os.path.basename(current_img_path)); version_num = int(base.split('_v')[-1])+1 if '_v' in base else 2
except: version_num=2
# Pass current scene_content_display for Pexels fallback context
if generate_visual_for_scene_core(i_main, scene_content_display, version=version_num):
status_visual_edit_regen.update(label="Visual Updated! πŸŽ‰", state="complete", expanded=False)
else: status_visual_edit_regen.update(label="Prompt refined, visual failed.", state="warning", expanded=False)
st.rerun()
except Exception as e: status_visual_edit_regen.update(label=f"Error: {e}", state="error")
else: st.warning("Please provide feedback for visual prompt regeneration.")
st.markdown("---")
if st.session_state.story_treatment_scenes and any(p for p in st.session_state.generated_visual_paths if p is not None):
    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn", type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_vid:
            image_data_for_vid = []
            for i_vid, scene_c in enumerate(st.session_state.story_treatment_scenes):
                img_p = st.session_state.generated_visual_paths[i_vid] if i_vid < len(st.session_state.generated_visual_paths) else None
                if img_p and os.path.exists(img_p):
                    image_data_for_vid.append({
                        'path': img_p,
                        'scene_num': scene_c.get('scene_number', i_vid + 1),
                        'key_action': scene_c.get('key_plot_beat', '')
                    })
                    status_vid.write(f"Adding Scene {scene_c.get('scene_number', i_vid + 1)} to video.")
            if image_data_for_vid:
                status_vid.write("Calling video engine...")
                st.session_state.video_path = st.session_state.visual_engine.create_video_from_images(
                    image_data_for_vid,
                    overall_narration_path=st.session_state.overall_narration_audio_path,
                    output_filename="cinegen_ultra_animatic.mp4",
                    duration_per_image=5,
                    fps=24
                )
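                # create_video_from_images is expected to return the path of the rendered MP4 (or a
                # falsy value on failure); the narration track, when available, is presumably laid over
                # the image sequence, with each still held for duration_per_image seconds.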
                if st.session_state.video_path and os.path.exists(st.session_state.video_path):
                    status_vid.update(label="Ultra animatic assembled! 🎉", state="complete", expanded=False)
                    st.balloons()
                else:
                    status_vid.update(label="Video assembly failed. Check logs.", state="error", expanded=False)
            else:
                status_vid.update(label="No valid images for video.", state="error", expanded=False)
elif st.session_state.story_treatment_scenes:
    st.info("Generate visuals before assembling video.")
if st.session_state.video_path and os.path.exists(st.session_state.video_path):
    st.header("🎬 Generated Cinematic Animatic")
    try:
        with open(st.session_state.video_path, 'rb') as vf_obj:
            video_bytes = vf_obj.read()
        st.video(video_bytes, format="video/mp4")
        with open(st.session_state.video_path, "rb") as fp_dl:
            st.download_button(label="Download Ultra Animatic", data=fp_dl,
                               file_name=os.path.basename(st.session_state.video_path), mime="video/mp4",
                               use_container_width=True, key="download_ultra_video_btn")
    except Exception as e:
        st.error(f"Error displaying video: {e}")
# --- Footer ---
st.sidebar.markdown("---")
st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")