# CineGen AI / app.py
# (Hugging Face page header captured by the scrape: author mgbam — "Update app.py",
#  commit d8cdb3b verified — raw / history / blame view — file size 23 kB)
# app.py
import streamlit as st
from core.gemini_handler import GeminiHandler
from core.visual_engine import VisualEngine
from core.prompt_engineering import (
create_cinematic_treatment_prompt,
construct_dalle_prompt,
create_narration_script_prompt_enhanced,
create_scene_regeneration_prompt,
create_visual_regeneration_prompt
)
import os
# --- Configuration & Initialization ---
# Page config must be the first Streamlit command executed in the script.
st.set_page_config(page_title="CineGen AI Ultra+", layout="wide", initial_sidebar_state="expanded")
# --- Global State Variables & API Key Setup ---
def load_api_key(key_name_streamlit, key_name_env):
    """Look up an API key, preferring Streamlit secrets over the environment.

    Args:
        key_name_streamlit: Key name inside ``st.secrets``.
        key_name_env: Environment variable name used as the fallback.

    Returns:
        The key value, or None when neither source provides one.
    """
    api_key = None
    if hasattr(st, 'secrets'):
        try:
            # Touching st.secrets can raise when no secrets file is configured;
            # treat any failure as "no secret available" and fall through.
            if key_name_streamlit in st.secrets:
                api_key = st.secrets[key_name_streamlit]
        except Exception:
            pass
    if not api_key and key_name_env in os.environ:
        api_key = os.environ[key_name_env]
    return api_key
# One-time session bootstrap. Streamlit re-executes this whole file on every
# user interaction, so the heavyweight handler/engine objects are created once
# and cached in session_state behind the 'keys_loaded' flag.
if 'keys_loaded' not in st.session_state:
    st.session_state.GEMINI_API_KEY = load_api_key("GEMINI_API_KEY", "GEMINI_API_KEY")
    st.session_state.OPENAI_API_KEY = load_api_key("OPENAI_API_KEY", "OPENAI_API_KEY")
    st.session_state.ELEVENLABS_API_KEY = load_api_key("ELEVENLABS_API_KEY", "ELEVENLABS_API_KEY")
    st.session_state.PEXELS_API_KEY = load_api_key("PEXELS_API_KEY", "PEXELS_API_KEY")
    # Gemini drives all text generation; the app cannot run without it.
    if not st.session_state.GEMINI_API_KEY: st.error("Gemini API Key is essential and missing!"); st.stop()
    st.session_state.gemini_handler = GeminiHandler(api_key=st.session_state.GEMINI_API_KEY)
    st.session_state.visual_engine = VisualEngine(output_dir="temp_cinegen_media")
    # The remaining keys may be None — presumably VisualEngine degrades
    # gracefully when a key is missing; TODO confirm in core/visual_engine.
    st.session_state.visual_engine.set_openai_api_key(st.session_state.OPENAI_API_KEY)
    st.session_state.visual_engine.set_elevenlabs_api_key(st.session_state.ELEVENLABS_API_KEY)
    st.session_state.visual_engine.set_pexels_api_key(st.session_state.PEXELS_API_KEY)
    st.session_state.keys_loaded = True
# Seed every mutable app-state slot with an empty default on first run;
# values already present in session_state survive Streamlit reruns untouched.
for state_key, fresh_default in {
    'story_treatment_scenes': [], 'scene_dalle_prompts': [],
    'generated_visual_paths': [], 'video_path': None,
    'character_definitions': {}, 'global_style_additions': "",
    'overall_narration_audio_path': None, 'narration_script_display': "",
}.items():
    if state_key not in st.session_state:
        st.session_state[state_key] = fresh_default
# --- Helper Functions ---
def initialize_new_project():
    """Reset all per-project artifacts ahead of a fresh generation run.

    Note: character_definitions and global_style_additions are not cleared
    here; they persist across project runs.
    """
    cleared_slots = {
        'story_treatment_scenes': [],
        'scene_dalle_prompts': [],
        'generated_visual_paths': [],
        'video_path': None,
        'overall_narration_audio_path': None,
        'narration_script_display': "",
    }
    for slot, empty_value in cleared_slots.items():
        setattr(st.session_state, slot, empty_value)
def generate_visual_for_scene_core(scene_index, scene_data, version=1):
    """Build the DALL-E prompt for one scene and render its visual.

    Stores the prompt and the resulting image path in the parallel
    session-state lists at ``scene_index`` (growing them as needed).

    Args:
        scene_index: Zero-based slot in the per-scene state lists.
        scene_data: Scene dict from the treatment (uses 'scene_number').
        version: Revision counter embedded in the output filename.

    Returns:
        True when an image file was produced on disk, False otherwise.
    """
    state = st.session_state
    prompt_text = construct_dalle_prompt(
        scene_data, state.character_definitions, state.global_style_additions
    )
    if not prompt_text:
        return False

    # Grow both parallel lists so scene_index is a valid slot in each.
    needed_len = scene_index + 1
    state.scene_dalle_prompts.extend([""] * (needed_len - len(state.scene_dalle_prompts)))
    state.generated_visual_paths.extend([None] * (needed_len - len(state.generated_visual_paths)))
    state.scene_dalle_prompts[scene_index] = prompt_text

    scene_no = scene_data.get('scene_number', scene_index + 1)
    image_filename = f"scene_{scene_no}_visual_v{version}.png"
    rendered_path = state.visual_engine.generate_image_visual(prompt_text, scene_data, image_filename)

    if rendered_path and os.path.exists(rendered_path):
        state.generated_visual_paths[scene_index] = rendered_path
        return True
    state.generated_visual_paths[scene_index] = None
    return False
# --- UI Sidebar ---
# Creative inputs, the main generation trigger, and fine-tuning controls.
with st.sidebar:
    st.title("🎬 CineGen AI Ultra+")
    st.markdown("### Creative Seed")
    # Core inputs that seed the Gemini treatment prompt.
    user_idea = st.text_area("Core Story Idea / Theme:", "A lone wanderer searches for a mythical oasis in a vast, post-apocalyptic desert, haunted by mirages and mechanical scavengers.", height=120, key="user_idea_main")
    genre = st.selectbox("Primary Genre:", ["Cyberpunk", "Sci-Fi", "Fantasy", "Noir", "Thriller", "Western", "Post-Apocalyptic"], index=6, key="genre_main")
    mood = st.selectbox("Overall Mood:", ["Hopeful yet Desperate", "Mysterious & Eerie", "Gritty & Tense", "Epic & Awe-Inspiring", "Melancholy & Reflective"], index=0, key="mood_main")
    num_scenes = st.slider("Number of Key Scenes:", 1, 4, 2, key="num_scenes_main")
    # Maps the UI label to the internal guidance token passed to the prompt builder.
    creative_guidance_options = {"Standard": "standard", "More Artistic": "more_artistic", "Experimental Narrative": "experimental_narrative"}
    selected_creative_guidance = st.selectbox("AI Creative Guidance Level:", options=list(creative_guidance_options.keys()), key="creative_guidance_select")

    # Main pipeline trigger: treatment -> per-scene visuals -> narration script -> voiceover.
    if st.button("🌌 Generate Cinematic Treatment", type="primary", key="generate_treatment_btn", use_container_width=True):
        initialize_new_project()  # clear artifacts from any previous run
        if not user_idea.strip(): st.warning("Please provide a story idea.")
        else:
            with st.status("AI Director is envisioning your masterpiece...", expanded=True) as status:
                status.write("Phase 1: Gemini crafting cinematic treatment (scenes, style, camera, sound)... πŸ“œ")
                treatment_prompt = create_cinematic_treatment_prompt(user_idea, genre, mood, num_scenes, creative_guidance_options[selected_creative_guidance])
                try:
                    # Phase 1: scene-by-scene treatment from Gemini.
                    st.session_state.story_treatment_scenes = st.session_state.gemini_handler.generate_story_breakdown(treatment_prompt)
                    num_gen_scenes = len(st.session_state.story_treatment_scenes)
                    # Pre-size the per-scene parallel lists to match the treatment.
                    st.session_state.scene_dalle_prompts = [""] * num_gen_scenes
                    st.session_state.generated_visual_paths = [None] * num_gen_scenes
                    status.update(label="Treatment complete! Generating visuals...", state="running")
                    # Phase 2: one visual per scene; count successes for the sanity check below.
                    visual_successes = 0
                    for i_scene, scene_content in enumerate(st.session_state.story_treatment_scenes):
                        status.write(f" Creating visual for Scene {scene_content.get('scene_number', i_scene+1)}: {scene_content.get('scene_title','')}...")
                        if generate_visual_for_scene_core(i_scene, scene_content, version=1): visual_successes += 1
                    # Abort the run if nothing rendered (likely API key / quota problems).
                    if visual_successes == 0 and num_gen_scenes > 0:
                        status.update(label="Visual generation failed for all scenes. Check API keys/quota.", state="error", expanded=False); st.stop()
                    status.update(label="Visuals ready! Generating narration script...", state="running")
                    # Phase 3: narration script. NOTE(review): 'selected_voice_style' is never
                    # written in this file — confirm where it is set; defaults to 'cinematic_trailer'.
                    narration_prompt = create_narration_script_prompt_enhanced(st.session_state.story_treatment_scenes, mood, genre, st.session_state.get("selected_voice_style", "cinematic_trailer"))
                    # NOTE(review): generate_image_prompt is reused here to produce script text —
                    # confirm GeminiHandler has no dedicated text-generation method.
                    narr_script = st.session_state.gemini_handler.generate_image_prompt(narration_prompt)
                    st.session_state.narration_script_display = narr_script
                    status.update(label="Narration script ready! Synthesizing voice...", state="running")
                    # Phase 4: voiceover is optional — the animatic stays silent on failure.
                    st.session_state.overall_narration_audio_path = st.session_state.visual_engine.generate_narration_audio(narr_script)
                    if st.session_state.overall_narration_audio_path: status.update(label="Voiceover ready! ✨", state="running")
                    else: status.update(label="Voiceover failed. Video will be silent.", state="warning")
                    status.update(label="All components ready! View storyboard. πŸš€", state="complete", expanded=False)
                except Exception as e: status.update(label=f"Error during generation: {e}", state="error", expanded=True); st.stop()

    st.markdown("---")
    st.markdown("### Fine-Tuning Options")
    # Named character descriptions are stored keyed by lowercased name and are
    # passed to the DALL-E prompt builder (see generate_visual_for_scene_core).
    with st.expander("Define Characters", expanded=False):
        char_name_input = st.text_input("Character Name", key="char_name_adv_input_ultra")
        char_desc_input = st.text_area("Detailed Visual Description", key="char_desc_adv_input_ultra", height=100, placeholder="e.g., Jax: rugged male astronaut, mid-40s, salt-and-pepper short hair, cybernetic left eye glowing faintly blue, wearing a patched-up crimson flight suit.")
        if st.button("Save Character", key="add_char_adv_btn_ultra"):
            if char_name_input and char_desc_input: st.session_state.character_definitions[char_name_input.strip().lower()] = char_desc_input.strip(); st.success(f"Character '{char_name_input.strip()}' saved.")
            else: st.warning("Provide name and description.")
        if st.session_state.character_definitions:
            st.caption("Current Characters:")
            for k,v in st.session_state.character_definitions.items(): st.markdown(f"**{k.title()}:** _{v}_")
    # Global style additions are appended to every scene's DALL-E prompt.
    with st.expander("Global Style Overrides", expanded=False):
        predefined_styles = { "Default (Director's Choice)": "", "Hyper-Realistic Gritty Noir": "hyper-realistic gritty neo-noir, extreme detail, deep dynamic shadows, complex reflections on wet surfaces, cinematic film grain, desaturated palette with isolated vibrant neon accents (e.g. red, cyan), anamorphic lens distortion, atmospheric haze.", "Surreal Dreamscape Fantasy": "surreal dreamscape, epic fantasy elements, painterly with photorealistic details, impossible architecture, bioluminescent flora, otherworldly color palette (e.g., magenta skies, turquoise rivers), style of Roger Dean meets ZdzisΕ‚aw BeksiΕ„ski.", "Vintage Analog Sci-Fi": "70s/80s analog sci-fi film aesthetic, tangible practical effects look, subtle light leaks, lens flares, warm filmic tones mixed with cool blues, detailed retro-futuristic technology with chunky buttons and CRT screens."}
        selected_preset = st.selectbox("Base Style Preset:", options=list(predefined_styles.keys()), key="style_preset_select_adv_ultra")
        custom_keywords = st.text_area("Additional Custom Style Keywords:", key="custom_style_keywords_adv_input_ultra", height=80, placeholder="e.g., 'shot with Arri Alexa, shallow depth of field, golden hour tones'")
        current_style_desc = st.session_state.global_style_additions
        if st.button("Apply Global Styles", key="apply_styles_adv_btn_ultra"):
            # Preset first, then custom keywords appended with a comma.
            final_desc = predefined_styles[selected_preset];
            if custom_keywords.strip(): final_desc = f"{final_desc}, {custom_keywords.strip()}" if final_desc else custom_keywords.strip()
            st.session_state.global_style_additions = final_desc.strip(); current_style_desc = final_desc.strip()
            if current_style_desc: st.success("Global styles applied!")
            else: st.info("Global style additions cleared (using Director's per-scene choice).")
        if current_style_desc: st.caption(f"Active global style additions: \"{current_style_desc}\"")
    # Narrator voice is held on the visual engine instance, not in session keys.
    with st.expander("Voice Customization (ElevenLabs)", expanded=False):
        elevenlabs_voices = ["Rachel", "Adam", "Bella", "Antoni", "Elli", "Josh", "Arnold", "Domi", "Fin", "Sarah"]
        current_el_voice = st.session_state.visual_engine.elevenlabs_voice_id if hasattr(st.session_state, 'visual_engine') else "Rachel"
        selected_el_voice = st.selectbox("Narrator Voice:", elevenlabs_voices, index=elevenlabs_voices.index(current_el_voice) if current_el_voice in elevenlabs_voices else 0, key="el_voice_select_ultra")
        if st.button("Set Narrator Voice", key="set_voice_btn_ultra"):
            if hasattr(st.session_state, 'visual_engine'): st.session_state.visual_engine.elevenlabs_voice_id = selected_el_voice
            st.success(f"Narrator voice set to: {selected_el_voice}")
# --- Main Content Area ---
# Storyboard view: one row per scene with treatment details, the generated
# visual, and per-scene regeneration controls.
st.header("🎬 Cinematic Storyboard & Treatment")
if st.session_state.narration_script_display:
    with st.expander("πŸ“œ View Full Narration Script", expanded=False):
        st.markdown(f"> _{st.session_state.narration_script_display}_")
if not st.session_state.story_treatment_scenes:
    st.info("Use the sidebar to generate your cinematic treatment.")
else:
    for i_main, scene_content_display in enumerate(st.session_state.story_treatment_scenes):
        scene_num = scene_content_display.get('scene_number', i_main + 1)
        scene_title = scene_content_display.get('scene_title', 'Untitled Scene')
        # Stable-ish widget key prefix built from scene number + title slug,
        # so widgets don't collide across scenes.
        unique_key_base = f"scene_{scene_num}_{''.join(filter(str.isalnum, scene_title[:10]))}"
        if "director_note" in scene_content_display:
            st.info(f"🎬 Director's Note for Scene {scene_num}: {scene_content_display['director_note']}")
        st.subheader(f"SCENE {scene_num}: {scene_title.upper()}")
        col_details, col_visual = st.columns([0.45, 0.55])
        with col_details:
            with st.expander("πŸ“ Scene Treatment Details", expanded=True):
                st.markdown(f"**Emotional Beat:** {scene_content_display.get('emotional_beat', 'N/A')}")
                st.markdown(f"**Setting:** {scene_content_display.get('setting_description', 'N/A')}")
                st.markdown(f"**Characters:** {', '.join(scene_content_display.get('characters_involved', ['N/A']))}")
                st.markdown(f"**Character Focus Moment:** _{scene_content_display.get('character_focus_moment', 'N/A')}_")
                st.markdown(f"**Key Plot Beat:** {scene_content_display.get('key_plot_beat', 'N/A')}")
                st.markdown(f"**Dialogue Hook:** `\"{scene_content_display.get('suggested_dialogue_hook', '...')}\"`")
                st.markdown("---")
                # Keys carry a '감독' ("director") suffix produced by the treatment prompt.
                st.markdown(f"**🎬 Director's Visual Style:** {scene_content_display.get('PROACTIVE_visual_style_감독', 'N/A')}")
                st.markdown(f"**πŸŽ₯ Director's Camera Work:** {scene_content_display.get('PROACTIVE_camera_work_감독', 'N/A')}")
                st.markdown(f"**πŸ”Š Director's Sound Design:** {scene_content_display.get('PROACTIVE_sound_design_감독', 'N/A')}")
            current_dalle_prompt = st.session_state.scene_dalle_prompts[i_main] if i_main < len(st.session_state.scene_dalle_prompts) else None
            if current_dalle_prompt:
                with st.popover("πŸ‘οΈ View DALL-E Prompt"):
                    st.markdown(f"**Full DALL-E Prompt:**"); st.code(current_dalle_prompt, language='text')
            pexels_query_display = scene_content_display.get('pexels_search_query_감독', None)
            if pexels_query_display:
                st.caption(f"Suggested Pexels Query: `{pexels_query_display}`")
        with col_visual:
            current_img_path = st.session_state.generated_visual_paths[i_main] if i_main < len(st.session_state.generated_visual_paths) else None
            if current_img_path and os.path.exists(current_img_path):
                st.image(current_img_path, caption=f"Visual Concept for Scene {scene_num}: {scene_title}", use_column_width='always')
            else:
                if st.session_state.story_treatment_scenes: st.caption("Visual concept pending or failed.")
            # Regenerate the scene's treatment text (and then its visual).
            with st.popover(f"✏️ Edit Scene {scene_num} Treatment"):
                feedback_script_edit = st.text_area("Describe changes to treatment details:", key=f"treat_feed_{unique_key_base}", height=150)
                if st.button(f"πŸ”„ Update Scene {scene_num} Treatment", key=f"regen_treat_btn_{unique_key_base}"):
                    if feedback_script_edit:
                        with st.status(f"Updating Scene {scene_num} Treatment...", expanded=True) as status_treat_regen:
                            regen_prompt_text = create_scene_regeneration_prompt(scene_content_display, feedback_script_edit, st.session_state.story_treatment_scenes)
                            try:
                                updated_scene_data = st.session_state.gemini_handler.regenerate_scene_script_details(regen_prompt_text)
                                st.session_state.story_treatment_scenes[i_main] = updated_scene_data
                                status_treat_regen.update(label="Treatment updated! Regenerating visual & DALL-E prompt...", state="running")
                                # Derive the next version number from the existing filename's '_vN' suffix.
                                version_num = 1
                                if current_img_path:
                                    try:
                                        base,_=os.path.splitext(os.path.basename(current_img_path))
                                        if '_v' in base: version_num = int(base.split('_v')[-1])+1
                                        else: version_num = 2 # If no _v, start with v2 for regenerated
                                    except: version_num=2 # Fallback if parsing fails (NOTE(review): bare except also swallows SystemExit)
                                if generate_visual_for_scene_core(i_main, updated_scene_data, version=version_num):
                                    status_treat_regen.update(label="Scene Treatment & Visual Updated! πŸŽ‰", state="complete", expanded=False)
                                else: status_treat_regen.update(label="Treatment updated, visual failed.", state="warning", expanded=False)
                                st.rerun()
                            except Exception as e: status_treat_regen.update(label=f"Error: {e}", state="error")
                    else: st.warning("Please provide feedback for treatment regeneration.")
            # Refine only the DALL-E prompt (treatment text unchanged) and re-render.
            with st.popover(f"🎨 Edit Scene {scene_num} Visual Prompt"):
                dalle_prompt_to_edit = st.session_state.scene_dalle_prompts[i_main] if i_main < len(st.session_state.scene_dalle_prompts) else "No DALL-E prompt."
                st.caption("Current DALL-E Prompt:"); st.code(dalle_prompt_to_edit, language='text')
                feedback_visual_edit = st.text_area("Describe changes for the DALL-E prompt:", key=f"visual_feed_{unique_key_base}", height=150)
                if st.button(f"πŸ”„ Update Scene {scene_num} Visual Prompt & Image", key=f"regen_visual_btn_{unique_key_base}"):
                    if feedback_visual_edit:
                        with st.status(f"Refining DALL-E prompt & regenerating visual...", expanded=True) as status_visual_edit_regen:
                            refinement_req_prompt = create_visual_regeneration_prompt(
                                dalle_prompt_to_edit, feedback_visual_edit, scene_content_display,
                                st.session_state.character_definitions, st.session_state.global_style_additions
                            )
                            try:
                                refined_dalle_prompt = st.session_state.gemini_handler.generate_image_prompt(refinement_req_prompt)
                                # NOTE(review): generate_visual_for_scene_core rebuilds the prompt and
                                # overwrites this refined one — confirm intended; see that function.
                                st.session_state.scene_dalle_prompts[i_main] = refined_dalle_prompt
                                status_visual_edit_regen.update(label="DALL-E prompt refined! Regenerating visual...", state="running")
                                # Same '_vN' version bump as in the treatment editor above.
                                version_num = 1
                                if current_img_path:
                                    try:
                                        base,_=os.path.splitext(os.path.basename(current_img_path))
                                        if '_v' in base: version_num = int(base.split('_v')[-1])+1
                                        else: version_num = 2
                                    except: version_num=2
                                if generate_visual_for_scene_core(i_main, scene_content_display, version=version_num):
                                    status_visual_edit_regen.update(label="Visual Updated! πŸŽ‰", state="complete", expanded=False)
                                else: status_visual_edit_regen.update(label="Prompt refined, visual failed.", state="warning", expanded=False)
                                st.rerun()
                            except Exception as e: status_visual_edit_regen.update(label=f"Error: {e}", state="error")
                    else: st.warning("Please provide feedback for visual prompt regeneration.")
        # Visual separator between scene rows.
        st.markdown("---")
# Animatic assembly: enabled once at least one scene visual exists on disk.
if st.session_state.story_treatment_scenes and any(p for p in st.session_state.generated_visual_paths if p is not None):
    if st.button("🎬 Assemble Narrated Cinematic Animatic", key="assemble_ultra_video_btn", type="primary", use_container_width=True):
        with st.status("Assembling Ultra Animatic...", expanded=True) as status_vid:
            # Collect only scenes whose image file still exists.
            image_data_for_vid = []
            for i_vid, scene_c in enumerate(st.session_state.story_treatment_scenes):
                img_p = st.session_state.generated_visual_paths[i_vid] if i_vid < len(st.session_state.generated_visual_paths) else None
                if img_p and os.path.exists(img_p):
                    image_data_for_vid.append({
                        'path':img_p, 'scene_num':scene_c.get('scene_number',i_vid+1),
                        'key_action':scene_c.get('key_plot_beat','')
                    }); status_vid.write(f"Adding Scene {scene_c.get('scene_number', i_vid + 1)} to video.")
            if image_data_for_vid:
                status_vid.write("Calling video engine...")
                # Narration path may be None -> engine produces a silent animatic.
                st.session_state.video_path = st.session_state.visual_engine.create_video_from_images(
                    image_data_for_vid,
                    overall_narration_path=st.session_state.overall_narration_audio_path,
                    output_filename="cinegen_ultra_animatic.mp4",
                    duration_per_image=5,
                    fps=24
                )
                if st.session_state.video_path and os.path.exists(st.session_state.video_path):
                    status_vid.update(label="Ultra animatic assembled! πŸŽ‰", state="complete", expanded=False); st.balloons()
                else: status_vid.update(label="Video assembly failed. Check logs.", state="error", expanded=False)
            else: status_vid.update(label="No valid images for video.", state="error", expanded=False)
elif st.session_state.story_treatment_scenes: st.info("Generate visuals before assembling video.")
# Render the finished animatic; video_path persists across reruns in session state.
if st.session_state.video_path and os.path.exists(st.session_state.video_path):
    st.header("🎬 Generated Cinematic Animatic")
    try:
        # Read fully into memory for inline playback; hand the open file
        # object to the download button separately.
        with open(st.session_state.video_path, 'rb') as vf_obj: video_bytes = vf_obj.read()
        st.video(video_bytes, format="video/mp4")
        with open(st.session_state.video_path, "rb") as fp_dl:
            st.download_button(label="Download Ultra Animatic", data=fp_dl,
                               file_name=os.path.basename(st.session_state.video_path), mime="video/mp4",
                               use_container_width=True, key="download_ultra_video_btn" )
    except Exception as e: st.error(f"Error displaying video: {e}")
# --- Footer ---
st.sidebar.markdown("---")
st.sidebar.caption("CineGen AI Ultra+ | Visionary Cinematic Pre-Production")