# core/visual_engine.py from PIL import Image, ImageDraw, ImageFont, ImageOps import base64 # For Data URI conversion # --- MONKEY PATCH --- try: if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS elif hasattr(Image, 'LANCZOS'): if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS elif not hasattr(Image, 'ANTIALIAS'): print("WARNING: Pillow ANTIALIAS/Resampling issue.") except Exception as e_mp: print(f"WARNING: ANTIALIAS patch error: {e_mp}") from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip, CompositeVideoClip, AudioFileClip) import moviepy.video.fx.all as vfx import numpy as np import os import openai import requests import io import time import random import logging import mimetypes # For Data URI logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # --- SERVICE CLIENT IMPORTS --- ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None try: from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.") except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.") RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClient = None # Renamed for clarity try: from runwayml import RunwayML as ImportedRunwayMLClient # Actual SDK import RunwayMLAPIClient = ImportedRunwayMLClient RUNWAYML_SDK_IMPORTED = True logger.info("RunwayML SDK imported successfully.") except ImportError: logger.warning("RunwayML SDK not found (pip install runwayml). RunwayML video generation will be disabled.") except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK: {e_runway_sdk}. RunwayML features disabled.") class VisualEngine: def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"): self.output_dir = output_dir os.makedirs(self.output_dir, exist_ok=True) self.font_filename = "DejaVuSans-Bold.ttf" font_paths_to_try = [ self.font_filename, "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", "/System/Library/Fonts/Supplemental/Arial.ttf", "C:/Windows/Fonts/arial.ttf", f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"] self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None) self.font_size_pil = 20; self.video_overlay_font_size = 30; self.video_overlay_font_color = 'white' self.video_overlay_font = 'DejaVu-Sans-Bold' try: self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default() if self.font_path_pil: logger.info(f"Pillow font: {self.font_path_pil}.") else: logger.warning("Default Pillow font."); self.font_size_pil = 10 except IOError as e_font: logger.error(f"Pillow font IOError: {e_font}. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10 self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024" self.video_frame_size = (1280, 720) # Default, will be mapped to Runway ratio self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True) else: self.elevenlabs_voice_settings = None self.pexels_api_key = None; self.USE_PEXELS = False self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient: # Initialize if SDK is available try: # The SDK expects RUNWAYML_API_SECRET env var. # If your key is passed directly, you might need to initialize differently or set the env var. if os.getenv("RUNWAYML_API_SECRET"): self.runway_client = RunwayMLAPIClient() logger.info("RunwayML Client initialized using RUNWAYML_API_SECRET env var.") else: logger.warning("RUNWAYML_API_SECRET env var not set. RunwayML client not initialized here (will try in set_runway_api_key).") except Exception as e_runway_init: logger.error(f"Failed to initialize RunwayML client during __init__: {e_runway_init}", exc_info=True) logger.info("VisualEngine initialized.") def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}") def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None): self.elevenlabs_api_key=api_key if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient: # This API key is for the client try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).") except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK issue).") def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}") def set_runway_api_key(self, k): # For RunwayML self.runway_api_key = k # Store the key if k: if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient: if not self.runway_client: # If not initialized in __init__ try: # Ensure RUNWAYML_API_SECRET is set if SDK relies on it if not os.getenv("RUNWAYML_API_SECRET") and k: logger.info("Setting RUNWAYML_API_SECRET environment variable from provided key for SDK.") os.environ["RUNWAYML_API_SECRET"] = k # Make key available to SDK self.runway_client = RunwayMLAPIClient() self.USE_RUNWAYML = True logger.info("RunwayML Client initialized successfully via set_runway_api_key.") except Exception as e_client_init: logger.error(f"RunwayML Client initialization failed in set_runway_api_key: {e_client_init}", exc_info=True) self.USE_RUNWAYML = False else: # Client was already initialized (e.g., from env var in __init__) self.USE_RUNWAYML = True logger.info("RunwayML Client already initialized.") else: # SDK not imported logger.warning("RunwayML SDK not imported. API key set, but direct HTTP calls would be needed (not implemented).") self.USE_RUNWAYML = False # Can't use if SDK is the only implemented path else: self.USE_RUNWAYML = False logger.info("RunwayML Disabled (no API key provided to set_runway_api_key).") def _image_to_data_uri(self, image_path): try: mime_type, _ = mimetypes.guess_type(image_path) if not mime_type: # Fallback for common image types if mimetypes fails (e.g., on some systems) ext = os.path.splitext(image_path)[1].lower() if ext == ".png": mime_type = "image/png" elif ext in [".jpg", ".jpeg"]: mime_type = "image/jpeg" else: logger.warning(f"Could not determine MIME type for {image_path}. Defaulting to application/octet-stream.") mime_type = "application/octet-stream" # Fallback, Runway might reject this with open(image_path, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8') data_uri = f"data:{mime_type};base64,{encoded_string}" logger.debug(f"Generated data URI for {image_path} (first 100 chars): {data_uri[:100]}") return data_uri except Exception as e: logger.error(f"Error converting image {image_path} to data URI: {e}", exc_info=True) return None def _map_resolution_to_runway_ratio(self, width, height): # Gen-4 supports specific ratios. Find the closest supported or default. # Example: 1280x720 -> "1280:720" # This needs to be robust. For now, we'll assume app.py sends a valid W:H string # or we use a default that matches self.video_frame_size if it's standard. if width == 1280 and height == 720: return "1280:720" if width == 720 and height == 1280: return "720:1280" # Add more mappings based on Gen-4 supported ratios if your self.video_frame_size can vary logger.warning(f"Unsupported resolution {width}x{height} for Runway Gen-4 mapping. Defaulting to 1280:720.") return "1280:720" # Default def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5): if not self.USE_RUNWAYML or not self.runway_client: # Check for initialized client logger.warning("RunwayML not enabled or client not initialized. Cannot generate video clip.") return None if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 requires an input image. Path not provided or invalid: {input_image_path}") return None image_data_uri = self._image_to_data_uri(input_image_path) if not image_data_uri: return None runway_duration = 10 if target_duration_seconds > 7 else 5 # Map to 5s or 10s runway_ratio_str = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1]) output_video_filename = scene_identifier_filename_base.replace(".png", f"_runway_gen4_d{runway_duration}s.mp4") output_video_filepath = os.path.join(self.output_dir, output_video_filename) logger.info(f"Initiating Runway Gen-4 task: motion='{text_prompt_for_motion[:100]}...', image='{os.path.basename(input_image_path)}', dur={runway_duration}s, ratio='{runway_ratio_str}'") try: task = self.runway_client.image_to_video.create( model='gen4_turbo', prompt_image=image_data_uri, prompt_text=text_prompt_for_motion, duration=runway_duration, ratio=runway_ratio_str, # e.g., "1280:720" # seed=random.randint(0, 4294967295), # Optional # Other Gen-4 params can be added here: motion_score, upscale etc. ) logger.info(f"Runway Gen-4 task created with ID: {task.id}. Polling for completion...") poll_interval = 10 # seconds max_polls = 36 # Max 6 minutes (36 * 10s) for _ in range(max_polls): time.sleep(poll_interval) task_details = self.runway_client.tasks.retrieve(id=task.id) logger.info(f"Runway task {task.id} status: {task_details.status}") if task_details.status == 'SUCCEEDED': # The SDK docs don't explicitly show how to get the output URL from `task_details`. # Common patterns are `task_details.output.url` or `task_details.artifacts[0].url`. # This is a GUESS based on typical API structures. You MUST verify this. output_url = None if hasattr(task_details, 'output') and task_details.output and hasattr(task_details.output, 'url'): output_url = task_details.output.url elif hasattr(task_details, 'artifacts') and task_details.artifacts and isinstance(task_details.artifacts, list) and len(task_details.artifacts) > 0: # Assuming the first artifact is the video and has a URL if hasattr(task_details.artifacts[0], 'url'): output_url = task_details.artifacts[0].url elif hasattr(task_details.artifacts[0], 'download_url'): # Another common name output_url = task_details.artifacts[0].download_url if not output_url: logger.error(f"Runway task {task.id} SUCCEEDED, but no output URL found in task details: {task_details}") # Attempt to log the full task_details object for inspection try: logger.error(f"Full task details: {vars(task_details)}") except: pass return None logger.info(f"Runway task {task.id} SUCCEEDED. Downloading video from: {output_url}") video_response = requests.get(output_url, stream=True, timeout=300) # 5 min timeout for download video_response.raise_for_status() with open(output_video_filepath, 'wb') as f: for chunk in video_response.iter_content(chunk_size=8192): f.write(chunk) logger.info(f"Runway Gen-4 video successfully downloaded and saved to: {output_video_filepath}") return output_video_filepath elif task_details.status in ['FAILED', 'ABORTED']: error_message = "Unknown error" if hasattr(task_details, 'error_message') and task_details.error_message: error_message = task_details.error_message elif hasattr(task_details, 'output') and hasattr(task_details.output, 'error') and task_details.output.error: error_message = task_details.output.error logger.error(f"Runway task {task.id} status: {task_details.status}. Error: {error_message}") return None logger.warning(f"Runway task {task.id} timed out after {max_polls * poll_interval} seconds.") return None except AttributeError as ae: # If SDK methods are not as expected logger.error(f"AttributeError with RunwayML SDK: {ae}. Ensure SDK is up to date and methods match.", exc_info=True) return None except Exception as e_runway: logger.error(f"Error during Runway Gen-4 API call or processing: {e_runway}", exc_info=True) return None # --- Other helper methods (_get_text_dimensions, _create_placeholder_image_content, _search_pexels_image, _create_placeholder_video_content) --- # --- Keep these as they were in the previous full rewrite unless they need minor adjustments for the Gen-4 workflow --- def _get_text_dimensions(self,tc,fo): di=fo.size if hasattr(fo,'size') else self.font_size_pil; return (0,di) if not tc else (lambda b:(b[2]-b[0],b[3]-b[1] if b[3]-b[1]>0 else di))(fo.getbbox(tc)) if hasattr(fo,'getbbox') else (lambda s:(s[0],s[1] if s[1]>0 else di))(fo.getsize(tc)) if hasattr(fo,'getsize') else (int(len(tc)*di*0.6),int(di*1.2)) def _create_placeholder_image_content(self,td,fn,sz=None): if sz is None: sz = self.video_frame_size; img=Image.new('RGB',sz,color=(20,20,40));d=ImageDraw.Draw(img);pd=25;mw=sz[0]-(2*pd);ls=[]; if not td: td="(Placeholder Image)" ws=td.split();cl="" for w in ws: tl=cl+w+" ";raw_w,_=self._get_text_dimensions(tl,self.font);check_w=raw_w if raw_w > 0 else len(tl)*(self.font_size_pil*0.6); # Corrected w to check_w if check_w<=mw:cl=tl;else: # Corrected w to check_w if cl:ls.append(cl.strip());cl=w+" " if cl.strip():ls.append(cl.strip()) if not ls and td:ls.append(td[:int(mw//(self._get_text_dimensions("A",self.font)[0]or 10))]+"..." if td else "(Text too long)");elif not ls:ls.append("(Placeholder Error)") _,slh=self._get_text_dimensions("Ay",self.font);slh=slh if slh>0 else self.font_size_pil+2;mld=min(len(ls),(sz[1]-(2*pd))//(slh+2)) if slh>0 else 1; if mld<=0:mld=1;yts=pd+(sz[1]-(2*pd)-mld*(slh+2))/2.0;yt=yts for i in range(mld):lc=ls[i];lw,_=self._get_text_dimensions(lc,self.font);xt=(sz[0]-lw)/2.0;d.text((xt,yt),lc,font=self.font,fill=(200,200,180));yt+=slh+2 if i==6 and mld>7:d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break fp=os.path.join(self.output_dir,fn); try:img.save(fp);return fp except Exception as e:logger.error(f"Save placeholder img {fp}: {e}",exc_info=True);return None def _search_pexels_image(self, q, ofnb): if not self.USE_PEXELS or not self.pexels_api_key: return None; h={"Authorization":self.pexels_api_key};p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"} pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg");fp=os.path.join(self.output_dir,pfn) try: logger.info(f"Pexels search: '{q}'");eq=" ".join(q.split()[:5]);p["query"]=eq;r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20) r.raise_for_status();d=r.json() if d.get("photos") and len(d["photos"])>0:pu=d["photos"][0]["src"]["large2x"];ir=requests.get(pu,timeout=60);ir.raise_for_status();id_img=Image.open(io.BytesIO(ir.content)) # Renamed id to id_img if id_img.mode!='RGB':id_img=id_img.convert('RGB');id_img.save(fp);logger.info(f"Pexels saved: {fp}");return fp else: logger.info(f"No Pexels for: '{eq}'") except Exception as e:logger.error(f"Pexels error ('{q}'): {e}",exc_info=True);return None def _create_placeholder_video_content(self, td, fn, dur=4, sz=None): # Generic placeholder if sz is None: sz = self.video_frame_size; fp = os.path.join(self.output_dir, fn); tc = None try: tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur) tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2) logger.info(f"Generic placeholder video: {fp}"); return fp except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None finally: if tc and hasattr(tc, 'close'): tc.close() # --- generate_scene_asset (Main asset generation logic) --- def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video, scene_data, scene_identifier_filename_base, generate_as_video_clip=False, runway_target_duration=5): base_name, _ = os.path.splitext(scene_identifier_filename_base) # Default asset_info for error state asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, # Default to image prompt 'error_message': 'Asset generation not fully attempted'} # STEP 1: Generate/acquire the base image for Runway Gen-4 or for direct image output input_image_for_runway_path = None # Use a distinct name for the base image if it's only an intermediate step for video base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png") base_image_filepath = os.path.join(self.output_dir, base_image_filename) # Try DALL-E for base image if self.USE_AI_IMAGE_GENERATION and self.openai_api_key: max_r, att_n = 2, 0 for att_n in range(max_r): try: logger.info(f"Attempt {att_n+1} DALL-E (base image): {image_generation_prompt_text[:100]}...") cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0) r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid") iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None) if rp: logger.info(f"DALL-E revised: {rp[:100]}...") ir = requests.get(iu, timeout=120); ir.raise_for_status() id_img = Image.open(io.BytesIO(ir.content)) if id_img.mode != 'RGB': id_img = id_img.convert('RGB') id_img.save(base_image_filepath); logger.info(f"DALL-E base image saved: {base_image_filepath}"); input_image_for_runway_path = base_image_filepath asset_info = {'path': base_image_filepath, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp} break # DALL-E success except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); asset_info['error_message']=str(e) except Exception as e: logger.error(f"DALL-E base image error: {e}", exc_info=True); asset_info['error_message']=str(e); break if asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.") # Try Pexels if DALL-E failed or not used if asset_info['error'] and self.USE_PEXELS: logger.info("Attempting Pexels for base image.") pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}") pp = self._search_pexels_image(pqt, base_image_filename) # Pass base image filename if pp: input_image_for_runway_path = pp; asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"} else: current_em = asset_info.get('error_message',""); asset_info['error_message']=(current_em + " Pexels failed for base image.").strip() # Fallback to placeholder for base image if all above failed if asset_info['error']: logger.warning("Base image (DALL-E/Pexels) failed. Using placeholder for base image.") ppt = asset_info.get('prompt_used', image_generation_prompt_text) # Use the original image prompt php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", base_image_filename) if php: input_image_for_runway_path = php; asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt} else: current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em + " Base placeholder failed.").strip() # STEP 2: If video clip is requested, use the generated base image with RunwayML if generate_as_video_clip: if not input_image_for_runway_path: # If base image generation totally failed logger.error("Cannot generate RunwayML video: base image path is missing or generation failed.") asset_info['error'] = True # Ensure error state is propagated asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image missing, Runway video aborted.").strip() asset_info['type'] = 'none' # No valid asset produced return asset_info if self.USE_RUNWAYML: logger.info(f"Proceeding to Runway Gen-4 video for {base_name} using base image: {input_image_for_runway_path}") video_path = self._generate_video_clip_with_runwayml( text_prompt_for_motion=motion_prompt_text_for_video, input_image_path=input_image_for_runway_path, scene_identifier_filename_base=base_name, # _runway_gen4.mp4 will be appended target_duration_seconds=runway_target_duration ) if video_path and os.path.exists(video_path): # Success generating video asset_info = {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, # This is the prompt for Runway 'base_image_path': input_image_for_runway_path} else: # RunwayML failed, return the base image info but mark video as failed logger.warning(f"RunwayML video generation failed for {base_name}. Using the base image as fallback.") asset_info['error'] = True # Video step specifically failed asset_info['error_message'] = (asset_info.get('error_message', "Base image generated.") + " RunwayML video step failed; using base image instead.").strip() asset_info['path'] = input_image_for_runway_path # Path of the base image asset_info['type'] = 'image' # Fallback asset type is image asset_info['prompt_used'] = image_generation_prompt_text # Prompt for the base image else: # RunwayML not enabled, use base image logger.warning("RunwayML selected but not enabled/configured. Using base image.") asset_info['error'] = True # Mark that video wasn't generated asset_info['error_message'] = (asset_info.get('error_message', "Base image generated.") + " RunwayML disabled; using base image.").strip() asset_info['path'] = input_image_for_runway_path asset_info['type'] = 'image' asset_info['prompt_used'] = image_generation_prompt_text # If not generate_as_video_clip, asset_info already holds the result of image generation return asset_info # --- generate_narration_audio (Keep as before) --- def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"): if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None; afp=os.path.join(self.output_dir,output_filename) try: logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None if hasattr(self.elevenlabs_client,'text_to_speech')and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()") elif hasattr(self.elevenlabs_client,'generate_stream'):asm=self.elevenlabs_client.generate_stream;logger.info("Using 11L .generate_stream()") elif hasattr(self.elevenlabs_client,'generate'):logger.info("Using 11L .generate()");vp=Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings)if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id);ab=self.elevenlabs_client.generate(text=text_to_narrate,voice=vp,model="eleven_multilingual_v2"); with open(afp,"wb")as f:f.write(ab);logger.info(f"11L audio (non-stream): {afp}");return afp else:logger.error("No 11L audio method.");return None if asm:vps={"voice_id":str(self.elevenlabs_voice_id)} if self.elevenlabs_voice_settings: if hasattr(self.elevenlabs_voice_settings,'model_dump'):vps["voice_settings"]=self.elevenlabs_voice_settings.model_dump() elif hasattr(self.elevenlabs_voice_settings,'dict'):vps["voice_settings"]=self.elevenlabs_voice_settings.dict() else:vps["voice_settings"]=self.elevenlabs_voice_settings adi=asm(text=text_to_narrate,model_id="eleven_multilingual_v2",**vps) with open(afp,"wb")as f: for c in adi: if c:f.write(c) logger.info(f"11L audio (stream): {afp}");return afp except Exception as e:logger.error(f"11L audio error: {e}",exc_info=True);return None # --- assemble_animatic_from_assets (Keep robust image processing, C-contiguous, debug saves, pix_fmt) --- def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24): if not asset_data_list: logger.warning("No assets for animatic."); return None processed_clips = []; narration_clip = None; final_clip = None # final_composite_clip_obj renamed to final_clip logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.") for i, asset_info in enumerate(asset_data_list): asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5) scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '') logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s") if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip."); continue if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip."); continue current_scene_mvpy_clip = None try: if asset_type == 'image': pil_img = Image.open(asset_path); logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}") img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy() thumb = img_rgba.copy(); rf = Image.Resampling.LANCZOS if hasattr(Image.Resampling,'LANCZOS') else Image.BILINEAR; thumb.thumbnail(self.video_frame_size,rf) cv_rgba = Image.new('RGBA',self.video_frame_size,(0,0,0,0)); xo,yo=(self.video_frame_size[0]-thumb.width)//2,(self.video_frame_size[1]-thumb.height)//2 cv_rgba.paste(thumb,(xo,yo),thumb) final_rgb_pil = Image.new("RGB",self.video_frame_size,(0,0,0)); final_rgb_pil.paste(cv_rgba,mask=cv_rgba.split()[3]) dbg_path = os.path.join(self.output_dir,f"debug_PRE_NUMPY_S{scene_num}.png"); final_rgb_pil.save(dbg_path); logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}") frame_np = np.array(final_rgb_pil,dtype=np.uint8); if not frame_np.flags['C_CONTIGUOUS']: frame_np=np.ascontiguousarray(frame_np,dtype=np.uint8) logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}") if frame_np.size==0 or frame_np.ndim!=3 or frame_np.shape[2]!=3: logger.error(f"S{scene_num}: Invalid NumPy. Skip."); continue clip_base = ImageClip(frame_np,transparent=False).set_duration(scene_dur) mvpy_dbg_path=os.path.join(self.output_dir,f"debug_MOVIEPY_FRAME_S{scene_num}.png"); clip_base.save_frame(mvpy_dbg_path,t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}") clip_fx = clip_base try: es=random.uniform(1.03,1.08); clip_fx=clip_base.fx(vfx.resize,lambda t:1+(es-1)*(t/scene_dur) if scene_dur>0 else 1).set_position('center') except Exception as e: logger.error(f"S{scene_num} Ken Burns error: {e}",exc_info=False) current_scene_mvpy_clip = clip_fx elif asset_type == 'video': src_clip=None try: src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False) # Explicitly no audio from source video clips tmp_clip=src_clip if src_clip.duration!=scene_dur: if src_clip.duration>scene_dur:tmp_clip=src_clip.subclip(0,scene_dur) else: if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur) else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).") current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur) if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size) except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue finally: if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip,'close'):src_clip.close() else: logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip."); continue if current_scene_mvpy_clip and key_action: try: to_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8)if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration to_start=0.25 if to_dur > 0 : # Only add text if duration is positive txt_c=TextClip(f"Scene {scene_num}\n{key_action}",fontsize=self.video_overlay_font_size,color=self.video_overlay_font_color,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center',0.92),relative=True) current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,txt_c],size=self.video_frame_size,use_bgclip=True) else: logger.warning(f"S{scene_num}: Text overlay duration is zero or negative. Skipping text overlay.") except Exception as e:logger.error(f"S{scene_num} TextClip error:{e}. No text.",exc_info=True) if current_scene_mvpy_clip:processed_clips.append(current_scene_mvpy_clip);logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.") except Exception as e:logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}",exc_info=True) finally: if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'): # Check if it's a VideoFileClip instance that needs closing if hasattr(current_scene_mvpy_clip, 'reader') and current_scene_mvpy_clip.reader: current_scene_mvpy_clip.close() elif not hasattr(current_scene_mvpy_clip, 'reader'): current_scene_mvpy_clip.close() # For ImageClip if close() is added if not processed_clips:logger.warning("No clips processed. Abort.");return None td=0.75 try: logger.info(f"Concatenating {len(processed_clips)} clips."); if len(processed_clips)>1:final_clip=concatenate_videoclips(processed_clips,padding=-td if td>0 else 0,method="compose") elif processed_clips:final_clip=processed_clips[0] if not final_clip:logger.error("Concatenation failed.");return None logger.info(f"Concatenated dur:{final_clip.duration:.2f}s") if td>0 and final_clip.duration>0: if final_clip.duration>td*2:final_clip=final_clip.fx(vfx.fadein,td).fx(vfx.fadeout,td) else:final_clip=final_clip.fx(vfx.fadein,min(td,final_clip.duration/2.0)) if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration>0: try:narration_clip=AudioFileClip(overall_narration_path);final_clip=final_clip.set_audio(narration_clip);logger.info("Narration added.") except Exception as e:logger.error(f"Narration add error:{e}",exc_info=True) elif final_clip.duration<=0:logger.warning("Video no duration. No audio.") if final_clip and final_clip.duration>0: op=os.path.join(self.output_dir,output_filename);logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)") final_clip.write_videofile(op,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count()or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"]) logger.info(f"Video created:{op}");return op else:logger.error("Final clip invalid. No write.");return None except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None finally: logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.") all_clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else []) for clip_obj in all_clips_to_close: # Use a different name to avoid scope issues if clip_obj and hasattr(clip_obj, 'close'): try: clip_obj.close() except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {type(clip_obj).__name__} - {e_close}")