# core/visual_engine.py from PIL import Image, ImageDraw, ImageFont, ImageOps # --- MONKEY PATCH FOR Image.ANTIALIAS --- try: if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+ if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS elif hasattr(Image, 'LANCZOS'): # Pillow 8 if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS elif not hasattr(Image, 'ANTIALIAS'): print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. Video effects might fail.") except Exception as e_mp: print(f"WARNING: ANTIALIAS monkey-patch error: {e_mp}") # --- END MONKEY PATCH --- from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip, CompositeVideoClip, AudioFileClip) import moviepy.video.fx.all as vfx import numpy as np import os import openai import requests import io import time import random import logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # --- ElevenLabs Client Import --- ELEVENLABS_CLIENT_IMPORTED = False ElevenLabsAPIClient = None Voice = None VoiceSettings = None try: from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings ElevenLabsAPIClient = ImportedElevenLabsClient Voice = ImportedVoice VoiceSettings = ImportedVoiceSettings ELEVENLABS_CLIENT_IMPORTED = True logger.info("ElevenLabs client components imported.") except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.") # --- RunwayML Client Import (Placeholder) --- RUNWAYML_SDK_IMPORTED = False RunwayMLClient = None try: logger.info("RunwayML SDK import is a placeholder.") except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.") except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.") class VisualEngine: def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"): self.output_dir = output_dir os.makedirs(self.output_dir, exist_ok=True) self.font_filename = "DejaVuSans-Bold.ttf" font_paths_to_try = [ self.font_filename, f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf", f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf" ] self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None) self.font_size_pil = 20 self.video_overlay_font_size = 30 self.video_overlay_font_color = 'white' self.video_overlay_font = 'DejaVu-Sans-Bold' try: if self.font_path_pil: self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) logger.info(f"Pillow font loaded: {self.font_path_pil}.") else: self.font = ImageFont.load_default() logger.warning("Using default Pillow font.") self.font_size_pil = 10 except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default.") self.font = ImageFont.load_default() self.font_size_pil = 10 self.openai_api_key = None self.USE_AI_IMAGE_GENERATION = False self.dalle_model = "dall-e-3" self.image_size_dalle3 = "1792x1024" self.video_frame_size = (1280, 720) self.elevenlabs_api_key = None self.USE_ELEVENLABS = False self.elevenlabs_client = None self.elevenlabs_voice_id = default_elevenlabs_voice_id if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings( stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True ) else: self.elevenlabs_voice_settings = None self.pexels_api_key = None self.USE_PEXELS = False self.runway_api_key = None self.USE_RUNWAYML = False self.runway_client = None logger.info("VisualEngine initialized.") def set_openai_api_key(self, k): self.openai_api_key = k self.USE_AI_IMAGE_GENERATION = bool(k) logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}") def set_elevenlabs_api_key(self, api_key, voice_id_from_secret=None): self.elevenlabs_api_key = api_key if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient: try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key) self.USE_ELEVENLABS = bool(self.elevenlabs_client) logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).") except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True) self.USE_ELEVENLABS = False else: self.USE_ELEVENLABS = False logger.info("ElevenLabs Disabled (no key or SDK).") def set_pexels_api_key(self, k): self.pexels_api_key = k self.USE_PEXELS = bool(k) logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}") def set_runway_api_key(self, k): self.runway_api_key = k if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient: try: self.USE_RUNWAYML = True logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}") except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True) self.USE_RUNWAYML = False elif k: self.USE_RUNWAYML = True logger.info("RunwayML API Key set (direct API or placeholder).") else: self.USE_RUNWAYML = False logger.info("RunwayML Disabled (no API key).") def _get_text_dimensions(self, text_content, font_obj): default_line_height = getattr(font_obj, 'size', self.font_size_pil) if not text_content: return 0, default_line_height try: if hasattr(font_obj, 'getbbox'): bbox = font_obj.getbbox(text_content) width = bbox[2] - bbox[0] height = bbox[3] - bbox[1] return width, height if height > 0 else default_line_height elif hasattr(font_obj, 'getsize'): width, height = font_obj.getsize(text_content) return width, height if height > 0 else default_line_height else: return int(len(text_content) * default_line_height * 0.6), int(default_line_height * 1.2) except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}") return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2) def _create_placeholder_image_content(self, text_description, filename, size=None): if size is None: size = self.video_frame_size img = Image.new('RGB', size, color=(20, 20, 40)) draw = ImageDraw.Draw(img) padding = 25 max_text_width = size[0] - (2 * padding) lines = [] if not text_description: text_description = "(Placeholder: No text description provided)" words = text_description.split() current_line = "" for word in words: test_line = current_line + word + " " line_width_test, _ = self._get_text_dimensions(test_line.strip(), self.font) if line_width_test <= max_text_width: current_line = test_line else: if current_line.strip(): lines.append(current_line.strip()) word_width, _ = self._get_text_dimensions(word, self.font) if word_width > max_text_width: avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10 chars_that_fit = int(max_text_width / avg_char_w) if avg_char_w > 0 else 10 if len(word) > chars_that_fit: lines.append(word[:chars_that_fit-3] + "...") else: lines.append(word) current_line = "" else: current_line = word + " " if current_line.strip(): lines.append(current_line.strip()) if not lines and text_description: avg_char_w = self._get_text_dimensions("A", self.font)[0] or 10 chars_that_fit = int(max_text_width / avg_char_w) if avg_char_w > 0 else 10 if len(text_description) > chars_that_fit: lines.append(text_description[:chars_that_fit-3] + "...") else: lines.append(text_description) elif not lines: lines.append("(Placeholder Text Error)") _, single_line_height = self._get_text_dimensions("Ay", self.font) single_line_height = single_line_height if single_line_height > 0 else (self.font_size_pil + 2) line_spacing = 2 max_lines_to_display = min(len(lines), (size[1] - (2 * padding)) // (single_line_height + line_spacing)) if single_line_height > 0 else 1 if max_lines_to_display <= 0: max_lines_to_display = 1 total_text_block_height = max_lines_to_display * single_line_height + (max_lines_to_display - 1) * line_spacing y_text_start = padding + (size[1] - (2 * padding) - total_text_block_height) / 2.0 current_y = y_text_start for i in range(max_lines_to_display): line_content = lines[i] line_width_actual, _ = self._get_text_dimensions(line_content, self.font) x_text = max(padding, (size[0] - line_width_actual) / 2.0) draw.text((x_text, current_y), line_content, font=self.font, fill=(200, 200, 180)) current_y += single_line_height + line_spacing if i == 6 and max_lines_to_display > 7 and len(lines) > max_lines_to_display: ellipsis_width, _ = self._get_text_dimensions("...", self.font) x_ellipsis = max(padding, (size[0] - ellipsis_width) / 2.0) draw.text((x_ellipsis, current_y), "...", font=self.font, fill=(200, 200, 180)) break filepath = os.path.join(self.output_dir, filename) try: img.save(filepath) return filepath except Exception as e: logger.error(f"Error saving placeholder image {filepath}: {e}", exc_info=True) return None def _search_pexels_image(self, query, output_filename_base): if not self.USE_PEXELS or not self.pexels_api_key: return None headers = {"Authorization": self.pexels_api_key} params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"} base_name, _ = os.path.splitext(output_filename_base) pexels_filename = base_name + f"_pexels_{random.randint(1000,9999)}.jpg" filepath = os.path.join(self.output_dir, pexels_filename) try: logger.info(f"Pexels search: '{query}'") effective_query = " ".join(query.split()[:5]) params["query"] = effective_query response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20) response.raise_for_status() data = response.json() if data.get("photos") and len(data["photos"]) > 0: photo_details = data["photos"][0] photo_url = photo_details["src"]["large2x"] logger.info(f"Downloading Pexels image from: {photo_url}") image_response = requests.get(photo_url, timeout=60) image_response.raise_for_status() img_data = Image.open(io.BytesIO(image_response.content)) if img_data.mode != 'RGB': logger.debug(f"Pexels image mode is {img_data.mode}, converting to RGB.") img_data = img_data.convert('RGB') img_data.save(filepath) logger.info(f"Pexels image saved successfully: {filepath}") return filepath else: logger.info(f"No photos found on Pexels for query: '{effective_query}'") return None except requests.exceptions.RequestException as e_req: logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True) except json.JSONDecodeError as e_json: logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True) except Exception as e: logger.error(f"General Pexels error for query '{query}': {e}", exc_info=True) return None def _generate_video_clip_with_runwayml(self, pt, iip, sifnb, tds=5): if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled.") return None if not iip or not os.path.exists(iip): logger.error(f"Runway Gen-4 needs input image. Path invalid: {iip}") return None runway_dur = 10 if tds > 7 else 5 ovfn = sifnb.replace(".png", f"_runway_gen4_d{runway_dur}s.mp4") ovfp = os.path.join(self.output_dir, ovfn) logger.info(f"Runway Gen-4 (Placeholder) img: {os.path.basename(iip)}, motion: '{pt[:100]}...', dur: {runway_dur}s") logger.warning("Using PLACEHOLDER video for Runway Gen-4.") img_clip = None txt_c = None final_ph_clip = None try: img_clip = ImageClip(iip).set_duration(runway_dur) txt = f"Runway Gen-4 Placeholder\nInput: {os.path.basename(iip)}\nMotion: {pt[:50]}..." txt_c = TextClip( txt, fontsize=24, color='white', font=self.video_overlay_font, bg_color='rgba(0,0,0,0.5)', size=(self.video_frame_size[0] * 0.8, None), method='caption' ).set_duration(runway_dur).set_position('center') final_ph_clip = CompositeVideoClip([img_clip, txt_c], size=img_clip.size) final_ph_clip.write_videofile(ovfp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2) logger.info(f"Runway Gen-4 placeholder video: {ovfp}") return ovfp except Exception as e: logger.error(f"Runway Gen-4 placeholder error: {e}", exc_info=True) return None finally: if img_clip and hasattr(img_clip, 'close'): img_clip.close() if txt_c and hasattr(txt_c, 'close'): txt_c.close() if final_ph_clip and hasattr(final_ph_clip, 'close'): final_ph_clip.close() def _create_placeholder_video_content(self, text_description, filename, duration=4, size=None): if size is None: size = self.video_frame_size filepath = os.path.join(self.output_dir, filename) txt_clip = None try: txt_clip = TextClip( text_description, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=size, method='caption' ).set_duration(duration) txt_clip.write_videofile( filepath, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2 ) logger.info(f"Generic placeholder video created successfully: {filepath}") return filepath except Exception as e: logger.error(f"Failed to create generic placeholder video {filepath}: {e}", exc_info=True) return None finally: if txt_clip and hasattr(txt_clip, 'close'): try: txt_clip.close() except Exception as e_close: logger.warning(f"Error closing TextClip in _create_placeholder_video_content: {e_close}") def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video, scene_data, scene_identifier_filename_base, generate_as_video_clip=False, runway_target_duration=5): base_name = scene_identifier_filename_base asset_info = { 'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Generation not attempted' } input_image_for_runway_path = None image_filename_for_base = base_name + "_base_image.png" temp_image_asset_info = { 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Base image generation not attempted' } if self.USE_AI_IMAGE_GENERATION and self.openai_api_key: max_r, att_n = 2, 0 for att_n in range(max_r): try: img_fp_dalle = os.path.join(self.output_dir, image_filename_for_base) logger.info(f"Attempt {att_n+1} DALL-E (base img): {image_generation_prompt_text[:100]}...") cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0) r = cl.images.generate( model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid" ) iu = r.data[0].url rp = getattr(r.data[0], 'revised_prompt', None) if rp: logger.info(f"DALL-E revised: {rp[:100]}...") ir = requests.get(iu, timeout=120) ir.raise_for_status() id_img = Image.open(io.BytesIO(ir.content)) if id_img.mode != 'RGB': id_img = id_img.convert('RGB') id_img.save(img_fp_dalle) logger.info(f"DALL-E base image: {img_fp_dalle}") input_image_for_runway_path = img_fp_dalle temp_image_asset_info = { 'path': img_fp_dalle, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp } break except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry...") time.sleep(5 * (att_n + 1)) temp_image_asset_info['error_message'] = str(e) except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True) temp_image_asset_info['error_message'] = str(e) break if temp_image_asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.") if temp_image_asset_info['error'] and self.USE_PEXELS: pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}") pp = self._search_pexels_image(pqt, image_filename_for_base) if pp: input_image_for_runway_path = pp temp_image_asset_info = { 'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}" } else: current_em = temp_image_asset_info.get('error_message', "") temp_image_asset_info['error_message'] = (current_em + " Pexels failed.").strip() if temp_image_asset_info['error']: logger.warning("Base image (DALL-E/Pexels) failed. Placeholder base image.") ppt = temp_image_asset_info.get('prompt_used', image_generation_prompt_text) php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", image_filename_for_base) if php: input_image_for_runway_path = php temp_image_asset_info = { 'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt } else: current_em = temp_image_asset_info.get('error_message', "") temp_image_asset_info['error_message'] = (current_em + " Base placeholder failed.").strip() if generate_as_video_clip: if self.USE_RUNWAYML and input_image_for_runway_path: video_path = self._generate_video_clip_with_runwayml( motion_prompt_text_for_video, input_image_for_runway_path, base_name, runway_target_duration ) if video_path and os.path.exists(video_path): return { 'path': video_path, 'type': 'video', 'error': False, 'prompt_used': motion_prompt_text_for_video, 'base_image_path': input_image_for_runway_path } else: asset_info = temp_image_asset_info asset_info['error'] = True asset_info['error_message'] = "RunwayML video gen failed; using base image." asset_info['type'] = 'image' return asset_info elif not self.USE_RUNWAYML: asset_info = temp_image_asset_info asset_info['error_message'] = "RunwayML disabled; using base image." asset_info['type'] = 'image' return asset_info else: asset_info = temp_image_asset_info asset_info['error_message'] = (asset_info.get('error_message', "") + " Base image failed, Runway video not attempted.").strip() asset_info['type'] = 'image' return asset_info else: return temp_image_asset_info def generate_narration_audio(self, ttn, ofn="narration_overall.mp3"): if not self.USE_ELEVENLABS or not self.elevenlabs_client or not ttn: logger.info("11L skip.") return None afp = os.path.join(self.output_dir, ofn) try: logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {ttn[:70]}...") asm = None if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'): asm = self.elevenlabs_client.text_to_speech.stream logger.info("Using 11L .text_to_speech.stream()") elif hasattr(self.elevenlabs_client, 'generate_stream'): asm = self.elevenlabs_client.generate_stream logger.info("Using 11L .generate_stream()") elif hasattr(self.elevenlabs_client, 'generate'): logger.info("Using 11L .generate()") vp = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id) ab = self.elevenlabs_client.generate(text=ttn, voice=vp, model="eleven_multilingual_v2") with open(afp, "wb") as f: f.write(ab) logger.info(f"11L audio (non-stream): {afp}") return afp else: logger.error("No 11L audio method.") return None if asm: vps = {"voice_id": str(self.elevenlabs_voice_id)} if self.elevenlabs_voice_settings: if hasattr(self.elevenlabs_voice_settings, 'model_dump'): vps["voice_settings"] = self.elevenlabs_voice_settings.model_dump() elif hasattr(self.elevenlabs_voice_settings, 'dict'): vps["voice_settings"] = self.elevenlabs_voice_settings.dict() else: vps["voice_settings"] = self.elevenlabs_voice_settings adi = asm(text=ttn, model_id="eleven_multilingual_v2", **vps) with open(afp, "wb") as f: for c in adi: if c: f.write(c) logger.info(f"11L audio (stream): {afp}") return afp except Exception as e: logger.error(f"11L audio error: {e}", exc_info=True) return None def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24): if not asset_data_list: logger.warning("No assets for animatic.") return None processed_clips = [] narration_clip = None final_clip = None logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.") for i, asset_info in enumerate(asset_data_list): asset_path = asset_info.get('path') asset_type = asset_info.get('type') scene_dur = asset_info.get('duration', 4.5) scene_num = asset_info.get('scene_num', i + 1) key_action = asset_info.get('key_action', '') logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s") if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip.") continue if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip.") continue current_scene_mvpy_clip = None try: if asset_type == 'image': pil_img = Image.open(asset_path) logger.debug(f"S{scene_num}: Loaded img. Mode:{pil_img.mode}, Size:{pil_img.size}") img_rgba = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy() thumb = img_rgba.copy() rf = Image.Resampling.LANCZOS if hasattr(Image.Resampling, 'LANCZOS') else Image.BILINEAR thumb.thumbnail(self.video_frame_size, rf) cv_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0)) xo = (self.video_frame_size[0] - thumb.width) // 2 yo = (self.video_frame_size[1] - thumb.height) // 2 cv_rgba.paste(thumb, (xo, yo), thumb) final_rgb_pil = Image.new("RGB", self.video_frame_size, (0, 0, 0)) final_rgb_pil.paste(cv_rgba, mask=cv_rgba.split()[3]) dbg_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png") final_rgb_pil.save(dbg_path) logger.info(f"DEBUG: Saved PRE_NUMPY_S{scene_num} to {dbg_path}") frame_np = np.array(final_rgb_pil, dtype=np.uint8) if not frame_np.flags['C_CONTIGUOUS']: frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8) logger.debug(f"S{scene_num}: NumPy for MoviePy. Shape:{frame_np.shape}, DType:{frame_np.dtype}, C-Contig:{frame_np.flags['C_CONTIGUOUS']}") if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3: logger.error(f"S{scene_num}: Invalid NumPy. Skip.") continue clip_base = ImageClip(frame_np, transparent=False).set_duration(scene_dur) mvpy_dbg_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png") clip_base.save_frame(mvpy_dbg_path, t=0.1) logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{scene_num} to {mvpy_dbg_path}") clip_fx = clip_base try: es = random.uniform(1.03, 1.08) clip_fx = clip_base.fx( vfx.resize, lambda t: 1 + (es - 1) * (t / scene_dur) if scene_dur > 0 else 1 ).set_position('center') except Exception as e: logger.error(f"S{scene_num} Ken Burns error: {e}", exc_info=False) current_scene_mvpy_clip = clip_fx elif asset_type == 'video': src_clip = None try: src_clip = VideoFileClip( asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None, audio=False ) tmp_clip = src_clip if src_clip.duration != scene_dur: if src_clip.duration > scene_dur: tmp_clip = src_clip.subclip(0, scene_dur) else: if scene_dur / src_clip.duration > 1.5 and src_clip.duration > 0.1: tmp_clip = src_clip.loop(duration=scene_dur) else: tmp_clip = src_clip.set_duration(src_clip.duration) logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).") current_scene_mvpy_clip = tmp_clip.set_duration(scene_dur) if current_scene_mvpy_clip.size != list(self.video_frame_size): current_scene_mvpy_clip = current_scene_mvpy_clip.resize(self.video_frame_size) except Exception as e: logger.error(f"S{scene_num} Video load error '{asset_path}':{e}", exc_info=True) continue finally: if src_clip and src_clip is not current_scene_mvpy_clip and hasattr(src_clip, 'close'): src_clip.close() else: logger.warning(f"S{scene_num} Unknown asset type '{asset_type}'. Skip.") continue if current_scene_mvpy_clip and key_action: try: to_dur = min(current_scene_mvpy_clip.duration - 0.5, current_scene_mvpy_clip.duration * 0.8) if current_scene_mvpy_clip.duration > 0.5 else current_scene_mvpy_clip.duration to_start = 0.25 txt_c = TextClip( f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color, font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)', method='caption', align='West', size=(self.video_frame_size[0] * 0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5 ).set_duration(to_dur).set_start(to_start).set_position(('center', 0.92), relative=True) current_scene_mvpy_clip = CompositeVideoClip( [current_scene_mvpy_clip, txt_c], size=self.video_frame_size, use_bgclip=True ) except Exception as e: logger.error(f"S{scene_num} TextClip error:{e}. No text.", exc_info=True) if current_scene_mvpy_clip: processed_clips.append(current_scene_mvpy_clip) logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.") except Exception as e: logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}", exc_info=True) finally: if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip, 'close'): try: current_scene_mvpy_clip.close() except: pass if not processed_clips: logger.warning("No clips processed. Abort.") return None td = 0.75 try: logger.info(f"Concatenating {len(processed_clips)} clips.") if len(processed_clips) > 1: final_clip = concatenate_videoclips(processed_clips, padding=-td if td > 0 else 0, method="compose") elif processed_clips: final_clip = processed_clips[0] if not final_clip: logger.error("Concatenation failed.") return None logger.info(f"Concatenated dur:{final_clip.duration:.2f}s") if td > 0 and final_clip.duration > 0: if final_clip.duration > td * 2: final_clip = final_clip.fx(vfx.fadein, td).fx(vfx.fadeout, td) else: final_clip = final_clip.fx(vfx.fadein, min(td, final_clip.duration / 2.0)) if overall_narration_path and os.path.exists(overall_narration_path) and final_clip.duration > 0: try: narration_clip = AudioFileClip(overall_narration_path) final_clip = final_clip.set_audio(narration_clip) logger.info("Narration added.") except Exception as e: logger.error(f"Narration add error:{e}", exc_info=True) elif final_clip.duration <= 0: logger.warning("Video no duration. No audio.") if final_clip and final_clip.duration > 0: op = os.path.join(self.output_dir, output_filename) logger.info(f"Writing video:{op} (Dur:{final_clip.duration:.2f}s)") final_clip.write_videofile( op, fps=fps, codec='libx264', preset='medium', audio_codec='aac', temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'), remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k", ffmpeg_params=["-pix_fmt", "yuv420p"] ) logger.info(f"Video created:{op}") return op else: logger.error("Final clip invalid. No write.") return None except Exception as e: logger.error(f"Video write error:{e}", exc_info=True) return None finally: logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.") clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else []) for clip_obj in clips_to_close: if clip_obj and hasattr(clip_obj, 'close'): try: clip_obj.close() except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {e_close}")