mgbam committed · verified
Commit d4d0117
1 Parent(s): 7a3f79b

Update core/visual_engine.py

Files changed (1)
  1. core/visual_engine.py +307 -376
core/visual_engine.py CHANGED
@@ -4,430 +4,351 @@ import base64
4
  import mimetypes
5
  import numpy as np
6
  import os
7
- import openai # OpenAI v1.x.x+
8
  import requests
9
  import io
10
  import time
11
  import random
12
  import logging
13
 
14
- # --- MoviePy Imports ---
15
  from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
16
  CompositeVideoClip, AudioFileClip)
17
  import moviepy.video.fx.all as vfx
18
 
19
- # --- MONKEY PATCH for Pillow/MoviePy compatibility ---
20
- try:
21
- if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
22
  if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
23
- elif hasattr(Image, 'LANCZOS'): # Pillow 8
24
  if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
25
- elif not hasattr(Image, 'ANTIALIAS'):
26
- print("WARNING: Pillow version lacks common Resampling attributes or ANTIALIAS. MoviePy effects might fail or look different.")
27
- except Exception as e_monkey_patch:
28
- print(f"WARNING: An unexpected error occurred during Pillow ANTIALIAS monkey-patch: {e_monkey_patch}")
29
 
30
  logger = logging.getLogger(__name__)
31
- # logger.setLevel(logging.DEBUG) # Uncomment for verbose debugging during development
32
 
33
- # --- External Service Client Imports ---
34
  ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
35
  try:
36
  from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
37
  from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
38
  ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
39
- ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components (SDK v1.x.x pattern) imported successfully.")
40
- except ImportError: logger.warning("ElevenLabs SDK not found (expected 'pip install elevenlabs>=1.0.0'). Audio generation will be disabled.")
41
- except Exception as e_eleven_import_general: logger.warning(f"General error importing ElevenLabs client components: {e_eleven_import_general}. Audio generation disabled.")
42
 
43
  RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClientClass = None
44
  try:
45
  from runwayml import RunwayML as ImportedRunwayMLAPIClientClass
46
  RunwayMLAPIClientClass = ImportedRunwayMLAPIClientClass; RUNWAYML_SDK_IMPORTED = True
47
- logger.info("RunwayML SDK (runwayml) imported successfully.")
48
- except ImportError: logger.warning("RunwayML SDK not found (pip install runwayml). RunwayML video generation will be disabled.")
49
- except Exception as e_runway_sdk_import_general: logger.warning(f"General error importing RunwayML SDK: {e_runway_sdk_import_general}. RunwayML features disabled.")
50
-
51
 
52
  class VisualEngine:
53
  DEFAULT_FONT_SIZE_PIL = 10; PREFERRED_FONT_SIZE_PIL = 20
54
  VIDEO_OVERLAY_FONT_SIZE = 30; VIDEO_OVERLAY_FONT_COLOR = 'white'
55
  DEFAULT_MOVIEPY_FONT = 'DejaVu-Sans-Bold'; PREFERRED_MOVIEPY_FONT = 'Liberation-Sans-Bold'
56
 
57
- # <<< CORRECTED __init__ METHOD >>>
58
  def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
59
- self.output_dir = output_dir
60
- os.makedirs(self.output_dir, exist_ok=True)
61
- logger.info(f"VisualEngine output directory set to: {self.output_dir}")
62
-
63
- self.font_filename_pil_preference = "DejaVuSans-Bold.ttf" # More standard Linux font
64
- font_paths_to_try = [
65
- self.font_filename_pil_preference,
66
- f"/usr/share/fonts/truetype/dejavu/{self.font_filename_pil_preference}",
67
- f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", # Alternative
68
- f"/System/Library/Fonts/Supplemental/Arial.ttf", # macOS fallback
69
- f"C:/Windows/Fonts/arial.ttf", # Windows fallback
70
- f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf" # User's previous custom path
71
- ]
72
- self.resolved_font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
73
-
74
- self.active_font_pil = ImageFont.load_default() # Fallback default
75
- self.active_font_size_pil = self.DEFAULT_FONT_SIZE_PIL
76
- self.active_moviepy_font_name = self.DEFAULT_MOVIEPY_FONT
77
-
78
  if self.resolved_font_path_pil:
79
- try:
80
- self.active_font_pil = ImageFont.truetype(self.resolved_font_path_pil, self.PREFERRED_FONT_SIZE_PIL)
81
- self.active_font_size_pil = self.PREFERRED_FONT_SIZE_PIL
82
- logger.info(f"Pillow font loaded: {self.resolved_font_path_pil} at size {self.active_font_size_pil}.")
83
- if "dejavu" in self.resolved_font_path_pil.lower(): self.active_moviepy_font_name = 'DejaVu-Sans-Bold'
84
- elif "liberation" in self.resolved_font_path_pil.lower(): self.active_moviepy_font_name = 'Liberation-Sans-Bold'
85
- except IOError as e_font_load_io:
86
- logger.error(f"Pillow font loading IOError for '{self.resolved_font_path_pil}': {e_font_load_io}. Using default font.")
87
- else:
88
- logger.warning("Preferred Pillow font not found in predefined paths. Using default font.")
89
-
90
- self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
91
- self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
92
  self.video_frame_size = (1280, 720)
93
-
94
- self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client_instance = None
95
- self.elevenlabs_voice_id = default_elevenlabs_voice_id # Use passed default
96
- if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
97
- self.elevenlabs_voice_settings_obj = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
98
  else: self.elevenlabs_voice_settings_obj = None
99
-
100
  self.pexels_api_key = None; self.USE_PEXELS = False
101
  self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None
102
-
103
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass and os.getenv("RUNWAYML_API_SECRET"):
104
  try: self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init from env var at startup.")
105
- except Exception as e_rwy_init_startup: logger.error(f"Initial RunwayML client init failed: {e_rwy_init_startup}"); self.USE_RUNWAYML = False
106
-
107
- logger.info(f"VisualEngine __init__ complete. ElevenLabs Voice ID: {self.elevenlabs_voice_id}")
108
 
109
- def set_openai_api_key(self, api_key_value): self.openai_api_key = api_key_value; self.USE_AI_IMAGE_GENERATION = bool(api_key_value); logger.info(f"DALL-E status: {'Ready' if self.USE_AI_IMAGE_GENERATION else 'Disabled'}")
110
-
111
- def set_elevenlabs_api_key(self, api_key_value, voice_id_from_secret=None):
112
- self.elevenlabs_api_key = api_key_value
113
- if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret; logger.info(f"11L Voice ID updated to: {self.elevenlabs_voice_id} via set_elevenlabs_api_key.")
114
- if api_key_value and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
115
- try: self.elevenlabs_client_instance = ElevenLabsAPIClient(api_key=api_key_value); self.USE_ELEVENLABS = bool(self.elevenlabs_client_instance); logger.info(f"11L Client: {'Ready' if self.USE_ELEVENLABS else 'Failed'} (Voice: {self.elevenlabs_voice_id})")
116
- except Exception as e_11l_setkey_init: logger.error(f"11L client init error: {e_11l_setkey_init}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client_instance=None
117
  else: self.USE_ELEVENLABS = False; logger.info(f"11L Disabled (key/SDK).")
118
-
119
- def set_pexels_api_key(self, api_key_value): self.pexels_api_key = api_key_value; self.USE_PEXELS = bool(api_key_value); logger.info(f"Pexels status: {'Ready' if self.USE_PEXELS else 'Disabled'}")
120
-
121
- def set_runway_api_key(self, api_key_value):
122
- self.runway_api_key = api_key_value
123
- if api_key_value:
124
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass:
125
  if not self.runway_ml_sdk_client_instance:
126
  try:
127
- original_env_secret = os.getenv("RUNWAYML_API_SECRET")
128
- if not original_env_secret: os.environ["RUNWAYML_API_SECRET"] = api_key_value; logger.info("Temp set RUNWAYML_API_SECRET for SDK.")
129
- self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init via set_runway_api_key.")
130
- if not original_env_secret: del os.environ["RUNWAYML_API_SECRET"]; logger.info("Cleared temp RUNWAYML_API_SECRET.")
131
- except Exception as e_runway_setkey_init: logger.error(f"RunwayML Client init in set_runway_api_key fail: {e_runway_setkey_init}", exc_info=True); self.USE_RUNWAYML=False;self.runway_ml_sdk_client_instance=None
132
- else: self.USE_RUNWAYML = True; logger.info("RunwayML Client already init.")
133
- else: logger.warning("RunwayML SDK not imported. Service disabled."); self.USE_RUNWAYML = False
134
- else: self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None; logger.info("RunwayML Disabled (no API key).")
135
 
136
- # ... (Paste the rest of the methods from the PREVIOUS full visual_engine.py here:
137
- # _image_to_data_uri,
138
- # _map_resolution_to_runway_ratio,
139
- # _get_text_dimensions,
140
- # _create_placeholder_image_content, (ensure it's the corrected one)
141
- # _search_pexels_image, (ensure it's the corrected one)
142
- # _generate_video_clip_with_runwayml,
143
- # _create_placeholder_video_content, (ensure it's the corrected one with try/finally)
144
- # generate_scene_asset, (ensure it's the corrected one with try/except in DALL-E loop)
145
- # generate_narration_audio, (ensure it's the corrected one with try/except)
146
- # assemble_animatic_from_assets (the one with extensive image debugging and pix_fmt)
147
- # )
148
- # For brevity, I'm only showing the __init__ fix in this block.
149
- # You must take the FULL content of all other methods from the last "expertly crafted" version
150
- # that we were working on for the video corruption and other syntax errors.
151
-
152
- # --- PASTE THE REST OF THE VisualEngine METHODS HERE ---
153
- # (This includes _image_to_data_uri, _map_resolution_to_runway_ratio, _get_text_dimensions,
154
- # _create_placeholder_image_content, _search_pexels_image,
155
- # _generate_video_clip_with_runwayml, _create_placeholder_video_content,
156
- # generate_scene_asset, generate_narration_audio, and assemble_animatic_from_assets)
157
- # Ensure you use the versions that had syntax errors fixed from previous responses.
158
-
159
- # Example of where to paste the other methods:
160
- def _image_to_data_uri(self, image_path):
161
- # <<< CORRECTED METHOD from previous response >>>
162
  try:
163
- mime_type, _ = mimetypes.guess_type(image_path)
164
- if not mime_type:
165
- ext = os.path.splitext(image_path)[1].lower()
166
- mime_map = {".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".webp": "image/webp"}
167
- mime_type = mime_map.get(ext, "application/octet-stream")
168
- if mime_type == "application/octet-stream":
169
- logger.warning(f"Could not determine MIME type for {image_path} from extension '{ext}', using default {mime_type}.")
170
-
171
- with open(image_path, "rb") as image_file_handle:
172
- image_binary_data = image_file_handle.read()
173
-
174
- encoded_base64_string = base64.b64encode(image_binary_data).decode('utf-8')
175
-
176
- data_uri_string = f"data:{mime_type};base64,{encoded_base64_string}"
177
- logger.debug(f"Generated data URI for {os.path.basename(image_path)} (MIME: {mime_type}). Data URI starts with: {data_uri_string[:100]}...")
178
- return data_uri_string
179
- except FileNotFoundError:
180
- logger.error(f"Image file not found at path: '{image_path}' when trying to create data URI.")
181
- return None
182
- except Exception as e_data_uri_conversion:
183
- logger.error(f"Error converting image '{image_path}' to data URI: {e_data_uri_conversion}", exc_info=True)
184
- return None
185
 
186
- def _map_resolution_to_runway_ratio(self, width, height):
187
- ratio_str=f"{width}:{height}";supported_ratios_gen4=["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"];
188
- if ratio_str in supported_ratios_gen4:return ratio_str
189
- logger.warning(f"Res {ratio_str} not in Gen-4 list. Default 1280:720.");return "1280:720"
190
 
191
- def _get_text_dimensions(self, text_content, font_object_pil):
192
- default_h = getattr(font_object_pil, 'size', self.active_font_size_pil)
193
- if not text_content: return 0, default_h
194
  try:
195
- if hasattr(font_object_pil,'getbbox'): bbox=font_object_pil.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]; return w, h if h > 0 else default_h
196
- elif hasattr(font_object_pil,'getsize'): w,h=font_object_pil.getsize(text_content); return w, h if h > 0 else default_h
197
- else: return int(len(text_content)*default_h*0.6),int(default_h*1.2)
198
- except Exception as e_getdim: logger.warning(f"Error in _get_text_dimensions: {e_getdim}"); return int(len(text_content)*self.active_font_size_pil*0.6),int(self.active_font_size_pil*1.2)
199
 
200
- def _create_placeholder_image_content(self,text_description,filename,size=None):
201
- # (Corrected version from previous response)
202
- if size is None: size = self.video_frame_size
203
- img = Image.new('RGB', size, color=(20, 20, 40)); d = ImageDraw.Draw(img); padding = 25
204
- max_w = size[0] - (2 * padding); lines_for_placeholder = []
205
- if not text_description: text_description = "(Placeholder Image)"
206
- words_list = text_description.split(); current_line_buffer = ""
207
- for word_idx, word_item in enumerate(words_list):
208
- prospective_addition = word_item + (" " if word_idx < len(words_list) - 1 else "")
209
- test_line_candidate = current_line_buffer + prospective_addition
210
- current_w_text, _ = self._get_text_dimensions(test_line_candidate, self.active_font_pil)
211
- if current_w_text == 0 and test_line_candidate.strip(): current_w_text = len(test_line_candidate) * (self.active_font_size_pil * 0.6)
212
- if current_w_text <= max_w: current_line_buffer = test_line_candidate
213
  else:
214
- if current_line_buffer.strip(): lines_for_placeholder.append(current_line_buffer.strip())
215
- current_line_buffer = prospective_addition
216
- if current_line_buffer.strip(): lines_for_placeholder.append(current_line_buffer.strip())
217
- if not lines_for_placeholder and text_description:
218
- avg_char_w_est, _ = self._get_text_dimensions("W", self.active_font_pil); avg_char_w_est = avg_char_w_est or (self.active_font_size_pil * 0.6)
219
- chars_per_line_est = int(max_w / avg_char_w_est) if avg_char_w_est > 0 else 20
220
- lines_for_placeholder.append(text_description[:chars_per_line_est] + ("..." if len(text_description) > chars_per_line_est else ""))
221
- elif not lines_for_placeholder: lines_for_placeholder.append("(Placeholder Error)")
222
- _, single_h = self._get_text_dimensions("Ay", self.active_font_pil); single_h = single_h if single_h > 0 else self.active_font_size_pil + 2
223
- max_l = min(len(lines_for_placeholder), (size[1] - (2 * padding)) // (single_h + 2)) if single_h > 0 else 1; max_l = max(1, max_l)
224
- y_p = padding + (size[1] - (2 * padding) - max_l * (single_h + 2)) / 2.0
225
- for i_line in range(max_l):
226
- line_txt_content = lines_for_placeholder[i_line]; line_w_val, _ = self._get_text_dimensions(line_txt_content, self.active_font_pil)
227
- if line_w_val == 0 and line_txt_content.strip(): line_w_val = len(line_txt_content) * (self.active_font_size_pil * 0.6)
228
- x_p = (size[0] - line_w_val) / 2.0
229
- try: d.text((x_p, y_p), line_txt_content, font=self.active_font_pil, fill=(200, 200, 180))
230
- except Exception as e_draw: logger.error(f"Pillow d.text error: {e_draw} for '{line_txt_content}'")
231
- y_p += single_h + 2
232
- if i_line == 6 and max_l > 7:
233
- try: d.text((x_p, y_p), "...", font=self.active_font_pil, fill=(200, 200, 180))
234
- except Exception as e_elip: logger.error(f"Pillow d.text ellipsis error: {e_elip}"); break
235
- filepath_placeholder = os.path.join(self.output_dir, filename)
236
- try: img.save(filepath_placeholder); return filepath_placeholder
237
- except Exception as e_save: logger.error(f"Saving placeholder image '{filepath_placeholder}' error: {e_save}", exc_info=True); return None
238
 
239
- def _search_pexels_image(self, query_str, output_fn_base):
240
- # (Corrected version from previous response)
241
  if not self.USE_PEXELS or not self.pexels_api_key: return None
242
- http_headers = {"Authorization": self.pexels_api_key}
243
- http_params = {"query": query_str, "per_page": 1, "orientation": "landscape", "size": "large2x"}
244
- base_name_px, _ = os.path.splitext(output_fn_base)
245
- pexels_fn_str = base_name_px + f"_pexels_{random.randint(1000,9999)}.jpg"
246
- file_path_px = os.path.join(self.output_dir, pexels_fn_str)
247
  try:
248
- logger.info(f"Pexels: Searching for '{query_str}'")
249
- eff_query_px = " ".join(query_str.split()[:5])
250
- http_params["query"] = eff_query_px
251
- response_px = requests.get("https://api.pexels.com/v1/search", headers=http_headers, params=http_params, timeout=20)
252
- response_px.raise_for_status()
253
- data_px = response_px.json()
254
  if data_px.get("photos") and len(data_px["photos"]) > 0:
255
- photo_details_px = data_px["photos"][0]
256
- photo_url_px = photo_details_px.get("src", {}).get("large2x")
257
- if not photo_url_px: logger.warning(f"Pexels: 'large2x' URL missing for '{eff_query_px}'. Details: {photo_details_px}"); return None
258
- image_response_px = requests.get(photo_url_px, timeout=60); image_response_px.raise_for_status()
259
- img_pil_data_px = Image.open(io.BytesIO(image_response_px.content))
260
- if img_pil_data_px.mode != 'RGB': img_pil_data_px = img_pil_data_px.convert('RGB')
261
- img_pil_data_px.save(file_path_px); logger.info(f"Pexels: Image saved to {file_path_px}"); return file_path_px
262
- else: logger.info(f"Pexels: No photos for '{eff_query_px}'."); return None
263
- except requests.exceptions.RequestException as e_req_px: logger.error(f"Pexels: RequestException for '{query_str}': {e_req_px}", exc_info=False); return None
264
- except Exception as e_px_gen: logger.error(f"Pexels: General error for '{query_str}': {e_px_gen}", exc_info=True); return None
265
 
266
- def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
267
- # (Updated RunwayML integration from before)
268
- if not self.USE_RUNWAYML or not self.runway_ml_sdk_client_instance: logger.warning("RunwayML not enabled/client not init. Skip video."); return None
269
- if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 needs input image. Path invalid: {input_image_path}"); return None
270
- image_data_uri_str = self._image_to_data_uri(input_image_path)
271
- if not image_data_uri_str: return None
272
- runway_dur = 10 if target_duration_seconds >= 8 else 5
273
- runway_ratio = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1])
274
- base_name_for_runway_vid, _ = os.path.splitext(scene_identifier_filename_base); output_vid_fn = base_name_for_runway_vid + f"_runway_gen4_d{runway_dur}s.mp4"
275
- output_vid_fp = os.path.join(self.output_dir, output_vid_fn)
276
- logger.info(f"Runway Gen-4 task: motion='{text_prompt_for_motion[:100]}...', img='{os.path.basename(input_image_path)}', dur={runway_dur}s, ratio='{runway_ratio}'")
277
  try:
278
- task_submitted_runway = self.runway_ml_sdk_client_instance.image_to_video.create(model='gen4_turbo', prompt_image=image_data_uri_str, prompt_text=text_prompt_for_motion, duration=runway_dur, ratio=runway_ratio)
279
- task_id_runway = task_submitted_runway.id; logger.info(f"Runway Gen-4 task ID: {task_id_runway}. Polling...")
280
- poll_sec=10; max_poll_count=36; poll_start_time = time.time()
281
- while time.time() - poll_start_time < max_poll_count * poll_sec:
282
- time.sleep(poll_sec); task_details_runway = self.runway_ml_sdk_client_instance.tasks.retrieve(id=task_id_runway)
283
- logger.info(f"Runway task {task_id_runway} status: {task_details_runway.status}")
284
- if task_details_runway.status == 'SUCCEEDED':
285
- output_url_runway = getattr(getattr(task_details_runway,'output',None),'url',None) or \
286
- (getattr(task_details_runway,'artifacts',None) and task_details_runway.artifacts and hasattr(task_details_runway.artifacts[0],'url')and task_details_runway.artifacts[0].url) or \
287
- (getattr(task_details_runway,'artifacts',None) and task_details_runway.artifacts and hasattr(task_details_runway.artifacts[0],'download_url')and task_details_runway.artifacts[0].download_url)
288
- if not output_url_runway: logger.error(f"Runway task {task_id_runway} SUCCEEDED, but no output URL. Details: {vars(task_details_runway) if hasattr(task_details_runway,'__dict__') else task_details_runway}"); return None
289
- logger.info(f"Runway task {task_id_runway} SUCCEEDED. Downloading: {output_url_runway}")
290
- video_resp_get = requests.get(output_url_runway, stream=True, timeout=300); video_resp_get.raise_for_status()
291
- with open(output_vid_fp,'wb') as f_vid:
292
- for chunk_data in video_resp_get.iter_content(chunk_size=8192): f_vid.write(chunk_data)
293
- logger.info(f"Runway Gen-4 video saved: {output_vid_fp}"); return output_vid_fp
294
- elif task_details_runway.status in ['FAILED','ABORTED','ERROR']:
295
- err_msg_runway = getattr(task_details_runway,'error_message',None) or getattr(getattr(task_details_runway,'output',None),'error',"Unknown Runway error.")
296
- logger.error(f"Runway task {task_id_runway} status: {task_details_runway.status}. Error: {err_msg_runway}"); return None
297
- logger.warning(f"Runway task {task_id_runway} timed out."); return None
298
- except AttributeError as ae_sdk: logger.error(f"RunwayML SDK AttrError: {ae_sdk}. SDK/methods changed?", exc_info=True); return None
299
- except Exception as e_runway_gen: logger.error(f"Runway Gen-4 API error: {e_runway_gen}", exc_info=True); return None
300
 
301
- def _create_placeholder_video_content(self, text_desc_ph, filename_ph, duration_ph=4, size_ph=None):
302
- # (Corrected from previous response)
303
- if size_ph is None: size_ph = self.video_frame_size
304
- filepath_ph = os.path.join(self.output_dir, filename_ph)
305
- text_clip_ph = None
306
- try:
307
- text_clip_ph = TextClip(text_desc_ph, fontsize=50, color='white', font=self.video_overlay_font,
308
- bg_color='black', size=size_ph, method='caption').set_duration(duration_ph)
309
- text_clip_ph.write_videofile(filepath_ph, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
310
- logger.info(f"Generic placeholder video created: {filepath_ph}")
311
- return filepath_ph
312
- except Exception as e_ph_vid:
313
- logger.error(f"Failed to create generic placeholder video '{filepath_ph}': {e_ph_vid}", exc_info=True)
314
- return None
315
  finally:
316
- if text_clip_ph and hasattr(text_clip_ph, 'close'):
317
- try: text_clip_ph.close()
318
  except Exception as e_cl_phv: logger.warning(f"Ignoring error closing placeholder TextClip: {e_cl_phv}")
319
 
320
- def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
321
- scene_data_dict, scene_identifier_fn_base,
322
- generate_as_video_clip_flag=False, runway_target_dur_val=5):
323
- # (Corrected DALL-E loop from previous response)
324
- base_name_asset, _ = os.path.splitext(scene_identifier_fn_base)
325
- asset_info_result = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
326
- path_for_input_image_runway = None
327
- fn_for_base_image = base_name_asset + ("_base_for_video.png" if generate_as_video_clip_flag else ".png")
328
- fp_for_base_image = os.path.join(self.output_dir, fn_for_base_image)
329
  if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
330
- max_r_dalle, attempt_count_dalle = 2,0;
331
- for att_n_dalle in range(max_r_dalle):
332
- attempt_count_dalle = att_n_dalle + 1
333
- try:
334
- logger.info(f"Att {attempt_count_dalle} DALL-E (base img): {image_generation_prompt_text[:70]}..."); oai_cl = openai.OpenAI(api_key=self.openai_api_key,timeout=90.0); oai_r = oai_cl.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid"); oai_iu = oai_r.data[0].url; oai_rp = getattr(oai_r.data[0],'revised_prompt',None);
335
- if oai_rp: logger.info(f"DALL-E revised: {oai_rp[:70]}...")
336
- oai_ir = requests.get(oai_iu,timeout=120); oai_ir.raise_for_status(); oai_id = Image.open(io.BytesIO(oai_ir.content));
337
- if oai_id.mode!='RGB': oai_id=oai_id.convert('RGB')
338
- oai_id.save(fp_for_base_image); logger.info(f"DALL-E base img saved: {fp_for_base_image}"); path_for_input_image_runway=fp_for_base_image; asset_info_result={'path':fp_for_base_image,'type':'image','error':False,'prompt_used':image_generation_prompt_text,'revised_prompt':oai_rp}; break
339
- except openai.RateLimitError as e_oai_rl: logger.warning(f"OpenAI RateLimit Att {attempt_count_dalle}:{e_oai_rl}.Retry...");time.sleep(5*attempt_count_dalle);asset_info_result['error_message']=str(e_oai_rl)
340
- except openai.APIError as e_oai_api: logger.error(f"OpenAI APIError Att {attempt_count_dalle}:{e_oai_api}");asset_info_result['error_message']=str(e_oai_api);break
341
- except requests.exceptions.RequestException as e_oai_req: logger.error(f"Requests Err DALL-E Att {attempt_count_dalle}:{e_oai_req}");asset_info_result['error_message']=str(e_oai_req);break
342
- except Exception as e_oai_gen: logger.error(f"General DALL-E Err Att {attempt_count_dalle}:{e_oai_gen}",exc_info=True);asset_info_result['error_message']=str(e_oai_gen);break
343
- if asset_info_result['error']: logger.warning(f"DALL-E failed after {attempt_count_dalle} attempts for base img.")
344
- if asset_info_result['error'] and self.USE_PEXELS:
345
- logger.info("Trying Pexels for base img.");px_qt=scene_data_dict.get('pexels_search_query_감독',f"{scene_data_dict.get('emotional_beat','')} {scene_data_dict.get('setting_description','')}");px_pp=self._search_pexels_image(px_qt,fn_for_base_image);
346
- if px_pp:path_for_input_image_runway=px_pp;asset_info_result={'path':px_pp,'type':'image','error':False,'prompt_used':f"Pexels:{px_qt}"}
347
- else:current_em_px=asset_info_result.get('error_message',"");asset_info_result['error_message']=(current_em_px+" Pexels failed for base.").strip()
348
- if asset_info_result['error']:
349
- logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ph_ppt=asset_info_result.get('prompt_used',image_generation_prompt_text);php=self._create_placeholder_image_content(f"[Base Placeholder]{ph_ppt[:70]}...",fn_for_base_image);
350
- if php:path_for_input_image_runway=php;asset_info_result={'path':php,'type':'image','error':False,'prompt_used':ph_ppt}
351
- else:current_em_ph=asset_info_result.get('error_message',"");asset_info_result['error_message']=(current_em_ph+" Base placeholder failed.").strip()
352
- if generate_as_video_clip_flag:
353
- if not path_for_input_image_runway:logger.error("RunwayML video: base img failed.");asset_info_result['error']=True;asset_info_result['error_message']=(asset_info_result.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info_result['type']='none';return asset_info_result
354
  if self.USE_RUNWAYML:
355
- runway_video_p=self._generate_video_clip_with_runwayml(motion_prompt_text_for_video,path_for_input_image_runway,base_name_asset,runway_target_dur_val)
356
- if runway_video_p and os.path.exists(runway_video_p):asset_info_result={'path':runway_video_p,'type':'video','error':False,'prompt_used':motion_prompt_text_for_video,'base_image_path':path_for_input_image_runway}
357
- else:logger.warning(f"RunwayML video failed for {base_name_asset}. Fallback to base img.");asset_info_result['error']=True;asset_info_result['error_message']=(asset_info_result.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info_result['path']=path_for_input_image_runway;asset_info_result['type']='image';asset_info_result['prompt_used']=image_generation_prompt_text
358
- else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info_result['error']=True;asset_info_result['error_message']=(asset_info_result.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info_result['path']=path_for_input_image_runway;asset_info_result['type']='image';asset_info_result['prompt_used']=image_generation_prompt_text
359
- return asset_info_result
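For reference, a minimal sketch of the DALL-E call that the retry loop above wraps, using the same model and parameters as the method; the API key and prompt text are placeholders, and error handling is omitted.

import io
import openai
import requests
from PIL import Image

client = openai.OpenAI(api_key="YOUR_OPENAI_KEY", timeout=90.0)  # placeholder key
resp = client.images.generate(model="dall-e-3", prompt="example scene prompt", n=1,
                              size="1792x1024", quality="hd",
                              response_format="url", style="vivid")
img_bytes = requests.get(resp.data[0].url, timeout=120).content   # DALL-E returns a temporary URL
Image.open(io.BytesIO(img_bytes)).convert("RGB").save("scene_base.png")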
360
 
361
- def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
362
  # (Corrected version from previous response)
363
- if not self.USE_ELEVENLABS or not self.elevenlabs_client_instance or not text_to_narrate:
364
- logger.info("ElevenLabs conditions not met. Skipping audio generation.")
365
- return None
366
- audio_filepath_narration = os.path.join(self.output_dir, output_filename)
367
  try:
368
- logger.info(f"Generating ElevenLabs audio (Voice ID: {self.elevenlabs_voice_id}) for text: \"{text_to_narrate[:70]}...\"")
369
- audio_stream_method_11l = None
370
- if hasattr(self.elevenlabs_client_instance, 'text_to_speech') and hasattr(self.elevenlabs_client_instance.text_to_speech, 'stream'):
371
- audio_stream_method_11l = self.elevenlabs_client_instance.text_to_speech.stream; logger.info("Using ElevenLabs SDK method: client.text_to_speech.stream()")
372
- elif hasattr(self.elevenlabs_client_instance, 'generate_stream'):
373
- audio_stream_method_11l = self.elevenlabs_client_instance.generate_stream; logger.info("Using ElevenLabs SDK method: client.generate_stream()")
374
- elif hasattr(self.elevenlabs_client_instance, 'generate'):
375
- logger.info("Using ElevenLabs SDK method: client.generate() (non-streaming).")
376
- voice_param_11l = str(self.elevenlabs_voice_id)
377
- if Voice and self.elevenlabs_voice_settings_obj: voice_param_11l = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings_obj)
378
- audio_bytes_data = self.elevenlabs_client_instance.generate(text=text_to_narrate, voice=voice_param_11l, model="eleven_multilingual_v2")
379
- with open(audio_filepath_narration, "wb") as audio_file_out: audio_file_out.write(audio_bytes_data)
380
- logger.info(f"ElevenLabs audio (non-streamed) saved successfully to: {audio_filepath_narration}"); return audio_filepath_narration
381
- else: logger.error("No recognized audio generation method found on the ElevenLabs client instance."); return None
382
-
383
- if audio_stream_method_11l:
384
- params_for_voice_stream = {"voice_id": str(self.elevenlabs_voice_id)}
385
  if self.elevenlabs_voice_settings_obj:
386
- if hasattr(self.elevenlabs_voice_settings_obj, 'model_dump'): params_for_voice_stream["voice_settings"] = self.elevenlabs_voice_settings_obj.model_dump()
387
- elif hasattr(self.elevenlabs_voice_settings_obj, 'dict'): params_for_voice_stream["voice_settings"] = self.elevenlabs_voice_settings_obj.dict()
388
- else: params_for_voice_stream["voice_settings"] = self.elevenlabs_voice_settings_obj
389
- audio_data_iterator_11l = audio_stream_method_11l(text=text_to_narrate, model_id="eleven_multilingual_v2", **params_for_voice_stream)
390
- with open(audio_filepath_narration, "wb") as audio_file_out_stream:
391
- for audio_chunk_data in audio_data_iterator_11l:
392
- if audio_chunk_data: audio_file_out_stream.write(audio_chunk_data)
393
- logger.info(f"ElevenLabs audio (streamed) saved successfully to: {audio_filepath_narration}"); return audio_filepath_narration
394
- except AttributeError as ae_11l_sdk: logger.error(f"AttributeError with ElevenLabs SDK client: {ae_11l_sdk}. SDK version/methods might differ.", exc_info=True); return None
395
- except Exception as e_11l_general_audio: logger.error(f"General error during ElevenLabs audio generation: {e_11l_general_audio}", exc_info=True); return None
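The voice-settings serialization fallback used above, isolated as a sketch: newer pydantic-based SDK objects expose model_dump(), older ones expose dict(), and anything else is passed through unchanged.

def settings_to_payload(settings):
    # Prefer pydantic v2's model_dump(), fall back to v1's dict(), else pass the object through.
    if hasattr(settings, "model_dump"):
        return settings.model_dump()
    if hasattr(settings, "dict"):
        return settings.dict()
    return settings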
396
 
397
  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
398
- # (Keep as in the version with robust image processing, C-contiguous array, debug saves, and pix_fmt)
 
399
  if not asset_data_list: logger.warning("No assets for animatic."); return None
400
  processed_moviepy_clips_list = []; narration_audio_clip_mvpy = None; final_video_output_clip = None
401
  logger.info(f"Assembling from {len(asset_data_list)} assets. Target Frame: {self.video_frame_size}.")
 
402
  for i_asset, asset_info_item_loop in enumerate(asset_data_list):
403
  path_of_asset, type_of_asset, duration_for_scene = asset_info_item_loop.get('path'), asset_info_item_loop.get('type'), asset_info_item_loop.get('duration', 4.5)
404
  num_of_scene, action_in_key = asset_info_item_loop.get('scene_num', i_asset + 1), asset_info_item_loop.get('key_action', '')
405
  logger.info(f"S{num_of_scene}: Path='{path_of_asset}', Type='{type_of_asset}', Dur='{duration_for_scene}'s")
 
406
  if not (path_of_asset and os.path.exists(path_of_asset)): logger.warning(f"S{num_of_scene}: Not found '{path_of_asset}'. Skip."); continue
407
  if duration_for_scene <= 0: logger.warning(f"S{num_of_scene}: Invalid duration ({duration_for_scene}s). Skip."); continue
408
- active_scene_clip = None
 
409
  try:
410
  if type_of_asset == 'image':
411
- opened_pil_img = Image.open(path_of_asset); logger.debug(f"S{num_of_scene}: Loaded img. Mode:{opened_pil_img.mode}, Size:{opened_pil_img.size}")
412
- converted_img_rgba = opened_pil_img.convert('RGBA') if opened_pil_img.mode != 'RGBA' else opened_pil_img.copy()
413
- thumbnailed_img = converted_img_rgba.copy(); resample_f = Image.Resampling.LANCZOS if hasattr(Image.Resampling,'LANCZOS') else Image.BILINEAR; thumbnailed_img.thumbnail(self.video_frame_size,resample_f)
414
- rgba_canvas = Image.new('RGBA',self.video_frame_size,(0,0,0,0)); pos_x,pos_y=(self.video_frame_size[0]-thumbnailed_img.width)//2,(self.video_frame_size[1]-thumbnailed_img.height)//2
415
- rgba_canvas.paste(thumbnailed_img,(pos_x,pos_y),thumbnailed_img)
416
- final_rgb_img_pil = Image.new("RGB",self.video_frame_size,(0,0,0)); final_rgb_img_pil.paste(rgba_canvas,mask=rgba_canvas.split()[3])
417
- debug_path_img_pre_numpy = os.path.join(self.output_dir,f"debug_PRE_NUMPY_S{num_of_scene}.png"); final_rgb_img_pil.save(debug_path_img_pre_numpy); logger.info(f"DEBUG: Saved PRE_NUMPY_S{num_of_scene} to {debug_path_img_pre_numpy}")
418
- numpy_frame_arr = np.array(final_rgb_img_pil,dtype=np.uint8);
419
- if not numpy_frame_arr.flags['C_CONTIGUOUS']: numpy_frame_arr=np.ascontiguousarray(numpy_frame_arr,dtype=np.uint8)
420
- logger.debug(f"S{num_of_scene}: NumPy for MoviePy. Shape:{numpy_frame_arr.shape}, DType:{numpy_frame_arr.dtype}, C-Contig:{numpy_frame_arr.flags['C_CONTIGUOUS']}")
421
- if numpy_frame_arr.size==0 or numpy_frame_arr.ndim!=3 or numpy_frame_arr.shape[2]!=3: logger.error(f"S{num_of_scene}: Invalid NumPy array for MoviePy. Skip."); continue
422
- base_image_clip = ImageClip(numpy_frame_arr,transparent=False).set_duration(duration_for_scene)
423
- debug_path_moviepy_frame=os.path.join(self.output_dir,f"debug_MOVIEPY_FRAME_S{num_of_scene}.png"); base_image_clip.save_frame(debug_path_moviepy_frame,t=0.1); logger.info(f"DEBUG: Saved MOVIEPY_FRAME_S{num_of_scene} to {debug_path_moviepy_frame}")
424
- fx_image_clip = base_image_clip
425
- try: scale_end_kb=random.uniform(1.03,1.08); fx_image_clip=base_image_clip.fx(vfx.resize,lambda t_val:1+(scale_end_kb-1)*(t_val/duration_for_scene) if duration_for_scene>0 else 1).set_position('center')
426
- except Exception as e_kb_fx: logger.error(f"S{num_of_scene} Ken Burns error: {e_kb_fx}",exc_info=False)
427
- active_scene_clip = fx_image_clip
 
428
  elif type_of_asset == 'video':
 
429
  source_video_clip_obj=None
430
  try:
 
431
  source_video_clip_obj=VideoFileClip(path_of_asset,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
432
  temp_video_clip_obj_loop=source_video_clip_obj
433
  if source_video_clip_obj.duration!=duration_for_scene:
@@ -437,49 +358,59 @@ class VisualEngine:
437
  else:temp_video_clip_obj_loop=source_video_clip_obj.set_duration(source_video_clip_obj.duration);logger.info(f"S{num_of_scene} Video clip ({source_video_clip_obj.duration:.2f}s) shorter than target ({duration_for_scene:.2f}s).")
438
  active_scene_clip=temp_video_clip_obj_loop.set_duration(duration_for_scene)
439
  if active_scene_clip.size!=list(self.video_frame_size):active_scene_clip=active_scene_clip.resize(self.video_frame_size)
 
440
  except Exception as e_vid_load_loop:logger.error(f"S{num_of_scene} Video load error '{path_of_asset}':{e_vid_load_loop}",exc_info=True);continue
441
  finally:
442
- if source_video_clip_obj and source_video_clip_obj is not active_scene_clip and hasattr(source_video_clip_obj,'close'):source_video_clip_obj.close()
443
- else: logger.warning(f"S{num_of_scene} Unknown asset type '{type_of_asset}'. Skip."); continue
 
 
 
444
  if active_scene_clip and action_in_key:
445
  try:
446
- dur_text_overlay=min(active_scene_clip.duration-0.5,active_scene_clip.duration*0.8)if active_scene_clip.duration>0.5 else active_scene_clip.duration; start_text_overlay=0.25
447
- if dur_text_overlay > 0:
448
- text_clip_for_overlay=TextClip(f"Scene {num_of_scene}\n{action_in_key}",fontsize=self.VIDEO_OVERLAY_FONT_SIZE,color=self.VIDEO_OVERLAY_FONT_COLOR,font=self.active_moviepy_font_name,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(dur_text_overlay).set_start(start_text_overlay).set_position(('center',0.92),relative=True)
449
- active_scene_clip=CompositeVideoClip([active_scene_clip,text_clip_for_overlay],size=self.video_frame_size,use_bgclip=True)
450
- else: logger.warning(f"S{num_of_scene}: Text overlay duration zero. Skip text.")
451
- except Exception as e_txt_comp:logger.error(f"S{num_of_scene} TextClip error:{e_txt_comp}. No text.",exc_info=True)
452
- if active_scene_clip:processed_moviepy_clips_list.append(active_scene_clip);logger.info(f"S{num_of_scene} Processed. Dur:{active_scene_clip.duration:.2f}s.")
453
- except Exception as e_asset_loop_main:logger.error(f"MAJOR Error processing asset for S{num_of_scene} ({path_of_asset}):{e_asset_loop_main}",exc_info=True)
 
454
  finally:
455
  if active_scene_clip and hasattr(active_scene_clip,'close'):
456
  try: active_scene_clip.close()
457
- except: pass
458
- if not processed_moviepy_clips_list:logger.warning("No clips processed for animatic. Aborting.");return None
 
459
  transition_duration_val=0.75
460
  try:
461
- logger.info(f"Concatenating {len(processed_moviepy_clips_list)} clips for final animatic.");
462
- if len(processed_moviepy_clips_list)>1:final_video_output_clip=concatenate_videoclips(processed_moviepy_clips_list,padding=-transition_duration_val if transition_duration_val>0 else 0,method="compose")
463
- elif processed_moviepy_clips_list:final_video_output_clip=processed_moviepy_clips_list[0]
464
- if not final_video_output_clip:logger.error("Concatenation resulted in a None clip. Aborting.");return None
465
- logger.info(f"Concatenated animatic duration:{final_video_output_clip.duration:.2f}s")
466
- if transition_duration_val>0 and final_video_output_clip.duration>0:
467
- if final_video_output_clip.duration>transition_duration_val*2:final_video_output_clip=final_video_output_clip.fx(vfx.fadein,transition_duration_val).fx(vfx.fadeout,transition_duration_val)
468
- else:final_video_output_clip=final_video_output_clip.fx(vfx.fadein,min(transition_duration_val,final_video_output_clip.duration/2.0))
469
- if overall_narration_path and os.path.exists(overall_narration_path) and final_video_output_clip.duration>0:
470
- try:narration_audio_clip_mvpy=AudioFileClip(overall_narration_path);final_video_output_clip=final_video_output_clip.set_audio(narration_audio_clip_mvpy);logger.info("Overall narration added to animatic.")
471
- except Exception as e_narr_add:logger.error(f"Error adding narration to animatic:{e_narr_add}",exc_info=True)
472
- elif final_video_output_clip.duration<=0:logger.warning("Animatic has no duration. Audio not added.")
473
- if final_video_output_clip and final_video_output_clip.duration>0:
474
- final_output_path_str=os.path.join(self.output_dir,output_filename);logger.info(f"Writing final animatic video to:{final_output_path_str} (Duration:{final_video_output_clip.duration:.2f}s)")
475
- final_video_output_clip.write_videofile(final_output_path_str,fps=fps,codec='libx264',preset='medium',audio_codec='aac',temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'),remove_temp=True,threads=os.cpu_count()or 2,logger='bar',bitrate="5000k",ffmpeg_params=["-pix_fmt", "yuv420p"])
476
- logger.info(f"Animatic video created successfully:{final_output_path_str}");return final_output_path_str
477
- else:logger.error("Final animatic clip is invalid or has zero duration. Cannot write video file.");return None
478
- except Exception as e_vid_write_final:logger.error(f"Error during final animatic video file writing or composition:{e_vid_write_final}",exc_info=True);return None
 
 
479
  finally:
480
  logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` main finally block.")
481
- all_clips_to_close_list = processed_moviepy_clips_list + ([narration_audio_clip_mvpy] if narration_audio_clip_mvpy else []) + ([final_video_output_clip] if final_video_output_clip else [])
482
- for clip_item_to_close in all_clips_to_close_list:
483
- if clip_item_to_close and hasattr(clip_item_to_close, 'close'):
484
- try: clip_item_to_close.close()
485
- except Exception as e_final_close: logger.warning(f"Ignoring error while closing a MoviePy clip: {type(clip_item_to_close).__name__} - {e_final_close}")
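Taken together, the assembly above letterboxes each still into the target frame, hands MoviePy a C-contiguous RGB array, and forces yuv420p on the final write. A condensed standalone sketch of that pipeline, with a placeholder input file name:

import numpy as np
from PIL import Image
from moviepy.editor import ImageClip

frame_w, frame_h = 1280, 720
src = Image.open("scene_base.png").convert("RGBA")        # placeholder input image
src.thumbnail((frame_w, frame_h), Image.Resampling.LANCZOS)
canvas = Image.new("RGB", (frame_w, frame_h), (0, 0, 0))
canvas.paste(src, ((frame_w - src.width) // 2, (frame_h - src.height) // 2), src)
frame = np.ascontiguousarray(np.array(canvas, dtype=np.uint8))
clip = ImageClip(frame).set_duration(4.5)
clip.write_videofile("animatic.mp4", fps=24, codec="libx264", preset="medium",
                     ffmpeg_params=["-pix_fmt", "yuv420p"])  # yuv420p keeps broad player compatibility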
 
 
 
4
  import mimetypes
5
  import numpy as np
6
  import os
7
+ import openai
8
  import requests
9
  import io
10
  import time
11
  import random
12
  import logging
13
 
 
14
  from moviepy.editor import (ImageClip, VideoFileClip, concatenate_videoclips, TextClip,
15
  CompositeVideoClip, AudioFileClip)
16
  import moviepy.video.fx.all as vfx
17
 
18
+ try: # MONKEY PATCH
19
+ if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'):
 
20
  if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.Resampling.LANCZOS
21
+ elif hasattr(Image, 'LANCZOS'):
22
  if not hasattr(Image, 'ANTIALIAS'): Image.ANTIALIAS = Image.LANCZOS
23
+ elif not hasattr(Image, 'ANTIALIAS'): print("WARNING: Pillow ANTIALIAS/Resampling issue.")
24
+ except Exception as e_mp: print(f"WARNING: ANTIALIAS patch error: {e_mp}")
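As a quick sanity check of the shim above (a sketch, assuming a recent Pillow): code that still references the removed Image.ANTIALIAS keeps working once the attribute is mapped back to Resampling.LANCZOS.

from PIL import Image

if not hasattr(Image, "ANTIALIAS"):                  # Pillow 10+ removed ANTIALIAS
    Image.ANTIALIAS = Image.Resampling.LANCZOS       # same mapping as the patch above
thumb = Image.new("RGB", (64, 64)).resize((32, 32), Image.ANTIALIAS)
assert thumb.size == (32, 32)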
 
 
25
 
26
  logger = logging.getLogger(__name__)
27
+ # logger.setLevel(logging.DEBUG)
28
 
 
29
  ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
30
  try:
31
  from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
32
  from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
33
  ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
34
+ ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
35
+ except Exception as e_11l_imp: logger.warning(f"ElevenLabs client import failed: {e_11l_imp}. Audio disabled.")
 
36
 
37
  RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClientClass = None
38
  try:
39
  from runwayml import RunwayML as ImportedRunwayMLAPIClientClass
40
  RunwayMLAPIClientClass = ImportedRunwayMLAPIClientClass; RUNWAYML_SDK_IMPORTED = True
41
+ logger.info("RunwayML SDK imported.")
42
+ except Exception as e_rwy_imp: logger.warning(f"RunwayML SDK import failed: {e_rwy_imp}. RunwayML disabled.")
 
 
43
 
44
  class VisualEngine:
45
  DEFAULT_FONT_SIZE_PIL = 10; PREFERRED_FONT_SIZE_PIL = 20
46
  VIDEO_OVERLAY_FONT_SIZE = 30; VIDEO_OVERLAY_FONT_COLOR = 'white'
47
  DEFAULT_MOVIEPY_FONT = 'DejaVu-Sans-Bold'; PREFERRED_MOVIEPY_FONT = 'Liberation-Sans-Bold'
48
 
 
49
  def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
50
+ self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
51
+ self.font_filename_pil_preference = "DejaVuSans-Bold.ttf"
52
+ font_paths = [ self.font_filename_pil_preference, f"/usr/share/fonts/truetype/dejavu/{self.font_filename_pil_preference}", f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf", f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf", f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf"]
53
+ self.resolved_font_path_pil = next((p for p in font_paths if os.path.exists(p)), None)
54
+ self.active_font_pil = ImageFont.load_default(); self.active_font_size_pil = self.DEFAULT_FONT_SIZE_PIL; self.active_moviepy_font_name = self.DEFAULT_MOVIEPY_FONT
55
  if self.resolved_font_path_pil:
56
+ try: self.active_font_pil = ImageFont.truetype(self.resolved_font_path_pil, self.PREFERRED_FONT_SIZE_PIL); self.active_font_size_pil = self.PREFERRED_FONT_SIZE_PIL; logger.info(f"Pillow font: {self.resolved_font_path_pil} sz {self.active_font_size_pil}."); self.active_moviepy_font_name = 'DejaVu-Sans-Bold' if "dejavu" in self.resolved_font_path_pil.lower() else ('Liberation-Sans-Bold' if "liberation" in self.resolved_font_path_pil.lower() else self.DEFAULT_MOVIEPY_FONT)
57
+ except IOError as e_font: logger.error(f"Pillow font IOError '{self.resolved_font_path_pil}': {e_font}. Default.")
58
+ else: logger.warning("Preferred Pillow font not found. Default.")
59
+ self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
60
  self.video_frame_size = (1280, 720)
61
+ self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client_instance = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
62
+ if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings_obj = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
 
 
 
63
  else: self.elevenlabs_voice_settings_obj = None
 
64
  self.pexels_api_key = None; self.USE_PEXELS = False
65
  self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_ml_sdk_client_instance = None
 
66
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass and os.getenv("RUNWAYML_API_SECRET"):
67
  try: self.runway_ml_sdk_client_instance = RunwayMLAPIClientClass(); self.USE_RUNWAYML = True; logger.info("RunwayML Client init from env var at startup.")
68
+ except Exception as e_rwy_init: logger.error(f"Initial RunwayML client init failed: {e_rwy_init}"); self.USE_RUNWAYML = False
69
+ logger.info("VisualEngine initialized.")
 
70
 
71
+ def set_openai_api_key(self, k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E: {'Ready' if self.USE_AI_IMAGE_GENERATION else 'Disabled'}")
72
+ def set_elevenlabs_api_key(self, k, vid=None):
73
+ self.elevenlabs_api_key=k;
74
+ if vid: self.elevenlabs_voice_id = vid; logger.info(f"11L Voice ID updated to: {vid}")
75
+ if k and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
76
+ try: self.elevenlabs_client_instance = ElevenLabsAPIClient(api_key=k); self.USE_ELEVENLABS=True; logger.info(f"11L Client: Ready (Voice:{self.elevenlabs_voice_id})")
77
+ except Exception as e: logger.error(f"11L client init err: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client_instance=None
 
78
  else: self.USE_ELEVENLABS = False; logger.info(f"11L Disabled (key/SDK).")
79
+ def set_pexels_api_key(self, k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels: {'Ready' if self.USE_PEXELS else 'Disabled'}")
80
+ def set_runway_api_key(self, k):
81
+ self.runway_api_key = k
82
+ if k:
 
 
83
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClientClass:
84
  if not self.runway_ml_sdk_client_instance:
85
  try:
86
+ orig_secret = os.getenv("RUNWAYML_API_SECRET")
87
+ if not orig_secret: os.environ["RUNWAYML_API_SECRET"]=k; logger.info("Temp set RUNWAYML_API_SECRET for SDK.")
88
+ self.runway_ml_sdk_client_instance=RunwayMLAPIClientClass(); self.USE_RUNWAYML=True; logger.info("RunwayML Client init via set_key.")
89
+ if not orig_secret: del os.environ["RUNWAYML_API_SECRET"]; logger.info("Cleared temp RUNWAYML_API_SECRET.")
90
+ except Exception as e: logger.error(f"RunwayML Client init in set_key fail: {e}", exc_info=True); self.USE_RUNWAYML=False;self.runway_ml_sdk_client_instance=None
91
+ else: self.USE_RUNWAYML=True; logger.info("RunwayML Client already init.")
92
+ else: logger.warning("RunwayML SDK not imported. Disabled."); self.USE_RUNWAYML=False
93
+ else: self.USE_RUNWAYML=False; self.runway_ml_sdk_client_instance=None; logger.info("RunwayML Disabled (no key).")
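A hypothetical wiring sketch showing how these setters are meant to be called from an app entry point; the environment variable names (other than RUNWAYML_API_SECRET, which the module itself reads) are assumptions, not part of this file.

import os

engine = VisualEngine(output_dir="temp_cinegen_media")
engine.set_openai_api_key(os.getenv("OPENAI_API_KEY"))
engine.set_elevenlabs_api_key(os.getenv("ELEVENLABS_API_KEY"), vid=os.getenv("ELEVENLABS_VOICE_ID"))
engine.set_pexels_api_key(os.getenv("PEXELS_API_KEY"))
engine.set_runway_api_key(os.getenv("RUNWAYML_API_SECRET"))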
94
 
95
+ def _image_to_data_uri(self, img_path):
96
  try:
97
+ mime, _ = mimetypes.guess_type(img_path)
98
+ if not mime: ext=os.path.splitext(img_path)[1].lower(); mime_map={".png":"image/png",".jpg":"image/jpeg",".jpeg":"image/jpeg",".webp":"image/webp"}; mime=mime_map.get(ext,"application/octet-stream");
99
+ if mime=="application/octet-stream": logger.warning(f"Unknown MIME for {img_path}, using {mime}.")
100
+ with open(img_path,"rb") as f_img: enc_str=base64.b64encode(f_img.read()).decode('utf-8')
101
+ uri=f"data:{mime};base64,{enc_str}"; logger.debug(f"Data URI for {os.path.basename(img_path)} (MIME:{mime}): {uri[:100]}..."); return uri
102
+ except FileNotFoundError: logger.error(f"Img not found {img_path} for data URI."); return None
103
+ except Exception as e: logger.error(f"Error converting {img_path} to data URI:{e}",exc_info=True); return None
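The same data-URI construction as a standalone sketch; it yields the "data:image/png;base64,..." string that the Runway prompt_image parameter below is fed. The file path is a placeholder.

import base64
import mimetypes

def to_data_uri(path="scene_base.png"):              # placeholder path
    mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
    with open(path, "rb") as f:
        return f"data:{mime};base64,{base64.b64encode(f.read()).decode('utf-8')}"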
104
 
105
+ def _map_resolution_to_runway_ratio(self, w, h):
106
+ r_str=f"{w}:{h}"; supp_r=["1280:720","720:1280","1104:832","832:1104","960:960","1584:672"];
107
+ if r_str in supp_r: return r_str
108
+ logger.warning(f"Res {r_str} not in Gen-4 list. Default 1280:720."); return "1280:720"
109
 
110
+ def _get_text_dimensions(self, txt, font):
111
+ dh=getattr(font,'size',self.active_font_size_pil);
112
+ if not txt: return 0,dh
113
  try:
114
+ if hasattr(font,'getbbox'):b=font.getbbox(txt);w=b[2]-b[0];h=b[3]-b[1];return w,h if h>0 else dh
115
+ elif hasattr(font,'getsize'):w,h=font.getsize(txt);return w,h if h>0 else dh
116
+ else: return int(len(txt)*dh*0.6),int(dh*1.2)
117
+ except Exception as e:logger.warning(f"Err _get_text_dimensions:{e}");return int(len(txt)*self.active_font_size_pil*0.6),int(self.active_font_size_pil*1.2)
118
 
119
+ def _create_placeholder_image_content(self, desc, fname, sz=None):
120
+ # (Keep robust placeholder logic from before)
121
+ if sz is None: sz=self.video_frame_size
+ img=Image.new('RGB',sz,color=(20,20,40));drw=ImageDraw.Draw(img);pad=25;maxw=sz[0]-(2*pad);lns=[]
122
+ if not desc: desc="(Placeholder)"
123
+ wds=desc.split();curr_ln=""
124
+ for idx,w in enumerate(wds):
125
+ prosp_add=w+(" "if idx<len(wds)-1 else"");test_ln=curr_ln+prosp_add
126
+ curr_w,_=self._get_text_dimensions(test_ln,self.active_font_pil)
127
+ if curr_w==0 and test_ln.strip():curr_w=len(test_ln)*(self.active_font_size_pil*0.6)
128
+ if curr_w<=maxw:curr_ln=test_ln
 
 
 
129
  else:
130
+ if curr_ln.strip():lns.append(curr_ln.strip())
131
+ curr_ln=prosp_add
132
+ if curr_ln.strip():lns.append(curr_ln.strip())
133
+ if not lns and desc:
134
+ avg_cw,_=self._get_text_dimensions("W",self.active_font_pil);avg_cw=avg_cw or(self.active_font_size_pil*0.6)
135
+ cpl=int(maxw/avg_cw)if avg_cw>0 else 20;lns.append(desc[:cpl]+("..."if len(desc)>cpl else""))
136
+ elif not lns:lns.append("(PH Error)")
137
+ _,slh=self._get_text_dimensions("Ay",self.active_font_pil);slh=slh if slh>0 else self.active_font_size_pil+2
138
+ maxl=min(len(lns),(sz[1]-(2*pad))//(slh+2))if slh>0 else 1;maxl=max(1,maxl)
139
+ yp=pad+(sz[1]-(2*pad)-maxl*(slh+2))/2.0
140
+ for i in range(maxl):
141
+ lt=lns[i];lw,_=self._get_text_dimensions(lt,self.active_font_pil)
142
+ if lw==0 and lt.strip():lw=len(lt)*(self.active_font_size_pil*0.6)
143
+ xp=(sz[0]-lw)/2.0
144
+ try:drw.text((xp,yp),lt,font=self.active_font_pil,fill=(200,200,180))
145
+ except Exception as e:logger.error(f"Pillow d.text err:{e} for '{lt}'")
146
+ yp+=slh+2
147
+ if i==6 and maxl>7:
148
+ try:drw.text((xp,yp),"...",font=self.active_font_pil,fill=(200,200,180))
149
+ except Exception as e:logger.error(f"Pillow ellipsis err:{e}");break
150
+ fpath=os.path.join(self.output_dir,fname)
151
+ try:img.save(fpath);return fpath
152
+ except Exception as e:logger.error(f"Save PH img '{fpath}' err:{e}",exc_info=True);return None
 
153
 
154
+ def _search_pexels_image(self, q_str, out_fn_base):
155
+ # (Keep robust Pexels logic from before)
156
  if not self.USE_PEXELS or not self.pexels_api_key: return None
157
+ h={"Authorization":self.pexels_api_key};p={"query":q_str,"per_page":1,"orientation":"landscape","size":"large2x"}
158
+ base_n_px,_=os.path.splitext(out_fn_base);px_fn=base_n_px+f"_pexels_{random.randint(1000,9999)}.jpg";fp_px=os.path.join(self.output_dir,px_fn)
 
 
 
159
  try:
160
+ logger.info(f"Pexels: Search '{q_str}'");eff_q=" ".join(q_str.split()[:5]);p["query"]=eff_q
161
+ resp_px=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20);resp_px.raise_for_status();data_px=resp_px.json()
 
 
 
 
162
  if data_px.get("photos") and len(data_px["photos"]) > 0:
163
+ ph_det=data_px["photos"][0];ph_url=ph_det.get("src",{}).get("large2x")
164
+ if not ph_url:logger.warning(f"Pexels: 'large2x' URL missing for '{eff_q}'.");return None
165
+ img_resp=requests.get(ph_url,timeout=60);img_resp.raise_for_status();img_pil=Image.open(io.BytesIO(img_resp.content))
166
+ if img_pil.mode!='RGB':img_pil=img_pil.convert('RGB')
167
+ img_pil.save(fp_px);logger.info(f"Pexels: Saved to {fp_px}");return fp_px
168
+ else:logger.info(f"Pexels: No photos for '{eff_q}'.");return None
169
+ except requests.exceptions.RequestException as e:logger.error(f"Pexels ReqExc '{q_str}':{e}",exc_info=False);return None
170
+ except Exception as e:logger.error(f"Pexels GenErr '{q_str}':{e}",exc_info=True);return None
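For reference, the shape of the Pexels request and the response fields the method relies on, as a minimal sketch; the query string and API key are placeholders.

import requests

resp = requests.get("https://api.pexels.com/v1/search",
                    headers={"Authorization": "PEXELS_API_KEY"},          # placeholder key
                    params={"query": "rainy street", "per_page": 1,
                            "orientation": "landscape", "size": "large2x"},
                    timeout=20)
resp.raise_for_status()
photos = resp.json().get("photos", [])
photo_url = photos[0]["src"]["large2x"] if photos else None               # None when no results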
 
 
171
 
172
+ def _generate_video_clip_with_runwayml(self, motion_prompt, input_img_path, scene_id_base_fn, duration_s=5):
173
+ # (Keep robust RunwayML placeholder/integration logic from before)
174
+ if not self.USE_RUNWAYML or not self.runway_ml_sdk_client_instance: logger.warning("RunwayML skip: Not enabled/client not init."); return None
175
+ if not input_img_path or not os.path.exists(input_img_path): logger.error(f"Runway Gen-4 needs input img. Invalid: {input_img_path}"); return None
176
+ img_data_uri = self._image_to_data_uri(input_img_path);
177
+ if not img_data_uri: return None
178
+ rwy_dur = 10 if duration_s >= 8 else 5; rwy_ratio = self._map_resolution_to_runway_ratio(self.video_frame_size[0],self.video_frame_size[1])
179
+ rwy_base_name,_=os.path.splitext(scene_id_base_fn);rwy_out_fn=rwy_base_name+f"_runway_gen4_d{rwy_dur}s.mp4";rwy_out_fp=os.path.join(self.output_dir,rwy_out_fn)
180
+ logger.info(f"Runway Gen-4 task: motion='{motion_prompt[:70]}...', img='{os.path.basename(input_img_path)}', dur={rwy_dur}s, ratio='{rwy_ratio}'")
 
 
181
  try:
182
+ rwy_task_sub = self.runway_ml_sdk_client_instance.image_to_video.create(model='gen4_turbo',prompt_image=img_data_uri,prompt_text=motion_prompt,duration=rwy_dur,ratio=rwy_ratio)
183
+ rwy_task_id = rwy_task_sub.id; logger.info(f"Runway task ID: {rwy_task_id}. Polling...")
184
+ poll_s=10;max_p_count=36;poll_t_start=time.time()
185
+ while time.time()-poll_t_start < max_p_count*poll_s:
186
+ time.sleep(poll_s);rwy_task_det=self.runway_ml_sdk_client_instance.tasks.retrieve(id=rwy_task_id)
187
+ logger.info(f"Runway task {rwy_task_id} status: {rwy_task_det.status}")
188
+ if rwy_task_det.status=='SUCCEEDED':
189
+ rwy_out_url=getattr(getattr(rwy_task_det,'output',None),'url',None) or (getattr(rwy_task_det,'artifacts',None)and rwy_task_det.artifacts and hasattr(rwy_task_det.artifacts[0],'url')and rwy_task_det.artifacts[0].url) or (getattr(rwy_task_det,'artifacts',None)and rwy_task_det.artifacts and hasattr(rwy_task_det.artifacts[0],'download_url')and rwy_task_det.artifacts[0].download_url)
+ if not rwy_out_url:logger.error(f"Runway task {rwy_task_id} SUCCEEDED, no output URL. Details:{vars(rwy_task_det)if hasattr(rwy_task_det,'__dict__')else rwy_task_det}");return None
+ logger.info(f"Runway task {rwy_task_id} SUCCEEDED. Downloading: {rwy_out_url}")
+ vid_resp=requests.get(rwy_out_url,stream=True,timeout=300);vid_resp.raise_for_status()
+ with open(rwy_out_fp,'wb')as f:
+ for chk in vid_resp.iter_content(chunk_size=8192):f.write(chk)
+ logger.info(f"Runway Gen-4 video saved: {rwy_out_fp}");return rwy_out_fp
+ elif rwy_task_det.status in['FAILED','ABORTED','ERROR']:
+ rwy_err_msg=getattr(rwy_task_det,'error_message',None)or getattr(getattr(rwy_task_det,'output',None),'error',"Unknown Runway error.")
+ logger.error(f"Runway task {rwy_task_id} status:{rwy_task_det.status}. Error:{rwy_err_msg}");return None
+ logger.warning(f"Runway task {rwy_task_id} timed out.");return None
+ except AttributeError as e:logger.error(f"RunwayML SDK AttrError:{e}. SDK methods changed?",exc_info=True);return None
+ except Exception as e:logger.error(f"Runway Gen-4 API error:{e}",exc_info=True);return None
 
 
+ def _create_placeholder_video_content(self, text_desc, fname, duration=4, size=None):
+ # (Keep robust placeholder video logic from before)
+ if size is None: size = self.video_frame_size
+ fp = os.path.join(self.output_dir, fname); tc = None
+ try: tc = TextClip(text_desc, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=size, method='caption').set_duration(duration); tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
+ except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
  finally:
+ if tc and hasattr(tc, 'close'):
+ try: tc.close()
  except Exception as e_cl_phv: logger.warning(f"Ignoring error closing placeholder TextClip: {e_cl_phv}")

+ def generate_scene_asset(self, img_prompt, motion_prompt, scene_dict, scene_id_fn_base, gen_as_vid=False, rwy_dur=5):
+ # (Keep robust asset generation logic from before, ensuring parameters match)
+ asset_base_name,_=os.path.splitext(scene_id_fn_base); asset_info_obj={'path':None,'type':'none','error':True,'prompt_used':img_prompt,'error_message':'Asset gen init failed'}; base_img_path_for_rwy=None
+ base_img_fn = asset_base_name + ("_base_for_video.png" if gen_as_vid else ".png"); base_img_fp = os.path.join(self.output_dir, base_img_fn)
 
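+ # Asset generation falls back in order: DALL-E base image -> Pexels photo -> text placeholder image,
+ # then optionally hands the base image to RunwayML for image-to-video. The returned asset_info_obj dict
+ # carries 'path', 'type' ('image'/'video'/'none'), 'error', 'prompt_used' and, when relevant,
+ # 'revised_prompt', 'base_image_path' or 'error_message'.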
  if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
+ max_r,att_c=2,0
+ for att_idx in range(max_r):
+ att_c=att_idx+1
+ try:
+ logger.info(f"Att {att_c} DALL-E (base img): {img_prompt[:70]}...");oai_client=openai.OpenAI(api_key=self.openai_api_key,timeout=90.0);oai_resp=oai_client.images.generate(model=self.dalle_model,prompt=img_prompt,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid");oai_url=oai_resp.data[0].url;oai_rev_p=getattr(oai_resp.data[0],'revised_prompt',None)
+ if oai_rev_p:logger.info(f"DALL-E revised: {oai_rev_p[:70]}...")
+ oai_img_get_resp=requests.get(oai_url,timeout=120);oai_img_get_resp.raise_for_status();oai_pil_img=Image.open(io.BytesIO(oai_img_get_resp.content))
+ if oai_pil_img.mode!='RGB':oai_pil_img=oai_pil_img.convert('RGB')
+ oai_pil_img.save(base_img_fp);logger.info(f"DALL-E base img saved: {base_img_fp}");base_img_path_for_rwy=base_img_fp;asset_info_obj={'path':base_img_fp,'type':'image','error':False,'prompt_used':img_prompt,'revised_prompt':oai_rev_p};break
+ except openai.RateLimitError as e:logger.warning(f"OpenAI RateLimit Att {att_c}:{e}.Retry...");time.sleep(5*att_c);asset_info_obj['error_message']=str(e)
+ except openai.APIError as e:logger.error(f"OpenAI APIError Att {att_c}:{e}");asset_info_obj['error_message']=str(e);break
+ except requests.exceptions.RequestException as e:logger.error(f"Requests Err DALL-E Att {att_c}:{e}");asset_info_obj['error_message']=str(e);break
+ except Exception as e:logger.error(f"General DALL-E Err Att {att_c}:{e}",exc_info=True);asset_info_obj['error_message']=str(e);break
+ if asset_info_obj['error']:logger.warning(f"DALL-E failed after {att_c} attempts for base img.")
+ if asset_info_obj['error'] and self.USE_PEXELS:
+ logger.info("Trying Pexels for base img.");px_q=scene_dict.get('pexels_search_query_감독',f"{scene_dict.get('emotional_beat','')} {scene_dict.get('setting_description','')}");px_p=self._search_pexels_image(px_q,base_img_fn)
+ if px_p:base_img_path_for_rwy=px_p;asset_info_obj={'path':px_p,'type':'image','error':False,'prompt_used':f"Pexels:{px_q}"}
+ else:curr_err=asset_info_obj.get('error_message',"");asset_info_obj['error_message']=(curr_err+" Pexels failed for base.").strip()
+ if asset_info_obj['error']:
+ logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ph_p_txt=asset_info_obj.get('prompt_used',img_prompt);ph_img_p=self._create_placeholder_image_content(f"[Base Placeholder]{ph_p_txt[:70]}...",base_img_fn)
+ if ph_img_p:base_img_path_for_rwy=ph_img_p;asset_info_obj={'path':ph_img_p,'type':'image','error':False,'prompt_used':ph_p_txt}
+ else:curr_err=asset_info_obj.get('error_message',"");asset_info_obj['error_message']=(curr_err+" Base placeholder failed.").strip()
+ if gen_as_vid:
+ if not base_img_path_for_rwy:logger.error("RunwayML video: base img failed.");asset_info_obj['error']=True;asset_info_obj['error_message']=(asset_info_obj.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info_obj['type']='none';return asset_info_obj
  if self.USE_RUNWAYML:
+ rwy_vid_p=self._generate_video_clip_with_runwayml(motion_prompt,base_img_path_for_rwy,asset_base_name,rwy_dur)
+ if rwy_vid_p and os.path.exists(rwy_vid_p):asset_info_obj={'path':rwy_vid_p,'type':'video','error':False,'prompt_used':motion_prompt,'base_image_path':base_img_path_for_rwy}
+ else:logger.warning(f"RunwayML video failed for {asset_base_name}. Fallback to base img.");asset_info_obj['error']=True;asset_info_obj['error_message']=(asset_info_obj.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info_obj['path']=base_img_path_for_rwy;asset_info_obj['type']='image';asset_info_obj['prompt_used']=img_prompt
+ else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info_obj['error']=True;asset_info_obj['error_message']=(asset_info_obj.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info_obj['path']=base_img_path_for_rwy;asset_info_obj['type']='image';asset_info_obj['prompt_used']=img_prompt
+ return asset_info_obj

+ def generate_narration_audio(self, narration_text, output_fn="narration_overall.mp3"):
  # (Corrected version from previous response)
+ if not self.USE_ELEVENLABS or not self.elevenlabs_client_instance or not narration_text: logger.info("11L conditions not met. Skip audio."); return None
+ narration_fp = os.path.join(self.output_dir, output_fn)
  try:
+ logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): \"{narration_text[:70]}...\"")
+ stream_method = None
+ if hasattr(self.elevenlabs_client_instance,'text_to_speech') and hasattr(self.elevenlabs_client_instance.text_to_speech,'stream'): stream_method=self.elevenlabs_client_instance.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+ elif hasattr(self.elevenlabs_client_instance,'generate_stream'): stream_method=self.elevenlabs_client_instance.generate_stream; logger.info("Using 11L .generate_stream()")
+ elif hasattr(self.elevenlabs_client_instance,'generate'):
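+ # Feature-detect the ElevenLabs client: prefer the v1.x client.text_to_speech.stream() API, fall back to an
+ # older generate_stream() if present, and otherwise use the non-streaming generate() call below.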
+ logger.info("Using 11L .generate() (non-streaming).")
+ voice_p = Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings_obj) if Voice and self.elevenlabs_voice_settings_obj else str(self.elevenlabs_voice_id)
+ audio_b = self.elevenlabs_client_instance.generate(text=narration_text,voice=voice_p,model="eleven_multilingual_v2")
+ with open(narration_fp,"wb") as f_audio: f_audio.write(audio_b); logger.info(f"11L audio (non-stream): {narration_fp}"); return narration_fp
+ else: logger.error("No recognized 11L audio method."); return None
+ if stream_method:
+ voice_stream_params={"voice_id":str(self.elevenlabs_voice_id)}
  if self.elevenlabs_voice_settings_obj:
+ if hasattr(self.elevenlabs_voice_settings_obj,'model_dump'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.model_dump()
+ elif hasattr(self.elevenlabs_voice_settings_obj,'dict'): voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj.dict()
+ else: voice_stream_params["voice_settings"]=self.elevenlabs_voice_settings_obj
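+ # VoiceSettings may be a pydantic v2 model (.model_dump()), a pydantic v1 model (.dict()) or already a plain
+ # mapping; normalise it before passing it to the streaming call.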
+ audio_iter = stream_method(text=narration_text,model_id="eleven_multilingual_v2",**voice_stream_params)
+ with open(narration_fp,"wb") as f_audio_stream:
+ for chunk_item in audio_iter:
+ if chunk_item: f_audio_stream.write(chunk_item)
+ logger.info(f"11L audio (stream): {narration_fp}"); return narration_fp
+ except AttributeError as e_11l_attr: logger.error(f"11L SDK AttrError: {e_11l_attr}. SDK/methods changed?", exc_info=True); return None
+ except Exception as e_11l_gen: logger.error(f"11L audio gen error: {e_11l_gen}", exc_info=True); return None

  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
+ # (Keep the version with robust image processing, C-contiguous array, debug saves, and pix_fmt)
+ # This is the most critical section for the "blank/corrupted video" issue.
  if not asset_data_list: logger.warning("No assets for animatic."); return None
  processed_moviepy_clips_list = []; narration_audio_clip_mvpy = None; final_video_output_clip = None
  logger.info(f"Assembling from {len(asset_data_list)} assets. Target Frame: {self.video_frame_size}.")
+
  for i_asset, asset_info_item_loop in enumerate(asset_data_list):
  path_of_asset, type_of_asset, duration_for_scene = asset_info_item_loop.get('path'), asset_info_item_loop.get('type'), asset_info_item_loop.get('duration', 4.5)
  num_of_scene, action_in_key = asset_info_item_loop.get('scene_num', i_asset + 1), asset_info_item_loop.get('key_action', '')
  logger.info(f"S{num_of_scene}: Path='{path_of_asset}', Type='{type_of_asset}', Dur='{duration_for_scene}'s")
+
  if not (path_of_asset and os.path.exists(path_of_asset)): logger.warning(f"S{num_of_scene}: Not found '{path_of_asset}'. Skip."); continue
  if duration_for_scene <= 0: logger.warning(f"S{num_of_scene}: Invalid duration ({duration_for_scene}s). Skip."); continue
+
+ active_scene_clip = None # Clip for this iteration
  try:
  if type_of_asset == 'image':
+ pil_img_original = Image.open(path_of_asset)
+ logger.debug(f"S{num_of_scene} (0-Load): Original loaded. Mode:{pil_img_original.mode}, Size:{pil_img_original.size}")
+ pil_img_original.save(os.path.join(self.output_dir,f"debug_0_ORIGINAL_S{num_of_scene}.png"))
+
+ img_rgba_intermediate = pil_img_original.convert('RGBA') if pil_img_original.mode != 'RGBA' else pil_img_original.copy().convert('RGBA')
+ logger.debug(f"S{num_of_scene} (1-ToRGBA): Converted to RGBA. Mode:{img_rgba_intermediate.mode}, Size:{img_rgba_intermediate.size}")
+ img_rgba_intermediate.save(os.path.join(self.output_dir,f"debug_1_AS_RGBA_S{num_of_scene}.png"))
+
+ thumbnailed_img_rgba = img_rgba_intermediate.copy()
+ resample_filter_pil = Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS') else Image.BILINEAR
+ thumbnailed_img_rgba.thumbnail(self.video_frame_size, resample_filter_pil)
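+ # PIL's thumbnail() resizes in place, preserves aspect ratio and never upscales, so the image simply fits
+ # inside self.video_frame_size; centring and padding to the exact frame happen on the canvas below.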
+ logger.debug(f"S{num_of_scene} (2-Thumbnail): Thumbnailed RGBA. Mode:{thumbnailed_img_rgba.mode}, Size:{thumbnailed_img_rgba.size}")
+ thumbnailed_img_rgba.save(os.path.join(self.output_dir,f"debug_2_THUMBNAIL_RGBA_S{num_of_scene}.png"))
+
+ canvas_for_compositing_rgba = Image.new('RGBA', self.video_frame_size, (0,0,0,0))
+ pos_x_paste = (self.video_frame_size[0] - thumbnailed_img_rgba.width) // 2
+ pos_y_paste = (self.video_frame_size[1] - thumbnailed_img_rgba.height) // 2
+ canvas_for_compositing_rgba.paste(thumbnailed_img_rgba, (pos_x_paste, pos_y_paste), thumbnailed_img_rgba)
+ logger.debug(f"S{num_of_scene} (3-PasteOnRGBA): Image pasted onto transparent RGBA canvas. Mode:{canvas_for_compositing_rgba.mode}, Size:{canvas_for_compositing_rgba.size}")
+ canvas_for_compositing_rgba.save(os.path.join(self.output_dir,f"debug_3_COMPOSITED_RGBA_S{num_of_scene}.png"))
+
+ final_rgb_image_for_pil = Image.new("RGB", self.video_frame_size, (0, 0, 0)) # Opaque black background
+ if canvas_for_compositing_rgba.mode == 'RGBA':
+ final_rgb_image_for_pil.paste(canvas_for_compositing_rgba, mask=canvas_for_compositing_rgba.split()[3])
+ else: final_rgb_image_for_pil.paste(canvas_for_compositing_rgba)
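+ # The thumbnail is centred on a transparent RGBA canvas and then flattened onto an opaque black RGB frame using
+ # its alpha channel as the paste mask, so any letterbox/pillarbox area is solid black rather than undefined alpha
+ # when the frame is later handed to MoviePy.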
+ logger.debug(f"S{num_of_scene} (4-ToRGB): Final RGB image created. Mode:{final_rgb_image_for_pil.mode}, Size:{final_rgb_image_for_pil.size}")
+
+ debug_path_img_pre_numpy = os.path.join(self.output_dir,f"debug_4_PRE_NUMPY_RGB_S{num_of_scene}.png");
+ final_rgb_image_for_pil.save(debug_path_img_pre_numpy);
+ logger.info(f"CRITICAL DEBUG: Saved PRE_NUMPY_RGB_S{num_of_scene} (image fed to NumPy) to {debug_path_img_pre_numpy}")
+
+ numpy_frame_arr = np.array(final_rgb_image_for_pil, dtype=np.uint8)
+ if not numpy_frame_arr.flags['C_CONTIGUOUS']: numpy_frame_arr = np.ascontiguousarray(numpy_frame_arr, dtype=np.uint8)
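+ # Forcing a C-contiguous uint8 (H, W, 3) array guards against stride/ordering surprises when MoviePy pipes the
+ # frame buffer to ffmpeg, which is one suspect for the blank/corrupted frames being debugged here.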
+ logger.debug(f"S{num_of_scene} (5-NumPy): Final NumPy array for MoviePy. Shape:{numpy_frame_arr.shape}, DType:{numpy_frame_arr.dtype}, Flags:{numpy_frame_arr.flags}")
+ if numpy_frame_arr.size == 0 or numpy_frame_arr.ndim != 3 or numpy_frame_arr.shape[2] != 3: logger.error(f"S{num_of_scene}: Invalid NumPy array shape/size ({numpy_frame_arr.shape}) for ImageClip. Skipping."); continue
+
+ base_image_clip_mvpy = ImageClip(numpy_frame_arr, transparent=False, ismask=False).set_duration(duration_for_scene)
+ logger.debug(f"S{num_of_scene} (6-ImageClip): Base ImageClip created. Duration: {base_image_clip_mvpy.duration}")
+
+ debug_path_moviepy_frame = os.path.join(self.output_dir,f"debug_7_MOVIEPY_FRAME_S{num_of_scene}.png")
+ try:
+     base_image_clip_mvpy.save_frame(debug_path_moviepy_frame, t=min(0.1, base_image_clip_mvpy.duration / 2 if base_image_clip_mvpy.duration > 0 else 0.1))
+     logger.info(f"CRITICAL DEBUG: Saved frame FROM MOVIEPY ImageClip for S{num_of_scene} to {debug_path_moviepy_frame}")
+ except Exception as e_save_mvpy_frame: logger.error(f"DEBUG: Error saving frame FROM MOVIEPY ImageClip S{num_of_scene}: {e_save_mvpy_frame}", exc_info=True)
+
+ fx_image_clip_mvpy = base_image_clip_mvpy
+ try:
+ scale_end_kb_val = random.uniform(1.03, 1.08)
+ if duration_for_scene > 0: fx_image_clip_mvpy = base_image_clip_mvpy.fx(vfx.resize, lambda t_val: 1 + (scale_end_kb_val - 1) * (t_val / duration_for_scene)).set_position('center'); logger.debug(f"S{num_of_scene} (8-KenBurns): Ken Burns applied.")
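+ # Simple Ken Burns effect: a time-dependent vfx.resize ramps the scale linearly from 1.0 to a random 1.03-1.08
+ # over the scene's duration, giving each still a slow push-in instead of a static frame.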
+ else: logger.warning(f"S{num_of_scene}: Duration zero, skipping Ken Burns.")
+ except Exception as e_kb_fx_loop: logger.error(f"S{num_of_scene} Ken Burns error: {e_kb_fx_loop}", exc_info=False)
+ active_scene_clip = fx_image_clip_mvpy
  elif type_of_asset == 'video':
+ # (Video processing logic as before)
  source_video_clip_obj=None
  try:
+ logger.debug(f"S{num_of_scene}: Loading VIDEO asset: {path_of_asset}")
  source_video_clip_obj=VideoFileClip(path_of_asset,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
  temp_video_clip_obj_loop=source_video_clip_obj
  if source_video_clip_obj.duration!=duration_for_scene:

  else:temp_video_clip_obj_loop=source_video_clip_obj.set_duration(source_video_clip_obj.duration);logger.info(f"S{num_of_scene} Video clip ({source_video_clip_obj.duration:.2f}s) shorter than target ({duration_for_scene:.2f}s).")
  active_scene_clip=temp_video_clip_obj_loop.set_duration(duration_for_scene)
  if active_scene_clip.size!=list(self.video_frame_size):active_scene_clip=active_scene_clip.resize(self.video_frame_size)
+ logger.debug(f"S{num_of_scene}: Video asset processed. Final duration for scene: {active_scene_clip.duration:.2f}s")
  except Exception as e_vid_load_loop:logger.error(f"S{num_of_scene} Video load error '{path_of_asset}':{e_vid_load_loop}",exc_info=True);continue
  finally:
+ if source_video_clip_obj and source_video_clip_obj is not active_scene_clip and hasattr(source_video_clip_obj,'close'):
+ try: source_video_clip_obj.close()
+ except Exception as e_close_src_vid: logger.warning(f"S{num_of_scene}: Error closing source VideoFileClip: {e_close_src_vid}")
+ else: logger.warning(f"S{num_of_scene} Unknown asset type '{type_of_asset}'. Skipping."); continue
+
  if active_scene_clip and action_in_key:
  try:
+ dur_text_overlay_val=min(active_scene_clip.duration-0.5,active_scene_clip.duration*0.8)if active_scene_clip.duration>0.5 else active_scene_clip.duration; start_text_overlay_val=0.25
+ if dur_text_overlay_val > 0:
+ text_clip_for_overlay_obj=TextClip(f"Scene {num_of_scene}\n{action_in_key}",fontsize=self.VIDEO_OVERLAY_FONT_SIZE,color=self.VIDEO_OVERLAY_FONT_COLOR,font=self.active_moviepy_font_name,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(dur_text_overlay_val).set_start(start_text_overlay_val).set_position(('center',0.92),relative=True)
+ active_scene_clip=CompositeVideoClip([active_scene_clip,text_clip_for_overlay_obj],size=self.video_frame_size,use_bgclip=True)
+ logger.debug(f"S{num_of_scene}: Text overlay composited.")
+ else: logger.warning(f"S{num_of_scene}: Text overlay duration zero or negative ({dur_text_overlay_val}). Skipping text overlay.")
+ except Exception as e_txt_comp_loop:logger.error(f"S{num_of_scene} TextClip compositing error:{e_txt_comp_loop}. Proceeding without text for this scene.",exc_info=True)
+ if active_scene_clip: processed_moviepy_clips_list.append(active_scene_clip); logger.info(f"S{num_of_scene}: Asset successfully processed. Clip duration: {active_scene_clip.duration:.2f}s. Added to final list.")
+ except Exception as e_asset_loop_main_exc: logger.error(f"MAJOR UNHANDLED ERROR processing asset for S{num_of_scene} (Path: {path_of_asset}): {e_asset_loop_main_exc}", exc_info=True)
  finally:
  if active_scene_clip and hasattr(active_scene_clip,'close'):
  try: active_scene_clip.close()
+ except Exception as e_close_active_err: logger.warning(f"S{num_of_scene}: Error closing active_scene_clip in error handler: {e_close_active_err}")
+ continue
+ if not processed_moviepy_clips_list: logger.warning("No MoviePy clips were successfully processed. Aborting animatic assembly before concatenation."); return None
  transition_duration_val=0.75
  try:
+ logger.info(f"Concatenating {len(processed_moviepy_clips_list)} processed clips for final animatic.");
+ if len(processed_moviepy_clips_list)>1: final_video_output_clip=concatenate_videoclips(processed_moviepy_clips_list, padding=-transition_duration_val if transition_duration_val > 0 else 0, method="compose")
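+ # With method="compose", negative padding makes consecutive clips overlap by transition_duration_val seconds;
+ # note this is a hard overlap - a true crossfade would also need a fadein/crossfadein on each clip.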
+ elif processed_moviepy_clips_list: final_video_output_clip=processed_moviepy_clips_list[0]
+ if not final_video_output_clip: logger.error("Concatenation resulted in a None clip. Aborting."); return None
+ logger.info(f"Concatenated animatic base duration:{final_video_output_clip.duration:.2f}s")
+ if transition_duration_val > 0 and final_video_output_clip.duration > 0:
+ if final_video_output_clip.duration > transition_duration_val * 2: final_video_output_clip=final_video_output_clip.fx(vfx.fadein,transition_duration_val).fx(vfx.fadeout,transition_duration_val)
+ else: final_video_output_clip=final_video_output_clip.fx(vfx.fadein,min(transition_duration_val,final_video_output_clip.duration/2.0))
+ logger.debug("Applied fade in/out effects to final composite clip.")
+ if overall_narration_path and os.path.exists(overall_narration_path) and final_video_output_clip.duration > 0:
+ try: narration_audio_clip_mvpy=AudioFileClip(overall_narration_path); logger.info(f"Adding overall narration. Video duration: {final_video_output_clip.duration:.2f}s, Narration duration: {narration_audio_clip_mvpy.duration:.2f}s"); final_video_output_clip=final_video_output_clip.set_audio(narration_audio_clip_mvpy); logger.info("Overall narration successfully added to animatic.")
+ except Exception as e_narr_add_final:logger.error(f"Error adding overall narration to animatic:{e_narr_add_final}",exc_info=True)
+ elif final_video_output_clip.duration <= 0: logger.warning("Animatic has zero or negative duration before adding audio. Audio will not be added.")
+ if final_video_output_clip and final_video_output_clip.duration > 0:
+ final_output_path_str=os.path.join(self.output_dir,output_filename); logger.info(f"Writing final animatic video to: {final_output_path_str} (Target Duration: {final_video_output_clip.duration:.2f}s)")
+ num_threads = os.cpu_count(); num_threads = num_threads if isinstance(num_threads, int) and num_threads >= 1 else 2
+ final_video_output_clip.write_videofile(final_output_path_str, fps=fps, codec='libx264', preset='medium', audio_codec='aac', temp_audiofile=os.path.join(self.output_dir,f'temp-audio-{os.urandom(4).hex()}.m4a'), remove_temp=True, threads=num_threads, logger='bar', bitrate="5000k", ffmpeg_params=["-pix_fmt", "yuv420p"])
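+ # ffmpeg_params pins the output pixel format to yuv420p, the format most players (browsers, QuickTime) expect for
+ # H.264; leaving the pixel format to defaults is a common cause of videos that decode as blank/black in some players.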
+ logger.info(f"Animatic video created successfully: {final_output_path_str}"); return final_output_path_str
+ else: logger.error("Final animatic clip is invalid or has zero duration. Cannot write video file."); return None
+ except Exception as e_vid_write_final_op: logger.error(f"Error during final animatic video file writing or composition stage: {e_vid_write_final_op}", exc_info=True); return None
  finally:
  logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` main finally block.")
+ all_clips_for_closure = processed_moviepy_clips_list[:]
+ if narration_audio_clip_mvpy: all_clips_for_closure.append(narration_audio_clip_mvpy)
+ if final_video_output_clip: all_clips_for_closure.append(final_video_output_clip)
+ for clip_to_close_item_final in all_clips_for_closure:
+ if clip_to_close_item_final and hasattr(clip_to_close_item_final, 'close'):
+ try: clip_to_close_item_final.close()
+ except Exception as e_final_clip_close_op: logger.warning(f"Ignoring error while closing a MoviePy clip ({type(clip_to_close_item_final).__name__}): {e_final_clip_close_op}")