mgbam committed on
Commit 59af6e7 · verified · 1 Parent(s): 3313da9

Update core/visual_engine.py

Files changed (1)
  1. core/visual_engine.py +231 -352
core/visual_engine.py CHANGED
@@ -24,315 +24,225 @@ import random
  import logging

  logger = logging.getLogger(__name__)
- logger.setLevel(logging.INFO) # Set default logging level for this module

  # --- ElevenLabs Client Import ---
- ELEVENLABS_CLIENT_IMPORTED = False
- ElevenLabsAPIClient = None
- Voice = None
- VoiceSettings = None
  try:
      from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
      from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
-     ElevenLabsAPIClient = ImportedElevenLabsClient
-     Voice = ImportedVoice
-     VoiceSettings = ImportedVoiceSettings
-     ELEVENLABS_CLIENT_IMPORTED = True
-     logger.info("ElevenLabs client components imported successfully.")
- except Exception as e_eleven:
-     logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio generation will be disabled.")

  # --- RunwayML Client Import (Placeholder) ---
- RUNWAYML_SDK_IMPORTED = False
- RunwayMLClient = None # Placeholder for the actual RunwayML client class
  try:
-     # Example: from runwayml import RunwayClient as ImportedRunwayMLClient
-     # RunwayMLClient = ImportedRunwayMLClient
-     # RUNWAYML_SDK_IMPORTED = True
-     logger.info("RunwayML SDK import is a placeholder. Actual SDK needed for Runway features.")
- except ImportError:
-     logger.warning("RunwayML SDK (placeholder) not found. RunwayML video generation will be disabled.")
- except Exception as e_runway_sdk:
-     logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML features disabled.")


  class VisualEngine:
      def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir
          os.makedirs(self.output_dir, exist_ok=True)
-
-         self.font_filename = "arial.ttf" # Or a more reliably found font like "DejaVuSans-Bold.ttf"
          font_paths_to_try = [
              self.font_filename,
              f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
              f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
-             f"/System/Library/Fonts/Supplemental/Arial.ttf", # macOS
-             f"C:/Windows/Fonts/arial.ttf", # Windows
-             f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
          ]
          self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
          self.font_size_pil = 20
          self.video_overlay_font_size = 30
          self.video_overlay_font_color = 'white'
-         # For MoviePy TextClip, use font names ImageMagick knows. Check with `convert -list font`.
-         # 'Liberation-Sans-Bold' is a good default if available.
-         self.video_overlay_font = 'DejaVuSans-Bold' if 'dejavu' in (self.font_path_pil or '').lower() else 'Liberation-Sans-Bold'
-

          try:
-             if self.font_path_pil:
-                 self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil)
-                 logger.info(f"Pillow font loaded: {self.font_path_pil}.")
-             else:
-                 self.font = ImageFont.load_default()
-                 logger.warning("Custom Pillow font not found. Using default. Text rendering for placeholders might be basic.")
-                 self.font_size_pil = 10 # Default Pillow font is small
-         except IOError as e_font:
-             logger.error(f"Pillow font loading IOError for '{self.font_path_pil or 'default'}': {e_font}. Using default.")
-             self.font = ImageFont.load_default()
-             self.font_size_pil = 10

          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
          self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
-
-         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False
-         self.elevenlabs_client = None
          self.elevenlabs_voice_id = default_elevenlabs_voice_id
-         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
-             self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
-
          self.pexels_api_key = None; self.USE_PEXELS = False
-         self.runway_api_key = None; self.USE_RUNWAYML = False
-         self.runway_client = None
-
          logger.info("VisualEngine initialized.")

-     def set_openai_api_key(self,k):
-         self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k)
-         logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
-
      def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
          self.elevenlabs_api_key=api_key
          if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
-             try:
-                 self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
-                 self.USE_ELEVENLABS=bool(self.elevenlabs_client)
-                 logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
              except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
          else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
-
-     def set_pexels_api_key(self,k):
-         self.pexels_api_key=k; self.USE_PEXELS=bool(k)
-         logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
-
      def set_runway_api_key(self, k):
          self.runway_api_key = k
          if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
-             try:
-                 # self.runway_client = RunwayMLClient(api_key=k) # Actual initialization
-                 self.USE_RUNWAYML = True
-                 logger.info(f"RunwayML Client (Placeholder with SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
-             except Exception as e: logger.error(f"RunwayML client (Placeholder with SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
-         elif k:
-             self.USE_RUNWAYML = True
-             logger.info("RunwayML API Key set. Using direct API calls or placeholder (SDK not fully integrated/imported).")
          else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

-     def _get_text_dimensions(self,text_content,font_obj):
-         if not text_content: return 0, (self.font.size if hasattr(self.font, 'size') else self.font_size_pil)
-         try:
-             if hasattr(font_obj,'getbbox'):
-                 bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]
-                 return w, h if h > 0 else font_obj.size
-             elif hasattr(font_obj,'getsize'):
-                 w,h=font_obj.getsize(text_content)
-                 return w, h if h > 0 else font_obj.size
-             else: return int(len(text_content)*font_obj.size*0.6), int(font_obj.size*1.2)
-         except Exception as e: logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}"); return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2)
-
-     def _create_placeholder_image_content(self,text_description,filename,size=None):
-         if size is None: size = self.video_frame_size
-         img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
-         if not text_description: text_description="(Placeholder: No prompt text)"
-         words=text_description.split();current_line=""
-         for word in words:
-             test_line=current_line+word+" ";
-             if self._get_text_dimensions(test_line,self.font)[0] <= max_w: current_line=test_line
              else:
-                 if current_line: lines.append(current_line.strip());
-                 current_line=word+" "
-         if current_line.strip(): lines.append(current_line.strip())
-         if not lines and text_description: lines.append(text_description[:int(max_w//(self._get_text_dimensions("A",self.font)[0] or 10))]+"..." if text_description else "(Text too long)")
-         elif not lines: lines.append("(Placeholder Text Error)")
-         _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
-         max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2)) if single_line_h > 0 else 1
-         if max_lines_to_display <=0: max_lines_to_display = 1
-         y_text_start = padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
-         y_text = y_text_start
-         for i in range(max_lines_to_display):
-             line_content=lines[i];line_w,_=self._get_text_dimensions(line_content,self.font);x_text=(size[0]-line_w)/2.0
-             d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180));y_text+=single_line_h+2
-             if i==6 and max_lines_to_display > 7: d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180));break
-         filepath=os.path.join(self.output_dir,filename);
-         try:img.save(filepath);return filepath
-         except Exception as e:logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True);return None
-
-     def _search_pexels_image(self, query, output_filename_base):
          if not self.USE_PEXELS or not self.pexels_api_key: return None
-         headers = {"Authorization": self.pexels_api_key}; params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"} # Request higher quality
-         pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4", f"_pexels_{random.randint(1000,9999)}.jpg")
-         filepath = os.path.join(self.output_dir, pexels_filename)
          try:
-             logger.info(f"Searching Pexels for: '{query}'"); effective_query = " ".join(query.split()[:5]); params["query"] = effective_query
-             response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
-             response.raise_for_status(); data = response.json()
-             if data.get("photos") and len(data["photos"]) > 0:
-                 photo_url = data["photos"][0]["src"]["large2x"]
-                 image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
-                 img_data = Image.open(io.BytesIO(image_response.content))
-                 if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
-                 img_data.save(filepath); logger.info(f"Pexels image saved: {filepath}"); return filepath
-             else: logger.info(f"No photos found on Pexels for query: '{effective_query}'")
-         except Exception as e: logger.error(f"Pexels search/download for query '{query}': {e}", exc_info=True)
          return None

-     def _generate_video_clip_with_runwayml(self, prompt_text, scene_identifier_filename_base, target_duration_seconds=4, input_image_path=None):
-         if not self.USE_RUNWAYML or not self.runway_api_key:
-             logger.warning("RunwayML not enabled or API key missing. Cannot generate video clip.")
-             return None
-         output_video_filename = scene_identifier_filename_base.replace(".png", "_runway.mp4") # More specific extension
-         output_video_filepath = os.path.join(self.output_dir, output_video_filename)
-         logger.info(f"Attempting RunwayML video generation for: {prompt_text[:100]}... (Target duration: {target_duration_seconds}s)")
-         # --- START ACTUAL RUNWAYML API INTERACTION (HYPOTHETICAL - NEEDS IMPLEMENTATION) ---
-         # Example:
-         # if self.runway_client:
-         #     try:
-         #         # result = self.runway_client.generate(text=prompt_text, duration=target_duration_seconds, seed_image=input_image_path)
-         #         # result.save(output_video_filepath)
-         #         # return output_video_filepath
-         #     except Exception as e_runway:
-         #         logger.error(f"Actual RunwayML generation error: {e_runway}", exc_info=True)
-         #         return None
-         # else: logger.warning("RunwayML client not initialized (placeholder).")
-         # --- END ACTUAL RUNWAYML API INTERACTION (HYPOTHETICAL) ---
-         logger.warning("Using PLACEHOLDER video generation for RunwayML as actual API calls are not implemented.")
-         return self._create_placeholder_video_content(f"[RunwayML Placeholder] {prompt_text}", output_video_filename, duration=target_duration_seconds)
-
-     def _create_placeholder_video_content(self, text_description, filename, duration=4, size=None):
-         if size is None: size = self.video_frame_size
-         filepath = os.path.join(self.output_dir, filename)
-         txt_clip = None # Initialize
          try:
-             txt_clip = TextClip(text_description, fontsize=50, color='white', font=self.video_overlay_font,
-                                 bg_color='black', size=size, method='caption').set_duration(duration)
-             txt_clip.write_videofile(filepath, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
-             logger.info(f"Placeholder video saved: {filepath}")
-             return filepath
-         except Exception as e: logger.error(f"Failed to create placeholder video {filepath}: {e}", exc_info=True); return None
          finally:
-             if txt_clip and hasattr(txt_clip, 'close'): txt_clip.close()
      def generate_scene_asset(self, image_prompt_text, scene_data, scene_identifier_filename_base,
                               generate_as_video_clip=False, runway_target_duration=4, input_image_for_runway=None):
          base_name, _ = os.path.splitext(scene_identifier_filename_base)
          asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_prompt_text, 'error_message': 'Generation not attempted'}
-
          if generate_as_video_clip and self.USE_RUNWAYML:
-             logger.info(f"Attempting RunwayML video clip generation for {base_name}")
-             video_path = self._generate_video_clip_with_runwayml(
-                 image_prompt_text, base_name,
-                 target_duration_seconds=runway_target_duration,
-                 input_image_path=input_image_for_runway
-             )
-             if video_path and os.path.exists(video_path):
-                 asset_info = {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': image_prompt_text}
-                 return asset_info
-             else: logger.warning(f"RunwayML video clip generation failed for {base_name}. Falling back to image."); asset_info['error_message'] = "RunwayML video generation failed."
-
-         image_filename_with_ext = base_name + ".png"
-         filepath = os.path.join(self.output_dir, image_filename_with_ext)
-         asset_info['type'] = 'image'
-
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
-             max_retries = 2; attempt_num = 0
-             for attempt_num in range(max_retries):
                  try:
-                     logger.info(f"Attempt {attempt_num+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
-                     client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-                     response = client.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
-                     image_url = response.data[0].url; revised_prompt = getattr(response.data[0], 'revised_prompt', None)
-                     if revised_prompt: logger.info(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
-                     image_response = requests.get(image_url, timeout=120); image_response.raise_for_status()
-                     img_data = Image.open(io.BytesIO(image_response.content));
-                     if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
-                     img_data.save(filepath); logger.info(f"AI Image (DALL-E) saved: {filepath}");
-                     asset_info = {'path': filepath, 'type': 'image', 'error': False, 'prompt_used': image_prompt_text, 'revised_prompt': revised_prompt}
-                     return asset_info # Success
-                 except openai.RateLimitError as e_rate: logger.warning(f"OpenAI Rate Limit on attempt {attempt_num+1}: {e_rate}. Retrying..."); time.sleep(5 * (attempt_num + 1)); asset_info['error_message'] = str(e_rate)
-                 except openai.APIError as e_api: logger.error(f"OpenAI API Error: {e_api}"); asset_info['error_message'] = str(e_api); break
-                 except requests.exceptions.RequestException as e_req: logger.error(f"Requests Error (DALL-E download): {e_req}"); asset_info['error_message'] = str(e_req); break
-                 except Exception as e_gen: logger.error(f"Generic error (DALL-E gen): {e_gen}", exc_info=True); asset_info['error_message'] = str(e_gen); break
-             if asset_info['error']: logger.warning(f"DALL-E generation failed after {attempt_num+1} attempts. Trying Pexels fallback...")
-
-         if self.USE_PEXELS and (asset_info['error'] or not (self.USE_AI_IMAGE_GENERATION and self.openai_api_key)):
-             pexels_query_text = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
-             pexels_path = self._search_pexels_image(pexels_query_text, image_filename_with_ext)
-             if pexels_path:
-                 asset_info = {'path': pexels_path, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pexels_query_text}"}
-                 return asset_info
-             current_error_msg = asset_info.get('error_message', "")
-             asset_info['error_message'] = (current_error_msg + " Pexels search also failed or disabled.").strip()
-             if not asset_info['error']: logger.warning("Pexels search failed or was disabled (DALL-E not attempted).")

          if asset_info['error']:
-             logger.warning("All primary generation methods failed. Using placeholder image.")
-             placeholder_prompt_text = asset_info.get('prompt_used', image_prompt_text)
-             placeholder_path = self._create_placeholder_image_content(f"[Fallback Placeholder] {placeholder_prompt_text[:100]}...", image_filename_with_ext)
-             if placeholder_path:
-                 asset_info = {'path': placeholder_path, 'type': 'image', 'error': False, 'prompt_used': placeholder_prompt_text}
-             else:
-                 current_error_msg = asset_info.get('error_message', "")
-                 asset_info['error_message'] = (current_error_msg + " Placeholder creation also failed.").strip()
          return asset_info

      def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
-         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
-             logger.info("ElevenLabs conditions not met. Skipping audio generation.")
-             return None
-         audio_filepath = os.path.join(self.output_dir, output_filename)
          try:
-             logger.info(f"Generating ElevenLabs audio (Voice ID: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
-             audio_stream_method = None
-             if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
-                 audio_stream_method = self.elevenlabs_client.text_to_speech.stream; logger.info("Using elevenlabs_client.text_to_speech.stream()")
-             elif hasattr(self.elevenlabs_client, 'generate_stream'): audio_stream_method = self.elevenlabs_client.generate_stream; logger.info("Using elevenlabs_client.generate_stream()")
-             elif hasattr(self.elevenlabs_client, 'generate'):
-                 logger.info("Using elevenlabs_client.generate() (non-streaming).")
-                 voice_param = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
-                 audio_bytes = self.elevenlabs_client.generate(text=text_to_narrate, voice=voice_param, model="eleven_multilingual_v2")
-                 with open(audio_filepath, "wb") as f: f.write(audio_bytes)
-                 logger.info(f"ElevenLabs audio (non-streamed) saved: {audio_filepath}"); return audio_filepath
-             else: logger.error("No recognized audio generation method found on ElevenLabs client."); return None
-
-             if audio_stream_method: # Streaming logic
-                 voice_param_for_stream = {"voice_id": str(self.elevenlabs_voice_id)}
                  if self.elevenlabs_voice_settings:
-                     if hasattr(self.elevenlabs_voice_settings, 'model_dump'): voice_param_for_stream["voice_settings"] = self.elevenlabs_voice_settings.model_dump() # Pydantic v2
-                     elif hasattr(self.elevenlabs_voice_settings, 'dict'): voice_param_for_stream["voice_settings"] = self.elevenlabs_voice_settings.dict() # Pydantic v1
-                     else: voice_param_for_stream["voice_settings"] = self.elevenlabs_voice_settings
-
-                 audio_data_iterator = audio_stream_method(text=text_to_narrate, model_id="eleven_multilingual_v2", **voice_param_for_stream)
-                 with open(audio_filepath, "wb") as f:
-                     for chunk in audio_data_iterator:
                          if chunk: f.write(chunk)
-                 logger.info(f"ElevenLabs audio (streamed) saved: {audio_filepath}"); return audio_filepath
-         except AttributeError as ae: logger.error(f"AttributeError with ElevenLabs client: {ae}. SDK method/params might be different.", exc_info=True)
-         except Exception as e: logger.error(f"Error generating ElevenLabs audio: {e}", exc_info=True)
          return None

      def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
          if not asset_data_list:
              logger.warning("No asset data provided for animatic assembly.")
@@ -347,7 +257,7 @@ class VisualEngine:
          for i, asset_info in enumerate(asset_data_list):
              asset_path = asset_info.get('path')
              asset_type = asset_info.get('type')
-             target_scene_duration = asset_info.get('duration', 4.5) # Duration for this scene in the animatic
              scene_num = asset_info.get('scene_num', i + 1)
              key_action = asset_info.get('key_action', '')
@@ -358,169 +268,138 @@ class VisualEngine:
              if target_scene_duration <= 0:
                  logger.warning(f"S{scene_num}: Invalid duration ({target_scene_duration}s). Skipping."); continue

-             current_scene_clip = None # The final MoviePy clip for this scene
              try:
                  if asset_type == 'image':
                      pil_img = Image.open(asset_path)
                      logger.debug(f"S{scene_num}: Loaded image. Mode: {pil_img.mode}, Size: {pil_img.size}")

-                     # 1. Ensure image is RGBA for consistent alpha handling during processing
                      img_rgba_source = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()

-                     # 2. Thumbnail the RGBA image
-                     img_thumbnail = img_rgba_source.copy() # Work on a copy
-                     resample_filter = Image.Resampling.LANCZOS if hasattr(Image.Resampling, 'LANCZOS') else (Image.ANTIALIAS if hasattr(Image, 'ANTIALIAS') else Image.BILINEAR)
                      img_thumbnail.thumbnail(self.video_frame_size, resample_filter)
                      logger.debug(f"S{scene_num}: Thumbnailed to: {img_thumbnail.size}")

-                     # 3. Create a target-sized RGBA canvas (fully transparent)
-                     canvas_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0))
-
-                     # 4. Paste the thumbnailed image (with its alpha) onto the center of the RGBA canvas
                      xo = (self.video_frame_size[0] - img_thumbnail.width) // 2
                      yo = (self.video_frame_size[1] - img_thumbnail.height) // 2
-                     canvas_rgba.paste(img_thumbnail, (xo, yo), img_thumbnail) # Use img_thumbnail's alpha as mask
-                     logger.debug(f"S{scene_num}: Image pasted onto transparent RGBA canvas.")
-
-                     # 5. Create a final RGB image by pasting the RGBA canvas onto an opaque background
-                     #    This flattens transparency and ensures an RGB image for MoviePy.
-                     final_rgb_image_for_moviepy = Image.new("RGB", self.video_frame_size, (0, 0, 0)) # Opaque black background
-                     final_rgb_image_for_moviepy.paste(canvas_rgba, mask=canvas_rgba.split()[3]) # Paste using alpha from canvas_rgba

-                     # --- CRITICAL DEBUG STEP: Save the image that will be fed to MoviePy ---
-                     debug_canvas_path = os.path.join(self.output_dir, f"debug_final_rgb_FOR_MOVIEPY_scene_{scene_num}.png")
-                     try:
-                         final_rgb_image_for_moviepy.save(debug_canvas_path)
-                         logger.info(f"DEBUG: Saved final RGB image for MoviePy (S{scene_num}) to {debug_canvas_path}")
-                     except Exception as e_save_canvas:
-                         logger.error(f"DEBUG: Failed to save final_rgb_image_for_moviepy (S{scene_num}): {e_save_canvas}")

-                     frame_np = np.array(final_rgb_image_for_moviepy) # Should be (H, W, 3) dtype uint8
-                     logger.debug(f"S{scene_num}: Converted to NumPy. Shape: {frame_np.shape}, Dtype: {frame_np.dtype}, Size: {frame_np.size}")

-                     if frame_np.size == 0: logger.error(f"S{scene_num}: NumPy array is EMPTY. Skipping."); continue
-                     if frame_np.ndim != 3 or frame_np.shape[2] != 3: logger.error(f"S{scene_num}: NumPy array has unexpected shape {frame_np.shape}. Skipping."); continue
-                     if frame_np.dtype != np.uint8: frame_np = frame_np.astype(np.uint8); logger.warning(f"S{scene_num}: Converted NumPy array dtype to uint8.")

                      current_clip_base = ImageClip(frame_np, transparent=False).set_duration(target_scene_duration)
-                     logger.debug(f"S{scene_num}: Base ImageClip created from NumPy array.")

-                     current_scene_clip_with_fx = current_clip_base # Start with base
                      try: # Ken Burns
                          end_scale = random.uniform(1.03, 1.08)
                          current_scene_clip_with_fx = current_clip_base.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / target_scene_duration) if target_scene_duration > 0 else 1).set_position('center')
-                         logger.debug(f"S{scene_num}: Ken Burns effect applied.")
                      except Exception as e_fx: logger.error(f"S{scene_num}: Ken Burns error: {e_fx}. Using static.", exc_info=False)
-
                      current_scene_clip = current_scene_clip_with_fx

                  elif asset_type == 'video':
-                     logger.debug(f"S{scene_num}: Loading video asset from {asset_path}")
-                     source_video_clip = None # Initialize
                      try:
                          source_video_clip = VideoFileClip(asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None)
-
-                         temp_clip_for_video_asset = source_video_clip
                          if source_video_clip.duration != target_scene_duration:
-                             if source_video_clip.duration > target_scene_duration:
-                                 temp_clip_for_video_asset = source_video_clip.subclip(0, target_scene_duration)
                              else: # Source is shorter
-                                 if target_scene_duration / source_video_clip.duration > 1.5 and source_video_clip.duration > 0.1:
-                                     temp_clip_for_video_asset = source_video_clip.loop(duration=target_scene_duration)
-                                 else: # Let it play its native length, will be set to target_scene_duration for concat
-                                     temp_clip_for_video_asset = source_video_clip.set_duration(source_video_clip.duration)
-                                     logger.info(f"S{scene_num}: Video clip ({source_video_clip.duration:.2f}s) shorter than scene target ({target_scene_duration:.2f}s).")
-
-                         current_scene_clip = temp_clip_for_video_asset.set_duration(target_scene_duration)
-
-                         if current_scene_clip.size != list(self.video_frame_size):
-                             logger.debug(f"S{scene_num}: Resizing video clip from {current_scene_clip.size} to {self.video_frame_size}")
-                             current_scene_clip = current_scene_clip.resize(self.video_frame_size)
-
-                         logger.debug(f"S{scene_num}: Video asset processed. Final duration for scene: {current_scene_clip.duration:.2f}s")
-                     except Exception as e_vid_load:
-                         logger.error(f"S{scene_num}: Error loading/processing video file '{asset_path}': {e_vid_load}", exc_info=True)
-                         if source_video_clip and hasattr(source_video_clip, 'close'): source_video_clip.close()
-                         continue # Skip this asset
-                     finally: # Close original source if it was opened and different from the final clip
-                         if source_video_clip and source_video_clip is not current_scene_clip and hasattr(source_video_clip, 'close'):
-                             source_video_clip.close()

                  else: logger.warning(f"S{scene_num}: Unknown asset type '{asset_type}'. Skipping."); continue

-                 # Add text overlay (common to both image and video assets)
-                 if current_scene_clip and key_action:
-                     logger.debug(f"S{scene_num}: Adding text overlay: '{key_action}'")
-                     text_overlay_duration = min(target_scene_duration - 0.5, target_scene_duration * 0.8) if target_scene_duration > 0.5 else target_scene_duration
-                     text_overlay_start = (target_scene_duration - text_overlay_duration) / 2.0
-                     if text_overlay_duration > 0:
-                         try:
-                             txt_clip = TextClip(f"Scene {scene_num}\n{key_action}",
-                                                 fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color,
-                                                 font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)',
-                                                 method='caption', align='West', size=(self.video_frame_size[0] * 0.9, None),
-                                                 kerning=-1, stroke_color='black', stroke_width=1.5
-                                                 ).set_duration(text_overlay_duration).set_start(text_overlay_start).set_position(('center', 0.92), relative=True)
-                             current_scene_clip = CompositeVideoClip([current_scene_clip, txt_clip], size=self.video_frame_size, use_bgclip=True)
-                             logger.debug(f"S{scene_num}: Text overlay composited.")
-                         except Exception as e_txt: logger.error(f"S{scene_num}: Error creating TextClip or CompositeVideoClip for text: {e_txt}. Using clip without text.", exc_info=True)

-                 if current_scene_clip:
-                     processed_moviepy_clips.append(current_scene_clip)
-                     logger.info(f"S{scene_num}: Asset successfully processed. Clip duration: {current_scene_clip.duration:.2f}s, Added to final list.")
-
-             except Exception as e_asset_proc:
-                 logger.error(f"MAJOR Error processing asset for Scene {scene_num} ({asset_path}): {e_asset_proc}", exc_info=True)
-                 # Ensure clip is closed if it was partially created
                  if current_scene_clip and hasattr(current_scene_clip, 'reader') and current_scene_clip.reader:
                      if hasattr(current_scene_clip, 'close'): current_scene_clip.close()
-                 elif current_scene_clip and hasattr(current_scene_clip, 'close'):
-                     current_scene_clip.close()

-         if not processed_moviepy_clips: logger.warning("No MoviePy clips were successfully processed. Aborting animatic assembly."); return None

          transition_duration = 0.75
          try:
-             if not processed_moviepy_clips: logger.error("No clips to concatenate after processing loop."); return None
-             logger.info(f"Concatenating {len(processed_moviepy_clips)} processed clips.")
-             if len(processed_moviepy_clips) > 1:
-                 final_composite_clip_obj = concatenate_videoclips(processed_moviepy_clips, padding = -transition_duration if transition_duration > 0 else 0, method="compose")
              elif processed_moviepy_clips: final_composite_clip_obj = processed_moviepy_clips[0]
-
-             if not final_composite_clip_obj: logger.error("Concatenation resulted in a None clip."); return None
              logger.info(f"Concatenated clip duration: {final_composite_clip_obj.duration:.2f}s")

-             if transition_duration > 0:
-                 if final_composite_clip_obj.duration > transition_duration * 2:
-                     final_composite_clip_obj = final_composite_clip_obj.fx(vfx.fadein, transition_duration).fx(vfx.fadeout, transition_duration)
-                 elif final_composite_clip_obj.duration > 0:
-                     final_composite_clip_obj = final_composite_clip_obj.fx(vfx.fadein, min(transition_duration, final_composite_clip_obj.duration/2.0))
-                 logger.debug("Applied fade in/out effects.")

              if overall_narration_path and os.path.exists(overall_narration_path) and final_composite_clip_obj.duration > 0:
-                 try:
-                     narration_audio_clip = AudioFileClip(overall_narration_path)
-                     logger.info(f"Adding narration. Video dur: {final_composite_clip_obj.duration:.2f}s, Audio dur: {narration_audio_clip.duration:.2f}s")
-                     final_composite_clip_obj = final_composite_clip_obj.set_audio(narration_audio_clip) # Audio will be cut/padded to video duration
-                     logger.info("Overall narration added to video.")
-                 except Exception as e_audio: logger.error(f"Error adding overall narration: {e_audio}", exc_info=True)
              elif final_composite_clip_obj.duration <= 0 : logger.warning("Video has no duration. Audio not added.")

              if final_composite_clip_obj and final_composite_clip_obj.duration > 0:
                  output_path = os.path.join(self.output_dir, output_filename)
-                 logger.info(f"Attempting to write final animatic: {output_path} (Duration: {final_composite_clip_obj.duration:.2f}s)")
-                 moviepy_logger_setting = 'bar' # Default to progress bar
-
                  final_composite_clip_obj.write_videofile(
-                     output_path, fps=fps, codec='libx264', preset='medium', audio_codec='aac',
                      temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
-                     remove_temp=True, threads=os.cpu_count() or 2, logger=moviepy_logger_setting, bitrate="5000k"
                  )
-                 logger.info(f"Animatic video successfully created: {output_path}")
-                 return output_path
-             else: logger.error("Final animatic clip is invalid or has zero duration. Cannot write file."); return None
-         except Exception as e_write: logger.error(f"Error during video file writing or final composition: {e_write}", exc_info=True); return None
          finally:
              logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
              clips_to_close = processed_moviepy_clips + ([narration_audio_clip] if narration_audio_clip else []) + ([final_composite_clip_obj] if final_composite_clip_obj else [])
  import logging

  logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)

  # --- ElevenLabs Client Import ---
+ ELEVENLABS_CLIENT_IMPORTED = False; ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
  try:
      from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
      from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
+     ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
+     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
+ except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")

  # --- RunwayML Client Import (Placeholder) ---
+ RUNWAYML_SDK_IMPORTED = False; RunwayMLClient = None
  try:
+     logger.info("RunwayML SDK import is a placeholder.")
+ except ImportError: logger.warning("RunwayML SDK (placeholder) not found. RunwayML disabled.")
+ except Exception as e_runway_sdk: logger.warning(f"Error importing RunwayML SDK (placeholder): {e_runway_sdk}. RunwayML disabled.")


  class VisualEngine:
      def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir
          os.makedirs(self.output_dir, exist_ok=True)
+         self.font_filename = "DejaVuSans-Bold.ttf" # More standard than arial.ttf
          font_paths_to_try = [
              self.font_filename,
              f"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
              f"/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
+             f"/System/Library/Fonts/Supplemental/Arial.ttf", f"C:/Windows/Fonts/arial.ttf",
+             f"/usr/local/share/fonts/truetype/mycustomfonts/arial.ttf" # Previous custom path
          ]
          self.font_path_pil = next((p for p in font_paths_to_try if os.path.exists(p)), None)
          self.font_size_pil = 20
          self.video_overlay_font_size = 30
          self.video_overlay_font_color = 'white'
+         self.video_overlay_font = 'DejaVu-Sans-Bold' # ImageMagick name for DejaVuSans-Bold

          try:
+             self.font = ImageFont.truetype(self.font_path_pil, self.font_size_pil) if self.font_path_pil else ImageFont.load_default()
+             if self.font_path_pil: logger.info(f"Pillow font loaded: {self.font_path_pil}.")
+             else: logger.warning("Using default Pillow font."); self.font_size_pil = 10
+         except IOError as e_font: logger.error(f"Pillow font loading IOError: {e_font}. Using default."); self.font = ImageFont.load_default(); self.font_size_pil = 10

          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
          self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
          self.video_frame_size = (1280, 720)
+         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
          self.elevenlabs_voice_id = default_elevenlabs_voice_id
+         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
          self.pexels_api_key = None; self.USE_PEXELS = False
+         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
          logger.info("VisualEngine initialized.")

+     def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")

      def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
          self.elevenlabs_api_key=api_key
          if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
+             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
              except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
          else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK).")
+     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")

      def set_runway_api_key(self, k):
          self.runway_api_key = k
          if k and RUNWAYML_SDK_IMPORTED and RunwayMLClient:
+             try: self.USE_RUNWAYML = True; logger.info(f"RunwayML Client (Placeholder SDK) {'Ready.' if self.USE_RUNWAYML else 'Failed Init.'}")
+             except Exception as e: logger.error(f"RunwayML client (Placeholder SDK) init error: {e}. Disabled.", exc_info=True); self.USE_RUNWAYML = False
+         elif k: self.USE_RUNWAYML = True; logger.info("RunwayML API Key set (direct API or placeholder).")
          else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

+     def _get_text_dimensions(self,tc,fo): di=fo.size if hasattr(fo,'size') else self.font_size_pil; return (0,di) if not tc else (lambda b:(b[2]-b[0],b[3]-b[1] if b[3]-b[1]>0 else di))(fo.getbbox(tc)) if hasattr(fo,'getbbox') else (lambda s:(s[0],s[1] if s[1]>0 else di))(fo.getsize(tc)) if hasattr(fo,'getsize') else (int(len(tc)*di*0.6),int(di*1.2))
+     def _create_placeholder_image_content(self,td,fn,sz=None):
+         # ... (Keeping this method as it was, assuming it's not the source of video corruption) ...
+         if sz is None: sz = self.video_frame_size
+         img=Image.new('RGB',sz,color=(20,20,40));d=ImageDraw.Draw(img);pd=25;mw=sz[0]-(2*pd);ls=[];
+         if not td: td="(Placeholder: No prompt text)"
+         ws=td.split();cl=""
+         for w in ws:
+             tl=cl+w+" ";
+             if self._get_text_dimensions(tl,self.font)[0] <= mw: cl=tl
              else:
+                 if cl: ls.append(cl.strip());
+                 cl=w+" "
+         if cl.strip(): ls.append(cl.strip())
+         if not ls and td: ls.append(td[:int(mw//(self._get_text_dimensions("A",self.font)[0] or 10))]+"..." if td else "(Text too long)")
+         elif not ls: ls.append("(Placeholder Text Error)")
+         _,slh=self._get_text_dimensions("Ay",self.font); slh = slh if slh > 0 else self.font_size_pil + 2
+         mld=min(len(ls),(sz[1]-(2*pd))//(slh+2)) if slh > 0 else 1
+         if mld <=0: mld = 1
+         yts = pd + (sz[1]-(2*pd) - mld*(slh+2))/2.0
+         yt = yts
+         for i in range(mld):
+             lc=ls[i];lw,_=self._get_text_dimensions(lc,self.font);xt=(sz[0]-lw)/2.0
+             d.text((xt,yt),lc,font=self.font,fill=(200,200,180));yt+=slh+2
+             if i==6 and mld > 7: d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break
+         fp=os.path.join(self.output_dir,fn);
+         try:img.save(fp);return fp
+         except Exception as e:logger.error(f"Saving placeholder image {fp}: {e}", exc_info=True);return None
+
+     def _search_pexels_image(self, q, ofnb):
+         # ... (Keeping this method as it was) ...
          if not self.USE_PEXELS or not self.pexels_api_key: return None
+         h = {"Authorization": self.pexels_api_key}; p = {"query": q, "per_page": 1, "orientation": "landscape", "size": "large2x"}
+         pfn = ofnb.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4", f"_pexels_{random.randint(1000,9999)}.jpg")
+         fp = os.path.join(self.output_dir, pfn)
          try:
+             logger.info(f"Pexels search: '{q}'"); eq = " ".join(q.split()[:5]); p["query"] = eq
+             r = requests.get("https://api.pexels.com/v1/search", headers=h, params=p, timeout=20)
+             r.raise_for_status(); d = r.json()
+             if d.get("photos") and len(d["photos"]) > 0:
+                 pu = d["photos"][0]["src"]["large2x"]
+                 ir = requests.get(pu, timeout=60); ir.raise_for_status()
+                 id = Image.open(io.BytesIO(ir.content))
+                 if id.mode != 'RGB': id = id.convert('RGB')
+                 id.save(fp); logger.info(f"Pexels image saved: {fp}"); return fp
+             else: logger.info(f"No photos Pexels: '{eq}'")
+         except Exception as e: logger.error(f"Pexels error ('{q}'): {e}", exc_info=True)
          return None

+     def _generate_video_clip_with_runwayml(self, pt, sifnb, tds=4, iip=None):
+         # ... (Keeping placeholder logic) ...
+         if not self.USE_RUNWAYML or not self.runway_api_key: logger.warning("RunwayML disabled."); return None
+         ovfn = sifnb.replace(".png", "_runway.mp4")
+         ovfp = os.path.join(self.output_dir, ovfn)
+         logger.info(f"RunwayML (Placeholder) for: {pt[:100]}... (Dur: {tds}s)")
+         return self._create_placeholder_video_content(f"[RunwayML Placeholder] {pt}", ovfn, duration=tds)
+
+     def _create_placeholder_video_content(self, td, fn, dur=4, sz=None):
+         # ... (Keeping placeholder logic) ...
+         if sz is None: sz = self.video_frame_size
+         fp = os.path.join(self.output_dir, fn)
+         tc = None
          try:
+             tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur)
+             tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
+             logger.info(f"Placeholder video: {fp}"); return fp
+         except Exception as e: logger.error(f"Placeholder video error {fp}: {e}", exc_info=True); return None
          finally:
+             if tc and hasattr(tc, 'close'): tc.close()

      def generate_scene_asset(self, image_prompt_text, scene_data, scene_identifier_filename_base,
                               generate_as_video_clip=False, runway_target_duration=4, input_image_for_runway=None):
+         # ... (Keeping this method as it was, it calls the above helpers) ...
          base_name, _ = os.path.splitext(scene_identifier_filename_base)
          asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_prompt_text, 'error_message': 'Generation not attempted'}
          if generate_as_video_clip and self.USE_RUNWAYML:
+             video_path = self._generate_video_clip_with_runwayml(image_prompt_text, base_name, runway_target_duration, input_image_for_runway)
+             if video_path and os.path.exists(video_path): return {'path': video_path, 'type': 'video', 'error': False, 'prompt_used': image_prompt_text}
+             else: logger.warning(f"RunwayML failed for {base_name}. Fallback to image."); asset_info['error_message'] = "RunwayML failed."
+
+         image_filename_with_ext = base_name + ".png"; filepath = os.path.join(self.output_dir, image_filename_with_ext); asset_info['type'] = 'image'
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
+             max_r, att_n = 2, 0
+             for att_n in range(max_r):
                  try:
+                     logger.info(f"Attempt {att_n+1} DALL-E: {image_prompt_text[:100]}...")
+                     cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
+                     r = cl.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
+                     iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
+                     if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
+                     ir = requests.get(iu, timeout=120); ir.raise_for_status()
+                     id = Image.open(io.BytesIO(ir.content));
+                     if id.mode != 'RGB': id = id.convert('RGB')
+                     id.save(filepath); logger.info(f"DALL-E saved: {filepath}");
+                     return {'path': filepath, 'type': 'image', 'error': False, 'prompt_used': image_prompt_text, 'revised_prompt': rp}
+                 except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); asset_info['error_message']=str(e)
+                 except Exception as e: logger.error(f"DALL-E error: {e}", exc_info=True); asset_info['error_message']=str(e); break
+             if asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts. Pexels fallback...")

+         if self.USE_PEXELS and (asset_info['error'] or not (self.USE_AI_IMAGE_GENERATION and self.openai_api_key)):
+             pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
+             pp = self._search_pexels_image(pqt, image_filename_with_ext)
+             if pp: return {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
+             cem = asset_info.get('error_message', ""); asset_info['error_message'] = (cem + " Pexels failed.").strip()
+             if not asset_info['error']: logger.warning("Pexels failed (DALL-E not tried).")
+
          if asset_info['error']:
+             logger.warning("All methods failed. Placeholder image.")
+             ppt = asset_info.get('prompt_used', image_prompt_text)
+             php = self._create_placeholder_image_content(f"[Fallback Placeholder] {ppt[:100]}...", image_filename_with_ext)
+             if php: return {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
+             else: cem=asset_info.get('error_message',"");asset_info['error_message']=(cem + " Placeholder failed.").strip()
          return asset_info

      def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
+         # ... (Keeping this method as it was - robust enough) ...
+         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("ElevenLabs conditions not met. Skip audio."); return None
+         afp = os.path.join(self.output_dir, output_filename)
          try:
+             logger.info(f"ElevenLabs audio (Voice: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
+             asm = None
+             if hasattr(self.elevenlabs_client,'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech,'stream'): asm=self.elevenlabs_client.text_to_speech.stream; logger.info("Using 11L .text_to_speech.stream()")
+             elif hasattr(self.elevenlabs_client,'generate_stream'): asm=self.elevenlabs_client.generate_stream; logger.info("Using 11L .generate_stream()")
+             elif hasattr(self.elevenlabs_client,'generate'):
+                 logger.info("Using 11L .generate() (non-streaming).")
+                 vp = Voice(voice_id=str(self.elevenlabs_voice_id),settings=self.elevenlabs_voice_settings) if Voice and self.elevenlabs_voice_settings else str(self.elevenlabs_voice_id)
+                 ab = self.elevenlabs_client.generate(text=text_to_narrate, voice=vp, model="eleven_multilingual_v2")
+                 with open(afp,"wb") as f: f.write(ab)
+                 logger.info(f"11L audio (non-streamed): {afp}"); return afp
+             else: logger.error("No recognized 11L audio gen method."); return None
+             if asm:
+                 vps = {"voice_id":str(self.elevenlabs_voice_id)}
                  if self.elevenlabs_voice_settings:
+                     if hasattr(self.elevenlabs_voice_settings,'model_dump'): vps["voice_settings"]=self.elevenlabs_voice_settings.model_dump()
+                     elif hasattr(self.elevenlabs_voice_settings,'dict'): vps["voice_settings"]=self.elevenlabs_voice_settings.dict()
+                     else: vps["voice_settings"]=self.elevenlabs_voice_settings
+                 adi = asm(text=text_to_narrate,model_id="eleven_multilingual_v2",**vps)
+                 with open(afp,"wb") as f:
+                     for chunk in adi:
                          if chunk: f.write(chunk)
+                 logger.info(f"11L audio (streamed): {afp}"); return afp
+         except Exception as e: logger.error(f"11L audio error: {e}", exc_info=True)
          return None
243
+ # =========================================================================
244
+ # ASSEMBLE ANIMATIC - FOCUS OF CORRUPTION DEBUGGING
245
+ # =========================================================================
246
  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
247
  if not asset_data_list:
248
  logger.warning("No asset data provided for animatic assembly.")
 
257
  for i, asset_info in enumerate(asset_data_list):
258
  asset_path = asset_info.get('path')
259
  asset_type = asset_info.get('type')
260
+ target_scene_duration = asset_info.get('duration', 4.5)
261
  scene_num = asset_info.get('scene_num', i + 1)
262
  key_action = asset_info.get('key_action', '')
263
 
 
268
  if target_scene_duration <= 0:
269
  logger.warning(f"S{scene_num}: Invalid duration ({target_scene_duration}s). Skipping."); continue
270
 
271
+ current_scene_clip = None
272
  try:
273
  if asset_type == 'image':
274
  pil_img = Image.open(asset_path)
275
  logger.debug(f"S{scene_num}: Loaded image. Mode: {pil_img.mode}, Size: {pil_img.size}")
276
 
277
+ # --- Robust Image Processing Pipeline for MoviePy ---
278
+ # 1. Convert to RGBA for consistent alpha handling
279
  img_rgba_source = pil_img.convert('RGBA') if pil_img.mode != 'RGBA' else pil_img.copy()
280
 
281
+ # 2. Thumbnail
282
+ img_thumbnail = img_rgba_source.copy()
283
+ resample_filter = Image.Resampling.LANCZOS if hasattr(Image.Resampling, 'LANCZOS') else Image.BILINEAR
284
  img_thumbnail.thumbnail(self.video_frame_size, resample_filter)
285
  logger.debug(f"S{scene_num}: Thumbnailed to: {img_thumbnail.size}")
286
 
287
+ # 3. Create RGBA canvas and paste image onto it (centers and handles transparency)
288
+ canvas_rgba = Image.new('RGBA', self.video_frame_size, (0, 0, 0, 0)) # Transparent background
 
 
289
  xo = (self.video_frame_size[0] - img_thumbnail.width) // 2
290
  yo = (self.video_frame_size[1] - img_thumbnail.height) // 2
291
+ canvas_rgba.paste(img_thumbnail, (xo, yo), img_thumbnail) # Use thumbnail's alpha as mask
292
+
293
+ # 4. Convert to final RGB image (flattens alpha against black) for MoviePy
294
+ final_rgb_image_for_moviepy = Image.new("RGB", self.video_frame_size, (0, 0, 0)) # Black background
295
+ final_rgb_image_for_moviepy.paste(canvas_rgba, mask=canvas_rgba.split()[3]) # Use alpha of canvas_rgba as mask
 
 
296
 
297
+ debug_canvas_path = os.path.join(self.output_dir, f"debug_PRE_NUMPY_S{scene_num}.png")
298
+ try: final_rgb_image_for_moviepy.save(debug_canvas_path); logger.info(f"DEBUG: Saved PRE-NUMPY image for S{scene_num} to {debug_canvas_path}")
299
+ except Exception as e_save: logger.error(f"DEBUG: Error saving PRE-NUMPY image for S{scene_num}: {e_save}")
300
+
301
+ # 5. Convert to C-contiguous NumPy array, dtype uint8
302
+ frame_np = np.array(final_rgb_image_for_moviepy, dtype=np.uint8)
303
+ if not frame_np.flags['C_CONTIGUOUS']:
304
+ frame_np = np.ascontiguousarray(frame_np, dtype=np.uint8)
305
+ logger.debug(f"S{scene_num}: Ensured NumPy array is C-contiguous.")
306
 
307
+ logger.debug(f"S{scene_num}: Final NumPy for MoviePy. Shape: {frame_np.shape}, Dtype: {frame_np.dtype}, Contiguous: {frame_np.flags['C_CONTIGUOUS']}")
 
308
 
309
+ if frame_np.size == 0 or frame_np.ndim != 3 or frame_np.shape[2] != 3:
310
+ logger.error(f"S{scene_num}: Invalid NumPy array shape/size for ImageClip. Shape: {frame_np.shape}. Skipping."); continue
311
+ # --- End Robust Image Processing ---
312
 
313
  current_clip_base = ImageClip(frame_np, transparent=False).set_duration(target_scene_duration)
314
+ logger.debug(f"S{scene_num}: Base ImageClip created.")
315
+
316
+ # --- DEBUG: Save frame from MoviePy ImageClip object ---
317
+ moviepy_frame_debug_path = os.path.join(self.output_dir, f"debug_MOVIEPY_FRAME_S{scene_num}.png")
318
+ try:
319
+ current_clip_base.save_frame(moviepy_frame_debug_path, t=0.1) # Save a frame at 0.1s
320
+ logger.info(f"DEBUG: Saved frame FROM MOVIEPY ImageClip for S{scene_num} to {moviepy_frame_debug_path}")
321
+ except Exception as e_save_mv_frame:
322
+ logger.error(f"DEBUG: Error saving frame FROM MOVIEPY ImageClip for S{scene_num}: {e_save_mv_frame}", exc_info=True)
323
+ # --- End DEBUG ---
324
 
325
+ current_scene_clip_with_fx = current_clip_base
326
  try: # Ken Burns
327
  end_scale = random.uniform(1.03, 1.08)
328
  current_scene_clip_with_fx = current_clip_base.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / target_scene_duration) if target_scene_duration > 0 else 1).set_position('center')
 
329
  except Exception as e_fx: logger.error(f"S{scene_num}: Ken Burns error: {e_fx}. Using static.", exc_info=False)
 
330
  current_scene_clip = current_scene_clip_with_fx
331
 
332
  elif asset_type == 'video':
333
+ # ... (Video processing logic - keep as in previous good version) ...
334
+ source_video_clip = None
335
  try:
336
  source_video_clip = VideoFileClip(asset_path, target_resolution=(self.video_frame_size[1], self.video_frame_size[0]) if self.video_frame_size else None)
337
+ temp_clip = source_video_clip
 
338
  if source_video_clip.duration != target_scene_duration:
339
+ if source_video_clip.duration > target_scene_duration: temp_clip = source_video_clip.subclip(0, target_scene_duration)
 
340
  else: # Source is shorter
341
+ if target_scene_duration / source_video_clip.duration > 1.5 and source_video_clip.duration > 0.1: temp_clip = source_video_clip.loop(duration=target_scene_duration)
342
+ else: temp_clip = source_video_clip.set_duration(source_video_clip.duration); logger.info(f"S{scene_num}: Video clip ({source_video_clip.duration:.2f}s) shorter than target ({target_scene_duration:.2f}s).")
343
+ current_scene_clip = temp_clip.set_duration(target_scene_duration)
344
+ if current_scene_clip.size != list(self.video_frame_size): current_scene_clip = current_scene_clip.resize(self.video_frame_size)
345
+ except Exception as e_vid_load: logger.error(f"S{scene_num}: Error loading/processing video '{asset_path}': {e_vid_load}", exc_info=True); continue
346
+ finally:
347
+ if source_video_clip and source_video_clip is not current_scene_clip and hasattr(source_video_clip, 'close'): source_video_clip.close()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
348
 
349
  else: logger.warning(f"S{scene_num}: Unknown asset type '{asset_type}'. Skipping."); continue
350
 
351
+ if current_scene_clip and key_action: # Add text overlay
352
+ try:
353
+ txt_clip = TextClip(f"Scene {scene_num}\n{key_action}",
354
+ fontsize=self.video_overlay_font_size, color=self.video_overlay_font_color,
355
+ font=self.video_overlay_font, bg_color='rgba(10,10,20,0.7)',
356
+ method='caption', align='West', size=(self.video_frame_size[0] * 0.9, None),
357
+ kerning=-1, stroke_color='black', stroke_width=1.5
358
+ ).set_duration(min(current_scene_clip.duration - 0.5, current_scene_clip.duration * 0.8) if current_scene_clip.duration > 0.5 else current_scene_clip.duration).set_start(0.25).set_position(('center', 0.92), relative=True)
359
+ current_scene_clip = CompositeVideoClip([current_scene_clip, txt_clip], size=self.video_frame_size, use_bgclip=True)
360
+ except Exception as e_txt: logger.error(f"S{scene_num}: Error with TextClip: {e_txt}. Using clip without text.", exc_info=True)
 
 
 
 
 
 
361
 
362
+ if current_scene_clip: processed_moviepy_clips.append(current_scene_clip); logger.info(f"S{scene_num}: Asset processed. Clip duration: {current_scene_clip.duration:.2f}s.")
363
+ except Exception as e_asset_proc: logger.error(f"MAJOR Error S{scene_num} ({asset_path}): {e_asset_proc}", exc_info=True)
364
+ finally: # Close individual clips if an error occurred during their specific processing
 
 
 
 
365
  if current_scene_clip and hasattr(current_scene_clip, 'reader') and current_scene_clip.reader:
366
  if hasattr(current_scene_clip, 'close'): current_scene_clip.close()
367
+ elif current_scene_clip and hasattr(current_scene_clip, 'close'): current_scene_clip.close()
 
368
 
369
+ if not processed_moviepy_clips: logger.warning("No clips processed. Aborting."); return None
370
 
371
  transition_duration = 0.75
372
  try:
373
+ logger.info(f"Concatenating {len(processed_moviepy_clips)} clips.")
374
+ if len(processed_moviepy_clips) > 1: final_composite_clip_obj = concatenate_videoclips(processed_moviepy_clips, padding = -transition_duration if transition_duration > 0 else 0, method="compose")
 
 
375
  elif processed_moviepy_clips: final_composite_clip_obj = processed_moviepy_clips[0]
376
+ if not final_composite_clip_obj: logger.error("Concatenation failed."); return None
 
377
  logger.info(f"Concatenated clip duration: {final_composite_clip_obj.duration:.2f}s")
378
 
379
+ if transition_duration > 0 and final_composite_clip_obj.duration > 0:
380
+ if final_composite_clip_obj.duration > transition_duration * 2: final_composite_clip_obj = final_composite_clip_obj.fx(vfx.fadein, transition_duration).fx(vfx.fadeout, transition_duration)
381
+ else: final_composite_clip_obj = final_composite_clip_obj.fx(vfx.fadein, min(transition_duration, final_composite_clip_obj.duration/2.0))
 
 
 
382
 
383
  if overall_narration_path and os.path.exists(overall_narration_path) and final_composite_clip_obj.duration > 0:
384
+ try: narration_audio_clip = AudioFileClip(overall_narration_path); final_composite_clip_obj = final_composite_clip_obj.set_audio(narration_audio_clip); logger.info("Narration added.")
385
+ except Exception as e_audio: logger.error(f"Adding narration error: {e_audio}", exc_info=True)
 
 
 
 
386
  elif final_composite_clip_obj.duration <= 0 : logger.warning("Video has no duration. Audio not added.")
387
 
388
  if final_composite_clip_obj and final_composite_clip_obj.duration > 0:
389
  output_path = os.path.join(self.output_dir, output_filename)
390
+ logger.info(f"Writing final video: {output_path} (Duration: {final_composite_clip_obj.duration:.2f}s)")
391
+ # --- Test different write parameters if corruption persists ---
 
392
  final_composite_clip_obj.write_videofile(
393
+ output_path, fps=fps, codec='libx264',
394
+ preset='medium', # Changed from ultrafast for potentially better encoding
395
+ audio_codec='aac',
396
  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
397
+ remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k"
398
+ # ffmpeg_params=["-pix_fmt", "yuv420p"] # Potentially force pixel format if issues persist
399
  )
400
+ logger.info(f"Video created: {output_path}"); return output_path
401
+ else: logger.error("Final clip invalid. Not writing."); return None
402
+ except Exception as e_write: logger.error(f"Video writing error: {e_write}", exc_info=True); return None
 
403
  finally:
404
  logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
405
  clips_to_close = processed_moviepy_clips + ([narration_audio_clip] if narration_audio_clip else []) + ([final_composite_clip_obj] if final_composite_clip_obj else [])
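
The "Robust Image Processing Pipeline" this commit adds (normalize to RGBA, thumbnail, center on a transparent canvas, flatten to RGB, then hand MoviePy a C-contiguous uint8 array) can be exercised in isolation. A minimal standalone sketch, assuming Pillow, NumPy, and MoviePy 1.x; the function name and frame size below are illustrative, not part of the commit:

import numpy as np
from PIL import Image
from moviepy.editor import ImageClip

FRAME_SIZE = (1280, 720)  # (width, height), mirroring self.video_frame_size

def pil_to_imageclip(path, duration=4.5):
    src = Image.open(path).convert("RGBA")            # 1. normalize alpha handling
    thumb = src.copy()
    resample = Image.Resampling.LANCZOS if hasattr(Image, "Resampling") else Image.BILINEAR
    thumb.thumbnail(FRAME_SIZE, resample)             # 2. fit inside the frame, keep aspect ratio
    canvas = Image.new("RGBA", FRAME_SIZE, (0, 0, 0, 0))
    x = (FRAME_SIZE[0] - thumb.width) // 2
    y = (FRAME_SIZE[1] - thumb.height) // 2
    canvas.paste(thumb, (x, y), thumb)                # 3. center, using the thumbnail's alpha as mask
    flat = Image.new("RGB", FRAME_SIZE, (0, 0, 0))    # 4. flatten alpha against opaque black
    flat.paste(canvas, mask=canvas.split()[3])
    frame = np.ascontiguousarray(np.array(flat, dtype=np.uint8))  # 5. C-contiguous uint8
    return ImageClip(frame, transparent=False).set_duration(duration)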
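The Ken Burns effect kept by this commit is just vfx.resize driven by a time-dependent scale factor. The same idea as a reusable helper, under the assumption of MoviePy 1.x and a clip with a set duration (the helper name and scale bounds are ours, the bounds matching the 1.03-1.08 range used above):

import random
from moviepy.editor import vfx

def ken_burns(clip, min_scale=1.03, max_scale=1.08):
    end_scale = random.uniform(min_scale, max_scale)
    d = clip.duration or 1.0  # guard against a missing duration
    # scale grows linearly from 1.0 at t=0 to end_scale at t=d
    return clip.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / d)).set_position('center')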
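The video-asset branch fits a source clip to the scene's target duration with a trim/loop/stretch rule. A hedged sketch of that rule as a standalone function, assuming the clip exposes MoviePy 1.x's subclip/loop/set_duration as used in the diff (the function name is ours):

def fit_to_duration(src, target):
    # Longer than target: trim. Much shorter (ratio > 1.5): loop.
    # Slightly shorter: keep native length, then force the target duration.
    if src.duration > target:
        out = src.subclip(0, target)
    elif src.duration > 0.1 and target / src.duration > 1.5:
        out = src.loop(duration=target)
    else:
        out = src
    return out.set_duration(target)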
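generate_narration_audio serializes VoiceSettings differently depending on the installed Pydantic major version. That compatibility shim, extracted as a sketch (any object exposing model_dump() or dict() works; the function name is ours):

def serialize_voice_settings(settings):
    if settings is None:
        return None
    if hasattr(settings, 'model_dump'):  # Pydantic v2 models
        return settings.model_dump()
    if hasattr(settings, 'dict'):        # Pydantic v1 models
        return settings.dict()
    return settings                      # already a plain mapping or SDK type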
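Finally, the commit leaves ffmpeg_params=["-pix_fmt", "yuv420p"] commented out in write_videofile as a next step if corruption persists. A minimal, self-contained sketch of how that hint would be enabled, using a ColorClip as a stand-in for the real animatic (clip contents and output name are placeholders, not the commit's behavior):

from moviepy.editor import ColorClip

clip = ColorClip(size=(1280, 720), color=(20, 20, 40), duration=2)
clip.write_videofile(
    'pixfmt_test.mp4', fps=24, codec='libx264', preset='medium',
    ffmpeg_params=['-pix_fmt', 'yuv420p'],  # force a widely supported pixel format
    logger='bar',
)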