mgbam committed
Commit 4da81e5 · verified · 1 Parent(s): d44d308

Update core/visual_engine.py

Files changed (1)
  1. core/visual_engine.py +69 -94
core/visual_engine.py CHANGED
@@ -1,6 +1,6 @@
  # core/visual_engine.py
  from PIL import Image, ImageDraw, ImageFont, ImageOps
- import base64 # For Data URI conversion

  # --- MONKEY PATCH ---
  try:
@@ -22,7 +22,7 @@ import io
  import time
  import random
  import logging
- import mimetypes # For Data URI

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
@@ -75,12 +75,8 @@ class VisualEngine:
  self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
  try:
- if os.getenv("RUNWAYML_API_SECRET"):
- self.runway_client = RunwayMLAPIClient()
- logger.info("RunwayML Client initialized using RUNWAYML_API_SECRET env var.")
- # else: # No explicit else, will be handled by set_runway_api_key if key provided later
- except Exception as e_runway_init:
- logger.error(f"Failed to initialize RunwayML client during __init__: {e_runway_init}", exc_info=True)

  logger.info("VisualEngine initialized.")

@@ -100,17 +96,10 @@ class VisualEngine:
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
  if not self.runway_client:
  try:
- if not os.getenv("RUNWAYML_API_SECRET"):
- logger.info("Setting RUNWAYML_API_SECRET environment variable from provided key for SDK.")
- os.environ["RUNWAYML_API_SECRET"] = k
- self.runway_client = RunwayMLAPIClient()
- self.USE_RUNWAYML = True
- logger.info("RunwayML Client initialized successfully via set_runway_api_key.")
- except Exception as e_client_init:
- logger.error(f"RunwayML Client initialization failed in set_runway_api_key: {e_client_init}", exc_info=True)
- self.USE_RUNWAYML = False
- else: # Client already initialized
- self.USE_RUNWAYML = True; logger.info("RunwayML Client was already initialized.")
  else: logger.warning("RunwayML SDK not imported. API key set, but integration requires SDK."); self.USE_RUNWAYML = False
  else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

@@ -128,76 +117,46 @@ class VisualEngine:
  except Exception as e: logger.error(f"Error converting {image_path} to data URI: {e}", exc_info=True); return None

  def _map_resolution_to_runway_ratio(self, width, height):
- # Based on Gen-4 supported ratios: "1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"
- ratio_str = f"{width}:{height}"
- supported_ratios = ["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"]
  if ratio_str in supported_ratios: return ratio_str
- logger.warning(f"Resolution {ratio_str} not directly supported by Gen-4. Defaulting to 1280:720.")
- return "1280:720"

  def _get_text_dimensions(self,text_content,font_obj):
- # (Corrected version from previous, assuming font_obj.size exists or font_size_pil is fallback)
  default_char_height = getattr(font_obj, 'size', self.font_size_pil)
  if not text_content: return 0, default_char_height
  try:
- if hasattr(font_obj,'getbbox'): # Pillow 8.0.0+
- bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]
- return w, h if h > 0 else default_char_height
- elif hasattr(font_obj,'getsize'): # Older Pillow
- w,h=font_obj.getsize(text_content)
- return w, h if h > 0 else default_char_height
- else: # Fallback if no standard method (should not happen for ImageFont)
- return int(len(text_content)*default_char_height*0.6),int(default_char_height*1.2)
- except Exception as e:
- logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}")
- return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2) # Fallback to global default

  def _create_placeholder_image_content(self,text_description,filename,size=None):
- # <<< THIS IS THE CORRECTED METHOD >>>
  if size is None: size = self.video_frame_size
  img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
  if not text_description: text_description="(Placeholder Image)"
  words=text_description.split();current_line=""
  for word_idx, word in enumerate(words):
- # Add space correctly, not after the very last word of the text
  prospective_line_addition = word + (" " if word_idx < len(words) - 1 else "")
  test_line = current_line + prospective_line_addition
-
  current_line_width, _ = self._get_text_dimensions(test_line, self.font)
- if current_line_width == 0 and test_line.strip(): # Estimate if Pillow returns 0
- current_line_width = len(test_line) * (self.font_size_pil * 0.6)
-
- if current_line_width <= max_w:
- current_line = test_line
- else: # Word doesn't fit
- if current_line.strip(): # Add previous line if it had content
- lines.append(current_line.strip())
- current_line = prospective_line_addition # Start new line with current word (plus its space if not last)
- # If the word itself is too long for a line, it will just be one long line.
- # Pillow's d.text will handle overflow if text anchor isn't 'lt' (left-top).
- # For centered text, it might go off-canvas; more complex word splitting needed for that.
-
- if current_line.strip(): # Add any remaining part
- lines.append(current_line.strip())
-
  if not lines and text_description:
  avg_char_width, _ = self._get_text_dimensions("W", self.font)
- if avg_char_width == 0: avg_char_width = self.font_size_pil * 0.6 # Estimate
  chars_per_line = int(max_w / avg_char_width) if avg_char_width > 0 else 20
  lines.append(text_description[:chars_per_line] + ("..." if len(text_description) > chars_per_line else ""))
- elif not lines:
- lines.append("(Placeholder Error)")
-
  _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
  max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2)) if single_line_h > 0 else 1
  if max_lines_to_display <=0: max_lines_to_display = 1
-
- y_text_start = padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
- y_text = y_text_start
-
  for i in range(max_lines_to_display):
- line_content=lines[i]
- line_w,_=self._get_text_dimensions(line_content,self.font)
  if line_w == 0 and line_content.strip(): line_w = len(line_content) * (self.font_size_pil * 0.6)
  x_text=(size[0]-line_w)/2.0
  try: d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180))
@@ -211,20 +170,44 @@ class VisualEngine:
  try:img.save(filepath);return filepath
  except Exception as e:logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True);return None

- def _search_pexels_image(self, q, ofnb):
- # (Keep as before)
- if not self.USE_PEXELS or not self.pexels_api_key: return None; h={"Authorization":self.pexels_api_key};p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"}
- pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg");fp=os.path.join(self.output_dir,pfn)
- try: logger.info(f"Pexels search: '{q}'");eq=" ".join(q.split()[:5]);p["query"]=eq;r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20)
- r.raise_for_status();d=r.json()
- if d.get("photos") and len(d["photos"])>0:pu=d["photos"][0]["src"]["large2x"];ir=requests.get(pu,timeout=60);ir.raise_for_status();id_img=Image.open(io.BytesIO(ir.content))
- if id_img.mode!='RGB':id_img=id_img.convert('RGB');id_img.save(fp);logger.info(f"Pexels saved: {fp}");return fp # Fixed id to id_img
- else: id_img.save(fp);logger.info(f"Pexels saved (was RGB): {fp}");return fp # Save even if already RGB
- else: logger.info(f"No Pexels for: '{eq}'") # This else was misplaced
- except Exception as e:logger.error(f"Pexels error ('{q}'): {e}",exc_info=True);return None # Fixed indent

  def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
- # (Updated RunwayML integration)
  if not self.USE_RUNWAYML or not self.runway_client: logger.warning("RunwayML not enabled/client not init. Skip video."); return None
  if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 needs input image. Path invalid: {input_image_path}"); return None
  image_data_uri = self._image_to_data_uri(input_image_path)
@@ -237,7 +220,7 @@ class VisualEngine:
  try:
  task = self.runway_client.image_to_video.create(model='gen4_turbo', prompt_image=image_data_uri, prompt_text=text_prompt_for_motion, duration=runway_duration, ratio=runway_ratio_str)
  logger.info(f"Runway Gen-4 task ID: {task.id}. Polling...")
- poll_interval=10; max_polls=36 # Max 6 mins
  for _ in range(max_polls):
  time.sleep(poll_interval); task_details = self.runway_client.tasks.retrieve(id=task.id)
  logger.info(f"Runway task {task.id} status: {task_details.status}")
@@ -258,7 +241,7 @@ class VisualEngine:
  except AttributeError as ae: logger.error(f"RunwayML SDK AttributeError: {ae}. SDK/methods might differ.", exc_info=True); return None
  except Exception as e: logger.error(f"Runway Gen-4 API error: {e}", exc_info=True); return None

- def _create_placeholder_video_content(self, td, fn, dur=4, sz=None): # Generic placeholder if input_image not available
  if sz is None: sz = self.video_frame_size; fp = os.path.join(self.output_dir, fn); tc = None
  try: tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur); tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
  except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
@@ -268,15 +251,13 @@ class VisualEngine:
  def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
  scene_data, scene_identifier_filename_base,
  generate_as_video_clip=False, runway_target_duration=5):
- # (Logic mostly as before, ensuring base image is robustly generated first)
  base_name, _ = os.path.splitext(scene_identifier_filename_base)
  asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
  input_image_for_runway_path = None
  base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png")
  base_image_filepath = os.path.join(self.output_dir, base_image_filename)

- # Attempt base image generation
- if self.USE_AI_IMAGE_GENERATION and self.openai_api_key: # DALL-E
  max_r, att_n = 2,0;
  for att_n in range(max_r):
  try:logger.info(f"Att {att_n+1} DALL-E (base img): {image_generation_prompt_text[:70]}...");cl=openai.OpenAI(api_key=self.openai_api_key,timeout=90.0);r=cl.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid");iu=r.data[0].url;rp=getattr(r.data[0],'revised_prompt',None);
@@ -286,17 +267,17 @@ class VisualEngine:
  except Exception as e:logger.error(f"DALL-E base img error:{e}",exc_info=True);asset_info['error_message']=str(e);break
  if asset_info['error']:logger.warning(f"DALL-E failed after {att_n+1} attempts for base img.")

- if asset_info['error'] and self.USE_PEXELS: # Pexels Fallback
  logger.info("Trying Pexels for base img.");pqt=scene_data.get('pexels_search_query_감독',f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}");pp=self._search_pexels_image(pqt,base_image_filename);
  if pp:input_image_for_runway_path=pp;asset_info={'path':pp,'type':'image','error':False,'prompt_used':f"Pexels:{pqt}"}
  else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Pexels failed for base.").strip()

- if asset_info['error']: # Placeholder Fallback
  logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ppt=asset_info.get('prompt_used',image_generation_prompt_text);php=self._create_placeholder_image_content(f"[Base Placeholder]{ppt[:70]}...",base_image_filename);
  if php:input_image_for_runway_path=php;asset_info={'path':php,'type':'image','error':False,'prompt_used':ppt}
  else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Base placeholder failed.").strip()

- if generate_as_video_clip: # Now attempt RunwayML if requested
  if not input_image_for_runway_path:logger.error("RunwayML video: base img failed.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info['type']='none';return asset_info
  if self.USE_RUNWAYML:
  logger.info(f"Runway Gen-4 video for {base_name} using base: {input_image_for_runway_path}")
@@ -304,10 +285,9 @@ class VisualEngine:
  if video_path and os.path.exists(video_path):asset_info={'path':video_path,'type':'video','error':False,'prompt_used':motion_prompt_text_for_video,'base_image_path':input_image_for_runway_path}
  else:logger.warning(f"RunwayML video failed for {base_name}. Fallback to base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
  else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
- return asset_info # Return image info if not video, or video result

  def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
- # (Keep as before)
  if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None; afp=os.path.join(self.output_dir,output_filename)
  try: logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None
  if hasattr(self.elevenlabs_client,'text_to_speech')and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()")
@@ -327,21 +307,16 @@ class VisualEngine:
  logger.info(f"11L audio (stream): {afp}");return afp
  except Exception as e:logger.error(f"11L audio error: {e}",exc_info=True);return None

-
  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
- # (Keep as in the version that has the robust image processing, C-contiguous array, and debug image saves)
  if not asset_data_list: logger.warning("No assets for animatic."); return None
  processed_clips = []; narration_clip = None; final_clip = None
  logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
-
  for i, asset_info in enumerate(asset_data_list):
  asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
  scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
  logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")
-
  if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip."); continue
  if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip."); continue
-
  current_scene_mvpy_clip = None
  try:
  if asset_type == 'image':
@@ -372,7 +347,7 @@ class VisualEngine:
  else:
  if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur)
  else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
- current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur) # Ensure target duration for concatenation
  if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
  except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue
  finally:
@@ -418,7 +393,7 @@ class VisualEngine:
  except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
  finally:
  logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
- all_clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else []) # Corrected variable name
  for clip_obj_to_close in all_clips_to_close:
  if clip_obj_to_close and hasattr(clip_obj_to_close, 'close'):
  try: clip_obj_to_close.close()
 
core/visual_engine.py (updated file)

  # core/visual_engine.py
  from PIL import Image, ImageDraw, ImageFont, ImageOps
+ import base64

  # --- MONKEY PATCH ---
  try:
 
  import time
  import random
  import logging
+ import mimetypes

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
 
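The trimmed comments on the base64 and mimetypes imports above pointed at their purpose: they back the _image_to_data_uri helper that the Runway Gen-4 call uses further down. The helper's body is not part of this diff, so the following is only a minimal standalone sketch of the conventional pattern (guess the MIME type, base64-encode the bytes); the function name and defaults here are illustrative, not the class's actual code.

import base64
import mimetypes
from typing import Optional

def image_to_data_uri(image_path: str) -> Optional[str]:
    # Illustrative sketch only; VisualEngine._image_to_data_uri is not shown in this commit.
    mime_type, _ = mimetypes.guess_type(image_path)
    mime_type = mime_type or "image/png"  # assumption: fall back to PNG when undetectable
    try:
        with open(image_path, "rb") as handle:
            encoded = base64.b64encode(handle.read()).decode("utf-8")
        return f"data:{mime_type};base64,{encoded}"
    except OSError:
        return None
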
  self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
  try:
+ if os.getenv("RUNWAYML_API_SECRET"): self.runway_client = RunwayMLAPIClient(); logger.info("RunwayML Client initialized using RUNWAYML_API_SECRET env var.")
+ except Exception as e_runway_init: logger.error(f"Failed to initialize RunwayML client during __init__: {e_runway_init}", exc_info=True)

  logger.info("VisualEngine initialized.")
 
  if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
  if not self.runway_client:
  try:
+ if not os.getenv("RUNWAYML_API_SECRET"): os.environ["RUNWAYML_API_SECRET"] = k; logger.info("Setting RUNWAYML_API_SECRET env var from provided key.")
+ self.runway_client = RunwayMLAPIClient(); self.USE_RUNWAYML = True; logger.info("RunwayML Client initialized successfully via set_runway_api_key.")
+ except Exception as e_client_init: logger.error(f"RunwayML Client init failed in set_runway_api_key: {e_client_init}", exc_info=True); self.USE_RUNWAYML = False
+ else: self.USE_RUNWAYML = True; logger.info("RunwayML Client was already initialized.")
  else: logger.warning("RunwayML SDK not imported. API key set, but integration requires SDK."); self.USE_RUNWAYML = False
  else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")

 
  except Exception as e: logger.error(f"Error converting {image_path} to data URI: {e}", exc_info=True); return None

  def _map_resolution_to_runway_ratio(self, width, height):
+ ratio_str = f"{width}:{height}"; supported_ratios = ["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"]
  if ratio_str in supported_ratios: return ratio_str
+ logger.warning(f"Res {ratio_str} not directly Gen-4 supported. Default 1280:720."); return "1280:720"
 
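For reference, the mapping above simply passes supported Gen-4 ratios through and falls back to 1280:720 otherwise; a standalone restatement with a couple of illustrative checks (not the class method itself):

SUPPORTED_RATIOS = {"1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"}

def map_resolution_to_ratio(width: int, height: int) -> str:
    # Same decision as the method above: keep a supported ratio, otherwise default.
    ratio = f"{width}:{height}"
    return ratio if ratio in SUPPORTED_RATIOS else "1280:720"

assert map_resolution_to_ratio(1280, 720) == "1280:720"
assert map_resolution_to_ratio(1920, 1080) == "1280:720"  # unsupported size falls back
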
  def _get_text_dimensions(self,text_content,font_obj):
  default_char_height = getattr(font_obj, 'size', self.font_size_pil)
  if not text_content: return 0, default_char_height
  try:
+ if hasattr(font_obj,'getbbox'): bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]; return w, h if h > 0 else default_char_height
+ elif hasattr(font_obj,'getsize'): w,h=font_obj.getsize(text_content); return w, h if h > 0 else default_char_height
+ else: return int(len(text_content)*default_char_height*0.6),int(default_char_height*1.2)
+ except Exception as e: logger.warning(f"Error in _get_text_dimensions: {e}"); return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2)
 
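The getbbox/getsize branching above exists because Pillow changed its text-measurement API: getbbox is the current call, while getsize was deprecated in Pillow 9.2 and removed in Pillow 10. A minimal standalone illustration of the same guard:

from PIL import ImageFont

font = ImageFont.load_default()
text = "Sample"
if hasattr(font, "getbbox"):      # current Pillow API
    left, top, right, bottom = font.getbbox(text)
    width, height = right - left, bottom - top
else:                             # very old Pillow releases
    width, height = font.getsize(text)
print(width, height)
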
  def _create_placeholder_image_content(self,text_description,filename,size=None):
  if size is None: size = self.video_frame_size
  img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
  if not text_description: text_description="(Placeholder Image)"
  words=text_description.split();current_line=""
  for word_idx, word in enumerate(words):
  prospective_line_addition = word + (" " if word_idx < len(words) - 1 else "")
  test_line = current_line + prospective_line_addition
  current_line_width, _ = self._get_text_dimensions(test_line, self.font)
+ if current_line_width == 0 and test_line.strip(): current_line_width = len(test_line) * (self.font_size_pil * 0.6)
+ if current_line_width <= max_w: current_line = test_line
+ else:
+ if current_line.strip(): lines.append(current_line.strip())
+ current_line = prospective_line_addition
+ if current_line.strip(): lines.append(current_line.strip())
  if not lines and text_description:
  avg_char_width, _ = self._get_text_dimensions("W", self.font)
+ if avg_char_width == 0: avg_char_width = self.font_size_pil * 0.6
  chars_per_line = int(max_w / avg_char_width) if avg_char_width > 0 else 20
  lines.append(text_description[:chars_per_line] + ("..." if len(text_description) > chars_per_line else ""))
+ elif not lines: lines.append("(Placeholder Error)")
  _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
  max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2)) if single_line_h > 0 else 1
  if max_lines_to_display <=0: max_lines_to_display = 1
+ y_text_start = padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0; y_text = y_text_start
  for i in range(max_lines_to_display):
+ line_content=lines[i]; line_w,_=self._get_text_dimensions(line_content,self.font)
  if line_w == 0 and line_content.strip(): line_w = len(line_content) * (self.font_size_pil * 0.6)
  x_text=(size[0]-line_w)/2.0
  try: d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180))
 
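The wrapping loop above measures rendered pixel widths via _get_text_dimensions. As a point of comparison, a character-count approximation of the same greedy wrap can be done with the standard library; this is illustrative only and ignores per-glyph widths, which is exactly what the pixel-based version avoids.

import textwrap

def wrap_by_chars(text: str, max_chars: int = 40):
    # Rough stand-in for the pixel-width wrap above; max_chars is an assumed budget.
    return textwrap.wrap(text, width=max_chars) or ["(Placeholder Image)"]

print(wrap_by_chars("A long placeholder description that needs several lines"))
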
  try:img.save(filepath);return filepath
  except Exception as e:logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True);return None

+ def _search_pexels_image(self, query, output_filename_base):
+ # <<< CORRECTED METHOD >>>
+ if not self.USE_PEXELS or not self.pexels_api_key: return None
+ headers = {"Authorization": self.pexels_api_key}
+ params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large2x"}
+ pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4", f"_pexels_{random.randint(1000,9999)}.jpg")
+ filepath = os.path.join(self.output_dir, pexels_filename)
+ try:
+ logger.info(f"Pexels search: '{query}'")
+ effective_query = " ".join(query.split()[:5])
+ params["query"] = effective_query
+ response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
+ response.raise_for_status()
+ data = response.json()
+ if data.get("photos") and len(data["photos"]) > 0:
+ photo_url = data["photos"][0]["src"]["large2x"]
+ image_response = requests.get(photo_url, timeout=60)
+ image_response.raise_for_status()
+ img_data = Image.open(io.BytesIO(image_response.content))
+ if img_data.mode != 'RGB':
+ img_data = img_data.convert('RGB')
+ img_data.save(filepath)
+ logger.info(f"Pexels image saved: {filepath}")
+ return filepath
+ else:
+ logger.info(f"No photos found on Pexels for query: '{effective_query}'")
+ return None # Added explicit return
+ except requests.exceptions.RequestException as e_req:
+ logger.error(f"Pexels request error for query '{query}': {e_req}", exc_info=True)
+ except json.JSONDecodeError as e_json:
+ logger.error(f"Pexels JSON decode error for query '{query}': {e_json}", exc_info=True)
+ except IOError as e_io:
+ logger.error(f"Pexels image save error for query '{query}': {e_io}", exc_info=True)
+ except Exception as e:
+ logger.error(f"Unexpected Pexels error for query '{query}': {e}", exc_info=True)
+ return None # Ensure None is returned on any exception
 
  def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
  if not self.USE_RUNWAYML or not self.runway_client: logger.warning("RunwayML not enabled/client not init. Skip video."); return None
  if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 needs input image. Path invalid: {input_image_path}"); return None
  image_data_uri = self._image_to_data_uri(input_image_path)
 
  try:
  task = self.runway_client.image_to_video.create(model='gen4_turbo', prompt_image=image_data_uri, prompt_text=text_prompt_for_motion, duration=runway_duration, ratio=runway_ratio_str)
  logger.info(f"Runway Gen-4 task ID: {task.id}. Polling...")
+ poll_interval=10; max_polls=36
  for _ in range(max_polls):
  time.sleep(poll_interval); task_details = self.runway_client.tasks.retrieve(id=task.id)
  logger.info(f"Runway task {task.id} status: {task_details.status}")
 
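The trailing "# Max 6 mins" comment dropped by this commit still describes the intent of the constants kept above: 36 polls at a 10-second interval bound the wait at roughly six minutes.

poll_interval, max_polls = 10, 36
print(poll_interval * max_polls, "seconds")  # 360 s, i.e. about 6 minutes of polling
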
  except AttributeError as ae: logger.error(f"RunwayML SDK AttributeError: {ae}. SDK/methods might differ.", exc_info=True); return None
  except Exception as e: logger.error(f"Runway Gen-4 API error: {e}", exc_info=True); return None

+ def _create_placeholder_video_content(self, td, fn, dur=4, sz=None):
  if sz is None: sz = self.video_frame_size; fp = os.path.join(self.output_dir, fn); tc = None
  try: tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur); tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
  except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
 
  def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
  scene_data, scene_identifier_filename_base,
  generate_as_video_clip=False, runway_target_duration=5):
  base_name, _ = os.path.splitext(scene_identifier_filename_base)
  asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
  input_image_for_runway_path = None
  base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png")
  base_image_filepath = os.path.join(self.output_dir, base_image_filename)

+ if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
  max_r, att_n = 2,0;
  for att_n in range(max_r):
  try:logger.info(f"Att {att_n+1} DALL-E (base img): {image_generation_prompt_text[:70]}...");cl=openai.OpenAI(api_key=self.openai_api_key,timeout=90.0);r=cl.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid");iu=r.data[0].url;rp=getattr(r.data[0],'revised_prompt',None);
 
  except Exception as e:logger.error(f"DALL-E base img error:{e}",exc_info=True);asset_info['error_message']=str(e);break
  if asset_info['error']:logger.warning(f"DALL-E failed after {att_n+1} attempts for base img.")

+ if asset_info['error'] and self.USE_PEXELS:
  logger.info("Trying Pexels for base img.");pqt=scene_data.get('pexels_search_query_감독',f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}");pp=self._search_pexels_image(pqt,base_image_filename);
  if pp:input_image_for_runway_path=pp;asset_info={'path':pp,'type':'image','error':False,'prompt_used':f"Pexels:{pqt}"}
  else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Pexels failed for base.").strip()

+ if asset_info['error']:
  logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ppt=asset_info.get('prompt_used',image_generation_prompt_text);php=self._create_placeholder_image_content(f"[Base Placeholder]{ppt[:70]}...",base_image_filename);
  if php:input_image_for_runway_path=php;asset_info={'path':php,'type':'image','error':False,'prompt_used':ppt}
  else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Base placeholder failed.").strip()

+ if generate_as_video_clip:
  if not input_image_for_runway_path:logger.error("RunwayML video: base img failed.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info['type']='none';return asset_info
  if self.USE_RUNWAYML:
  logger.info(f"Runway Gen-4 video for {base_name} using base: {input_image_for_runway_path}")

  if video_path and os.path.exists(video_path):asset_info={'path':video_path,'type':'video','error':False,'prompt_used':motion_prompt_text_for_video,'base_image_path':input_image_for_runway_path}
  else:logger.warning(f"RunwayML video failed for {base_name}. Fallback to base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
  else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
+ return asset_info
 
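generate_scene_asset above works through a fixed fallback order: DALL-E base image, then Pexels, then a local placeholder, and only then the optional RunwayML image-to-video step. A generic sketch of that pattern (not the literal method, which also accumulates error messages in asset_info):

def first_successful(generators):
    # Try each (name, callable) in order and keep the first path that is produced.
    for name, generate in generators:
        path = generate()
        if path:
            return name, path
    return None, None

# Illustrative wiring only; the real callables live on VisualEngine.
# source, path = first_successful([("dalle", make_dalle_image),
#                                  ("pexels", make_pexels_image),
#                                  ("placeholder", make_placeholder_image)])
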
  def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
  if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None; afp=os.path.join(self.output_dir,output_filename)
  try: logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None
  if hasattr(self.elevenlabs_client,'text_to_speech')and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()")

  logger.info(f"11L audio (stream): {afp}");return afp
  except Exception as e:logger.error(f"11L audio error: {e}",exc_info=True);return None
 
  def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
  if not asset_data_list: logger.warning("No assets for animatic."); return None
  processed_clips = []; narration_clip = None; final_clip = None
  logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
  for i, asset_info in enumerate(asset_data_list):
  asset_path, asset_type, scene_dur = asset_info.get('path'), asset_info.get('type'), asset_info.get('duration', 4.5)
  scene_num, key_action = asset_info.get('scene_num', i + 1), asset_info.get('key_action', '')
  logger.info(f"S{scene_num}: Path='{asset_path}', Type='{asset_type}', Dur='{scene_dur}'s")
  if not (asset_path and os.path.exists(asset_path)): logger.warning(f"S{scene_num}: Not found '{asset_path}'. Skip."); continue
  if scene_dur <= 0: logger.warning(f"S{scene_num}: Invalid duration ({scene_dur}s). Skip."); continue
  current_scene_mvpy_clip = None
  try:
  if asset_type == 'image':
 
  else:
  if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur)
  else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
+ current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur)
  if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
  except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue
  finally:
 
  except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
  finally:
  logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
+ all_clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
  for clip_obj_to_close in all_clips_to_close:
  if clip_obj_to_close and hasattr(clip_obj_to_close, 'close'):
  try: clip_obj_to_close.close()
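The finally block above defensively closes every MoviePy clip that was opened during assembly. The same pattern, as a standalone sketch:

def close_clips(*clips):
    # Close anything that exposes .close(), ignoring failures, as the finally block does.
    for clip in clips:
        if clip is not None and hasattr(clip, "close"):
            try:
                clip.close()
            except Exception:
                pass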