mgbam committed (verified) · Commit d44d308 · 1 Parent(s): 6977089

Update core/visual_engine.py

Files changed (1):
  1. core/visual_engine.py +182 -254

core/visual_engine.py CHANGED
@@ -36,9 +36,9 @@ try:
     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
 except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
 
-RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClient = None # Renamed for clarity
 try:
-    from runwayml import RunwayML as ImportedRunwayMLClient # Actual SDK import
     RunwayMLAPIClient = ImportedRunwayMLClient
     RUNWAYML_SDK_IMPORTED = True
     logger.info("RunwayML SDK imported successfully.")
@@ -64,7 +64,7 @@ class VisualEngine:
         except IOError as e_font: logger.error(f"Pillow font IOError: {e_font}. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
 
         self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
-        self.video_frame_size = (1280, 720) # Default, will be mapped to Runway ratio
 
         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
@@ -73,15 +73,12 @@ class VisualEngine:
         self.pexels_api_key = None; self.USE_PEXELS = False
 
         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
-        if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient: # Initialize if SDK is available
             try:
-                # The SDK expects the RUNWAYML_API_SECRET env var.
-                # If your key is passed directly, you might need to initialize differently or set the env var.
                 if os.getenv("RUNWAYML_API_SECRET"):
                     self.runway_client = RunwayMLAPIClient()
                     logger.info("RunwayML Client initialized using RUNWAYML_API_SECRET env var.")
-                else:
-                    logger.warning("RUNWAYML_API_SECRET env var not set. RunwayML client not initialized here (will try in set_runway_api_key).")
             except Exception as e_runway_init:
                 logger.error(f"Failed to initialize RunwayML client during __init__: {e_runway_init}", exc_info=True)
 
@@ -91,295 +88,226 @@ class VisualEngine:
     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
         self.elevenlabs_api_key=api_key
         if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
-        if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient: # This API key is for the client
             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
             except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
         else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK issue).")
     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
 
-    def set_runway_api_key(self, k): # For RunwayML
-        self.runway_api_key = k # Store the key
         if k:
             if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
-                if not self.runway_client: # If not initialized in __init__
                     try:
-                        # Ensure RUNWAYML_API_SECRET is set if the SDK relies on it
-                        if not os.getenv("RUNWAYML_API_SECRET") and k:
                             logger.info("Setting RUNWAYML_API_SECRET environment variable from provided key for SDK.")
-                            os.environ["RUNWAYML_API_SECRET"] = k # Make key available to SDK
-
                         self.runway_client = RunwayMLAPIClient()
                         self.USE_RUNWAYML = True
                         logger.info("RunwayML Client initialized successfully via set_runway_api_key.")
                     except Exception as e_client_init:
                         logger.error(f"RunwayML Client initialization failed in set_runway_api_key: {e_client_init}", exc_info=True)
                         self.USE_RUNWAYML = False
-                else: # Client was already initialized (e.g., from env var in __init__)
-                    self.USE_RUNWAYML = True
-                    logger.info("RunwayML Client already initialized.")
-            else: # SDK not imported
-                logger.warning("RunwayML SDK not imported. API key set, but direct HTTP calls would be needed (not implemented).")
-                self.USE_RUNWAYML = False # Can't use if SDK is the only implemented path
-        else:
-            self.USE_RUNWAYML = False
-            logger.info("RunwayML Disabled (no API key provided to set_runway_api_key).")
-
 
     def _image_to_data_uri(self, image_path):
         try:
             mime_type, _ = mimetypes.guess_type(image_path)
             if not mime_type:
-                # Fallback for common image types if mimetypes fails (e.g., on some systems)
                 ext = os.path.splitext(image_path)[1].lower()
                 if ext == ".png": mime_type = "image/png"
                 elif ext in [".jpg", ".jpeg"]: mime_type = "image/jpeg"
-                else:
-                    logger.warning(f"Could not determine MIME type for {image_path}. Defaulting to application/octet-stream.")
-                    mime_type = "application/octet-stream" # Fallback, Runway might reject this
-
-            with open(image_path, "rb") as image_file:
-                encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
             data_uri = f"data:{mime_type};base64,{encoded_string}"
-            logger.debug(f"Generated data URI for {image_path} (first 100 chars): {data_uri[:100]}")
-            return data_uri
-        except Exception as e:
-            logger.error(f"Error converting image {image_path} to data URI: {e}", exc_info=True)
-            return None
 
     def _map_resolution_to_runway_ratio(self, width, height):
-        # Gen-4 supports specific ratios. Find the closest supported or default.
-        # Example: 1280x720 -> "1280:720"
-        # This needs to be robust. For now, we'll assume app.py sends a valid W:H string
-        # or we use a default that matches self.video_frame_size if it's standard.
-        if width == 1280 and height == 720: return "1280:720"
-        if width == 720 and height == 1280: return "720:1280"
-        # Add more mappings based on Gen-4 supported ratios if self.video_frame_size can vary
-        logger.warning(f"Unsupported resolution {width}x{height} for Runway Gen-4 mapping. Defaulting to 1280:720.")
-        return "1280:720" # Default
 
-    def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
-        if not self.USE_RUNWAYML or not self.runway_client: # Check for initialized client
-            logger.warning("RunwayML not enabled or client not initialized. Cannot generate video clip.")
-            return None
-        if not input_image_path or not os.path.exists(input_image_path):
-            logger.error(f"Runway Gen-4 requires an input image. Path not provided or invalid: {input_image_path}")
-            return None
 
         image_data_uri = self._image_to_data_uri(input_image_path)
-        if not image_data_uri:
-            return None
-
-        runway_duration = 10 if target_duration_seconds > 7 else 5 # Map to 5s or 10s
         runway_ratio_str = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1])
-
         output_video_filename = scene_identifier_filename_base.replace(".png", f"_runway_gen4_d{runway_duration}s.mp4")
         output_video_filepath = os.path.join(self.output_dir, output_video_filename)
-
-        logger.info(f"Initiating Runway Gen-4 task: motion='{text_prompt_for_motion[:100]}...', image='{os.path.basename(input_image_path)}', dur={runway_duration}s, ratio='{runway_ratio_str}'")
-
         try:
-            task = self.runway_client.image_to_video.create(
-                model='gen4_turbo',
-                prompt_image=image_data_uri,
-                prompt_text=text_prompt_for_motion,
-                duration=runway_duration,
-                ratio=runway_ratio_str, # e.g., "1280:720"
-                # seed=random.randint(0, 4294967295), # Optional
-                # Other Gen-4 params can be added here: motion_score, upscale etc.
-            )
-            logger.info(f"Runway Gen-4 task created with ID: {task.id}. Polling for completion...")
-
-            poll_interval = 10 # seconds
-            max_polls = 36 # Max 6 minutes (36 * 10s)
             for _ in range(max_polls):
-                time.sleep(poll_interval)
-                task_details = self.runway_client.tasks.retrieve(id=task.id)
                 logger.info(f"Runway task {task.id} status: {task_details.status}")
                 if task_details.status == 'SUCCEEDED':
-                    # The SDK docs don't explicitly show how to get the output URL from `task_details`.
-                    # Common patterns are `task_details.output.url` or `task_details.artifacts[0].url`.
-                    # This is a GUESS based on typical API structures. You MUST verify this.
-                    output_url = None
-                    if hasattr(task_details, 'output') and task_details.output and hasattr(task_details.output, 'url'):
-                        output_url = task_details.output.url
-                    elif hasattr(task_details, 'artifacts') and task_details.artifacts and isinstance(task_details.artifacts, list) and len(task_details.artifacts) > 0:
-                        # Assuming the first artifact is the video and has a URL
-                        if hasattr(task_details.artifacts[0], 'url'):
-                            output_url = task_details.artifacts[0].url
-                        elif hasattr(task_details.artifacts[0], 'download_url'): # Another common name
-                            output_url = task_details.artifacts[0].download_url
-
-                    if not output_url:
-                        logger.error(f"Runway task {task.id} SUCCEEDED, but no output URL found in task details: {task_details}")
-                        # Attempt to log the full task_details object for inspection
-                        try: logger.error(f"Full task details: {vars(task_details)}")
-                        except: pass
-                        return None
-
-                    logger.info(f"Runway task {task.id} SUCCEEDED. Downloading video from: {output_url}")
-                    video_response = requests.get(output_url, stream=True, timeout=300) # 5 min timeout for download
-                    video_response.raise_for_status()
                     with open(output_video_filepath, 'wb') as f:
-                        for chunk in video_response.iter_content(chunk_size=8192):
-                            f.write(chunk)
-                    logger.info(f"Runway Gen-4 video successfully downloaded and saved to: {output_video_filepath}")
-                    return output_video_filepath
-
                 elif task_details.status in ['FAILED', 'ABORTED']:
-                    error_message = "Unknown error"
-                    if hasattr(task_details, 'error_message') and task_details.error_message:
-                        error_message = task_details.error_message
-                    elif hasattr(task_details, 'output') and hasattr(task_details.output, 'error') and task_details.output.error:
-                        error_message = task_details.output.error
-                    logger.error(f"Runway task {task.id} status: {task_details.status}. Error: {error_message}")
-                    return None
-
-            logger.warning(f"Runway task {task.id} timed out after {max_polls * poll_interval} seconds.")
-            return None
-
-        except AttributeError as ae: # If SDK methods are not as expected
-            logger.error(f"AttributeError with RunwayML SDK: {ae}. Ensure SDK is up to date and methods match.", exc_info=True)
-            return None
-        except Exception as e_runway:
-            logger.error(f"Error during Runway Gen-4 API call or processing: {e_runway}", exc_info=True)
-            return None
 
-    # --- Other helper methods (_get_text_dimensions, _create_placeholder_image_content, _search_pexels_image, _create_placeholder_video_content) ---
-    # --- Keep these as they were in the previous full rewrite unless they need minor adjustments for the Gen-4 workflow ---
-    def _get_text_dimensions(self,tc,fo): di=fo.size if hasattr(fo,'size') else self.font_size_pil; return (0,di) if not tc else (lambda b:(b[2]-b[0],b[3]-b[1] if b[3]-b[1]>0 else di))(fo.getbbox(tc)) if hasattr(fo,'getbbox') else (lambda s:(s[0],s[1] if s[1]>0 else di))(fo.getsize(tc)) if hasattr(fo,'getsize') else (int(len(tc)*di*0.6),int(di*1.2))
-    def _create_placeholder_image_content(self,td,fn,sz=None):
-        if sz is None: sz = self.video_frame_size; img=Image.new('RGB',sz,color=(20,20,40));d=ImageDraw.Draw(img);pd=25;mw=sz[0]-(2*pd);ls=[];
-        if not td: td="(Placeholder Image)"
-        ws=td.split();cl=""
-        for w in ws: tl=cl+w+" ";raw_w,_=self._get_text_dimensions(tl,self.font);check_w=raw_w if raw_w > 0 else len(tl)*(self.font_size_pil*0.6); # Corrected w to check_w
-        if check_w<=mw:cl=tl;else: # Corrected w to check_w
-            if cl:ls.append(cl.strip());cl=w+" "
-        if cl.strip():ls.append(cl.strip())
-        if not ls and td:ls.append(td[:int(mw//(self._get_text_dimensions("A",self.font)[0]or 10))]+"..." if td else "(Text too long)");elif not ls:ls.append("(Placeholder Error)")
-        _,slh=self._get_text_dimensions("Ay",self.font);slh=slh if slh>0 else self.font_size_pil+2;mld=min(len(ls),(sz[1]-(2*pd))//(slh+2)) if slh>0 else 1
-        if mld<=0:mld=1;yts=pd+(sz[1]-(2*pd)-mld*(slh+2))/2.0;yt=yts
-        for i in range(mld):lc=ls[i];lw,_=self._get_text_dimensions(lc,self.font);xt=(sz[0]-lw)/2.0;d.text((xt,yt),lc,font=self.font,fill=(200,200,180));yt+=slh+2
-        if i==6 and mld>7:d.text((xt,yt),"...",font=self.font,fill=(200,200,180));break
-        fp=os.path.join(self.output_dir,fn)
-        try:img.save(fp);return fp
-        except Exception as e:logger.error(f"Save placeholder img {fp}: {e}",exc_info=True);return None
-    def _search_pexels_image(self, q, ofnb):
-        if not self.USE_PEXELS or not self.pexels_api_key: return None; h={"Authorization":self.pexels_api_key};p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"}
-        pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg");fp=os.path.join(self.output_dir,pfn)
-        try: logger.info(f"Pexels search: '{q}'");eq=" ".join(q.split()[:5]);p["query"]=eq;r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20)
-            r.raise_for_status();d=r.json()
-            if d.get("photos") and len(d["photos"])>0:pu=d["photos"][0]["src"]["large2x"];ir=requests.get(pu,timeout=60);ir.raise_for_status();id_img=Image.open(io.BytesIO(ir.content)) # Renamed id to id_img
-                if id_img.mode!='RGB':id_img=id_img.convert('RGB');id_img.save(fp);logger.info(f"Pexels saved: {fp}");return fp
-            else: logger.info(f"No Pexels for: '{eq}'")
-        except Exception as e:logger.error(f"Pexels error ('{q}'): {e}",exc_info=True);return None
-    def _create_placeholder_video_content(self, td, fn, dur=4, sz=None): # Generic placeholder
         if sz is None: sz = self.video_frame_size; fp = os.path.join(self.output_dir, fn); tc = None
-        try:
-            tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur)
-            tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2)
-            logger.info(f"Generic placeholder video: {fp}"); return fp
         except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
         finally:
             if tc and hasattr(tc, 'close'): tc.close()
 
 
-    # --- generate_scene_asset (Main asset generation logic) ---
     def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                              scene_data, scene_identifier_filename_base,
                              generate_as_video_clip=False, runway_target_duration=5):
         base_name, _ = os.path.splitext(scene_identifier_filename_base)
-        # Default asset_info for error state
-        asset_info = {'path': None, 'type': 'none', 'error': True,
-                      'prompt_used': image_generation_prompt_text, # Default to image prompt
-                      'error_message': 'Asset generation not fully attempted'}
-
-        # STEP 1: Generate/acquire the base image for Runway Gen-4 or for direct image output
         input_image_for_runway_path = None
-        # Use a distinct name for the base image if it's only an intermediate step for video
         base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png")
         base_image_filepath = os.path.join(self.output_dir, base_image_filename)
 
-        # Try DALL-E for base image
-        if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
-            max_r, att_n = 2, 0
             for att_n in range(max_r):
-                try:
-                    logger.info(f"Attempt {att_n+1} DALL-E (base image): {image_generation_prompt_text[:100]}...")
-                    cl = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-                    r = cl.images.generate(model=self.dalle_model, prompt=image_generation_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
-                    iu = r.data[0].url; rp = getattr(r.data[0], 'revised_prompt', None)
-                    if rp: logger.info(f"DALL-E revised: {rp[:100]}...")
-                    ir = requests.get(iu, timeout=120); ir.raise_for_status()
-                    id_img = Image.open(io.BytesIO(ir.content))
-                    if id_img.mode != 'RGB': id_img = id_img.convert('RGB')
-                    id_img.save(base_image_filepath); logger.info(f"DALL-E base image saved: {base_image_filepath}")
-                    input_image_for_runway_path = base_image_filepath
-                    asset_info = {'path': base_image_filepath, 'type': 'image', 'error': False, 'prompt_used': image_generation_prompt_text, 'revised_prompt': rp}
-                    break # DALL-E success
-                except openai.RateLimitError as e: logger.warning(f"OpenAI Rate Limit {att_n+1}: {e}. Retry..."); time.sleep(5*(att_n+1)); asset_info['error_message']=str(e)
-                except Exception as e: logger.error(f"DALL-E base image error: {e}", exc_info=True); asset_info['error_message']=str(e); break
-            if asset_info['error']: logger.warning(f"DALL-E failed after {att_n+1} attempts for base image.")
 
-        # Try Pexels if DALL-E failed or not used
-        if asset_info['error'] and self.USE_PEXELS:
-            logger.info("Attempting Pexels for base image.")
-            pqt = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
-            pp = self._search_pexels_image(pqt, base_image_filename) # Pass base image filename
-            if pp: input_image_for_runway_path = pp; asset_info = {'path': pp, 'type': 'image', 'error': False, 'prompt_used': f"Pexels: {pqt}"}
-            else: current_em = asset_info.get('error_message',""); asset_info['error_message']=(current_em + " Pexels failed for base image.").strip()
-
-        # Fallback to placeholder for base image if all above failed
-        if asset_info['error']:
-            logger.warning("Base image (DALL-E/Pexels) failed. Using placeholder for base image.")
-            ppt = asset_info.get('prompt_used', image_generation_prompt_text) # Use the original image prompt
-            php = self._create_placeholder_image_content(f"[Base Img Placeholder] {ppt[:100]}...", base_image_filename)
-            if php: input_image_for_runway_path = php; asset_info = {'path': php, 'type': 'image', 'error': False, 'prompt_used': ppt}
-            else: current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em + " Base placeholder failed.").strip()
-
-        # STEP 2: If video clip is requested, use the generated base image with RunwayML
-        if generate_as_video_clip:
-            if not input_image_for_runway_path: # If base image generation totally failed
-                logger.error("Cannot generate RunwayML video: base image path is missing or generation failed.")
-                asset_info['error'] = True # Ensure error state is propagated
-                asset_info['error_message'] = (asset_info.get('error_message',"") + " Base image missing, Runway video aborted.").strip()
-                asset_info['type'] = 'none' # No valid asset produced
-                return asset_info
-
             if self.USE_RUNWAYML:
-                logger.info(f"Proceeding to Runway Gen-4 video for {base_name} using base image: {input_image_for_runway_path}")
-                video_path = self._generate_video_clip_with_runwayml(
-                    text_prompt_for_motion=motion_prompt_text_for_video,
-                    input_image_path=input_image_for_runway_path,
-                    scene_identifier_filename_base=base_name, # _runway_gen4.mp4 will be appended
-                    target_duration_seconds=runway_target_duration
-                )
-                if video_path and os.path.exists(video_path):
-                    # Success generating video
-                    asset_info = {'path': video_path, 'type': 'video', 'error': False,
-                                  'prompt_used': motion_prompt_text_for_video, # This is the prompt for Runway
-                                  'base_image_path': input_image_for_runway_path}
-                else:
-                    # RunwayML failed, return the base image info but mark video as failed
-                    logger.warning(f"RunwayML video generation failed for {base_name}. Using the base image as fallback.")
-                    asset_info['error'] = True # Video step specifically failed
-                    asset_info['error_message'] = (asset_info.get('error_message', "Base image generated.") + " RunwayML video step failed; using base image instead.").strip()
-                    asset_info['path'] = input_image_for_runway_path # Path of the base image
-                    asset_info['type'] = 'image' # Fallback asset type is image
-                    asset_info['prompt_used'] = image_generation_prompt_text # Prompt for the base image
-            else: # RunwayML not enabled, use base image
-                logger.warning("RunwayML selected but not enabled/configured. Using base image.")
-                asset_info['error'] = True # Mark that video wasn't generated
-                asset_info['error_message'] = (asset_info.get('error_message', "Base image generated.") + " RunwayML disabled; using base image.").strip()
-                asset_info['path'] = input_image_for_runway_path
-                asset_info['type'] = 'image'
-                asset_info['prompt_used'] = image_generation_prompt_text
-        # If not generate_as_video_clip, asset_info already holds the result of image generation
-        return asset_info
-
 
-    # --- generate_narration_audio (Keep as before) ---
     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None
         afp=os.path.join(self.output_dir,output_filename)
         try:
             logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None
             if hasattr(self.elevenlabs_client,'text_to_speech')and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()")
@@ -400,10 +328,10 @@ class VisualEngine:
         except Exception as e:logger.error(f"11L audio error: {e}",exc_info=True);return None
 
 
-    # --- assemble_animatic_from_assets (Keep robust image processing, C-contiguous, debug saves, pix_fmt) ---
     def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
         if not asset_data_list: logger.warning("No assets for animatic."); return None
-        processed_clips = []; narration_clip = None; final_clip = None # final_composite_clip_obj renamed to final_clip
         logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
 
         for i, asset_info in enumerate(asset_data_list):
@@ -437,14 +365,14 @@ class VisualEngine:
             elif asset_type == 'video':
                 src_clip=None
                 try:
-                    src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False) # Explicitly no audio from source video clips
                     tmp_clip=src_clip
                     if src_clip.duration!=scene_dur:
                         if src_clip.duration>scene_dur:tmp_clip=src_clip.subclip(0,scene_dur)
                         else:
                             if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur)
                             else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
-                    current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur)
                     if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
                 except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue
                 finally:
@@ -455,17 +383,17 @@ class VisualEngine:
             try:
                 to_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8)if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration
                 to_start=0.25
-                if to_dur > 0 : # Only add text if duration is positive
                     txt_c=TextClip(f"Scene {scene_num}\n{key_action}",fontsize=self.video_overlay_font_size,color=self.video_overlay_font_color,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center',0.92),relative=True)
                     current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,txt_c],size=self.video_frame_size,use_bgclip=True)
-                else: logger.warning(f"S{scene_num}: Text overlay duration is zero or negative. Skipping text overlay.")
             except Exception as e:logger.error(f"S{scene_num} TextClip error:{e}. No text.",exc_info=True)
             if current_scene_mvpy_clip:processed_clips.append(current_scene_mvpy_clip);logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
         except Exception as e:logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}",exc_info=True)
         finally:
-            if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'): # Check if it's a VideoFileClip instance that needs closing
-                if hasattr(current_scene_mvpy_clip, 'reader') and current_scene_mvpy_clip.reader: current_scene_mvpy_clip.close()
-                elif not hasattr(current_scene_mvpy_clip, 'reader'): current_scene_mvpy_clip.close() # For ImageClip if close() is added
 
         if not processed_clips:logger.warning("No clips processed. Abort.");return None
         td=0.75
@@ -490,8 +418,8 @@ class VisualEngine:
         except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
         finally:
             logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
-            all_clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else [])
-            for clip_obj in all_clips_to_close: # Use a different name to avoid scope issues
-                if clip_obj and hasattr(clip_obj, 'close'):
-                    try: clip_obj.close()
-                    except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {type(clip_obj).__name__} - {e_close}")
 
     ELEVENLABS_CLIENT_IMPORTED = True; logger.info("ElevenLabs client components imported.")
 except Exception as e_eleven: logger.warning(f"ElevenLabs client import failed: {e_eleven}. Audio disabled.")
 
+RUNWAYML_SDK_IMPORTED = False; RunwayMLAPIClient = None
 try:
+    from runwayml import RunwayML as ImportedRunwayMLClient
     RunwayMLAPIClient = ImportedRunwayMLClient
     RUNWAYML_SDK_IMPORTED = True
     logger.info("RunwayML SDK imported successfully.")
 
         except IOError as e_font: logger.error(f"Pillow font IOError: {e_font}. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
 
         self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False; self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"
+        self.video_frame_size = (1280, 720)
 
         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None; self.elevenlabs_voice_id = default_elevenlabs_voice_id
         if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED: self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
 
         self.pexels_api_key = None; self.USE_PEXELS = False
 
         self.runway_api_key = None; self.USE_RUNWAYML = False; self.runway_client = None
+        if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
             try:
                 if os.getenv("RUNWAYML_API_SECRET"):
                     self.runway_client = RunwayMLAPIClient()
                     logger.info("RunwayML Client initialized using RUNWAYML_API_SECRET env var.")
+                # else: # No explicit else; handled by set_runway_api_key if a key is provided later
             except Exception as e_runway_init:
                 logger.error(f"Failed to initialize RunwayML client during __init__: {e_runway_init}", exc_info=True)
 
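For reference, a minimal sketch of the initialization path the constructor relies on, assuming only what the diff itself states (the SDK reads RUNWAYML_API_SECRET and the client takes no constructor arguments; the key value is a placeholder):

    import os
    from runwayml import RunwayML

    os.environ["RUNWAYML_API_SECRET"] = "<your-key>"  # normally set outside the process
    client = RunwayML()  # per the comments above, the SDK picks up the env var itself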
     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
         self.elevenlabs_api_key=api_key
         if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret
+        if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
             try: self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key); self.USE_ELEVENLABS=bool(self.elevenlabs_client); logger.info(f"ElevenLabs Client {'Ready' if self.USE_ELEVENLABS else 'Failed Init'} (Voice ID: {self.elevenlabs_voice_id}).")
             except Exception as e: logger.error(f"ElevenLabs client init error: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False
         else: self.USE_ELEVENLABS=False; logger.info("ElevenLabs Disabled (no key or SDK issue).")
     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
 
+    def set_runway_api_key(self, k):
+        self.runway_api_key = k
         if k:
             if RUNWAYML_SDK_IMPORTED and RunwayMLAPIClient:
+                if not self.runway_client:
                     try:
+                        if not os.getenv("RUNWAYML_API_SECRET"):
                             logger.info("Setting RUNWAYML_API_SECRET environment variable from provided key for SDK.")
+                            os.environ["RUNWAYML_API_SECRET"] = k
                         self.runway_client = RunwayMLAPIClient()
                         self.USE_RUNWAYML = True
                         logger.info("RunwayML Client initialized successfully via set_runway_api_key.")
                     except Exception as e_client_init:
                         logger.error(f"RunwayML Client initialization failed in set_runway_api_key: {e_client_init}", exc_info=True)
                         self.USE_RUNWAYML = False
+                else: # Client already initialized
+                    self.USE_RUNWAYML = True; logger.info("RunwayML Client was already initialized.")
+            else: logger.warning("RunwayML SDK not imported. API key set, but integration requires SDK."); self.USE_RUNWAYML = False
+        else: self.USE_RUNWAYML = False; logger.info("RunwayML Disabled (no API key).")
 
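A hedged usage sketch of the setter above; `engine` is a hypothetical VisualEngine instance and the key value is a placeholder:

    engine.set_runway_api_key("<runway-key>")  # exports RUNWAYML_API_SECRET if unset, then builds the client
    print(engine.USE_RUNWAYML)  # True only if the SDK import and the client init both succeeded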
     def _image_to_data_uri(self, image_path):
         try:
             mime_type, _ = mimetypes.guess_type(image_path)
             if not mime_type:
                 ext = os.path.splitext(image_path)[1].lower()
                 if ext == ".png": mime_type = "image/png"
                 elif ext in [".jpg", ".jpeg"]: mime_type = "image/jpeg"
+                else: mime_type = "application/octet-stream"; logger.warning(f"Unknown MIME for {image_path}, using {mime_type}.")
+            with open(image_path, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
             data_uri = f"data:{mime_type};base64,{encoded_string}"
+            logger.debug(f"Data URI for {image_path} (first 100): {data_uri[:100]}"); return data_uri
+        except Exception as e: logger.error(f"Error converting {image_path} to data URI: {e}", exc_info=True); return None
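The helper above emits a standard RFC 2397 data URI; a sketch of the expected shape (engine and file name are hypothetical):

    uri = engine._image_to_data_uri("scene_01_base_for_video.png")
    # -> "data:image/png;base64,iVBORw0KGgo..." (MIME type guessed from the extension)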
 
     def _map_resolution_to_runway_ratio(self, width, height):
+        # Based on Gen-4 supported ratios: "1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"
+        ratio_str = f"{width}:{height}"
+        supported_ratios = ["1280:720", "720:1280", "1104:832", "832:1104", "960:960", "1584:672"]
+        if ratio_str in supported_ratios: return ratio_str
+        logger.warning(f"Resolution {ratio_str} not directly supported by Gen-4. Defaulting to 1280:720.")
+        return "1280:720"
+
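Behavior sketch for the mapper above, read directly off its supported-ratio list:

    engine._map_resolution_to_runway_ratio(1280, 720)   # -> "1280:720" (exact match)
    engine._map_resolution_to_runway_ratio(960, 960)    # -> "960:960"  (exact match)
    engine._map_resolution_to_runway_ratio(1920, 1080)  # -> "1280:720" (fallback, logs a warning)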
+    def _get_text_dimensions(self,text_content,font_obj):
+        # (Corrected version, assuming font_obj.size exists or font_size_pil is the fallback)
+        default_char_height = getattr(font_obj, 'size', self.font_size_pil)
+        if not text_content: return 0, default_char_height
+        try:
+            if hasattr(font_obj,'getbbox'): # Pillow 8.0.0+
+                bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1]
+                return w, h if h > 0 else default_char_height
+            elif hasattr(font_obj,'getsize'): # Older Pillow
+                w,h=font_obj.getsize(text_content)
+                return w, h if h > 0 else default_char_height
+            else: # Fallback if no standard method (should not happen for ImageFont)
+                return int(len(text_content)*default_char_height*0.6),int(default_char_height*1.2)
+        except Exception as e:
+            logger.warning(f"Error in _get_text_dimensions for '{text_content[:20]}...': {e}")
+            return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2) # Fallback to global default
+
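A small illustration of the two Pillow code paths probed above; getbbox (Pillow 8+) returns a (left, top, right, bottom) box, while the legacy getsize (removed in Pillow 10) returns (width, height) directly:

    from PIL import ImageFont
    font = ImageFont.load_default()
    if hasattr(font, "getbbox"):
        l, t, r, b = font.getbbox("Ay")  # width = r - l, height = b - t
    else:
        w, h = font.getsize("Ay")        # older Pillow only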
+    def _create_placeholder_image_content(self,text_description,filename,size=None):
+        # <<< THIS IS THE CORRECTED METHOD >>>
+        if size is None: size = self.video_frame_size
+        img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[]
+        if not text_description: text_description="(Placeholder Image)"
+        words=text_description.split();current_line=""
+        for word_idx, word in enumerate(words):
+            # Add the space correctly, not after the very last word of the text
+            prospective_line_addition = word + (" " if word_idx < len(words) - 1 else "")
+            test_line = current_line + prospective_line_addition
+            current_line_width, _ = self._get_text_dimensions(test_line, self.font)
+            if current_line_width == 0 and test_line.strip(): # Estimate if Pillow returns 0
+                current_line_width = len(test_line) * (self.font_size_pil * 0.6)
+            if current_line_width <= max_w:
+                current_line = test_line
+            else: # Word doesn't fit
+                if current_line.strip(): # Add the previous line if it had content
+                    lines.append(current_line.strip())
+                current_line = prospective_line_addition # Start a new line with the current word (plus its space if not last)
+                # If the word itself is too long for a line, it will just be one long line.
+                # Pillow's d.text will handle overflow if the text anchor isn't 'lt' (left-top).
+                # For centered text it might go off-canvas; more complex word splitting would be needed for that.
+        if current_line.strip(): # Add any remaining part
+            lines.append(current_line.strip())
+        if not lines and text_description:
+            avg_char_width, _ = self._get_text_dimensions("W", self.font)
+            if avg_char_width == 0: avg_char_width = self.font_size_pil * 0.6 # Estimate
+            chars_per_line = int(max_w / avg_char_width) if avg_char_width > 0 else 20
+            lines.append(text_description[:chars_per_line] + ("..." if len(text_description) > chars_per_line else ""))
+        elif not lines:
+            lines.append("(Placeholder Error)")
+        _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
+        max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2)) if single_line_h > 0 else 1
+        if max_lines_to_display <= 0: max_lines_to_display = 1
+        y_text_start = padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
+        y_text = y_text_start
+        for i in range(max_lines_to_display):
+            line_content=lines[i]
+            line_w,_=self._get_text_dimensions(line_content,self.font)
+            if line_w == 0 and line_content.strip(): line_w = len(line_content) * (self.font_size_pil * 0.6)
+            x_text=(size[0]-line_w)/2.0
+            try: d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180))
+            except Exception as e_draw: logger.error(f"Pillow d.text error: {e_draw} for line '{line_content}'")
+            y_text+=single_line_h+2
+            if i==6 and max_lines_to_display > 7:
+                try: d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180))
+                except Exception as e_ellipsis: logger.error(f"Pillow d.text ellipsis error: {e_ellipsis}")
+                break
+        filepath=os.path.join(self.output_dir,filename)
+        try: img.save(filepath); return filepath
+        except Exception as e: logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True); return None
 
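Usage sketch for the placeholder generator above (hypothetical engine and filename; returns the saved path, or None on failure):

    path = engine._create_placeholder_image_content(
        "A long scene description that will be word-wrapped onto the canvas",
        "scene_01_placeholder.png")  # size defaults to self.video_frame_size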
+    def _search_pexels_image(self, q, ofnb):
+        # (Keep as before)
+        if not self.USE_PEXELS or not self.pexels_api_key: return None
+        h={"Authorization":self.pexels_api_key};p={"query":q,"per_page":1,"orientation":"landscape","size":"large2x"}
+        pfn=ofnb.replace(".png",f"_pexels_{random.randint(1000,9999)}.jpg").replace(".mp4",f"_pexels_{random.randint(1000,9999)}.jpg");fp=os.path.join(self.output_dir,pfn)
+        try:
+            logger.info(f"Pexels search: '{q}'");eq=" ".join(q.split()[:5]);p["query"]=eq;r=requests.get("https://api.pexels.com/v1/search",headers=h,params=p,timeout=20)
+            r.raise_for_status();d=r.json()
+            if d.get("photos") and len(d["photos"])>0:
+                pu=d["photos"][0]["src"]["large2x"];ir=requests.get(pu,timeout=60);ir.raise_for_status();id_img=Image.open(io.BytesIO(ir.content)) # Renamed id to id_img
+                if id_img.mode!='RGB': id_img=id_img.convert('RGB')
+                id_img.save(fp);logger.info(f"Pexels saved: {fp}");return fp # Save even if already RGB
+            else: logger.info(f"No Pexels for: '{eq}'") # This else pairs with the photos check (was misplaced)
+        except Exception as e:logger.error(f"Pexels error ('{q}'): {e}",exc_info=True)
+        return None
 
+    def _generate_video_clip_with_runwayml(self, text_prompt_for_motion, input_image_path, scene_identifier_filename_base, target_duration_seconds=5):
+        # (Updated RunwayML integration)
+        if not self.USE_RUNWAYML or not self.runway_client: logger.warning("RunwayML not enabled/client not init. Skip video."); return None
+        if not input_image_path or not os.path.exists(input_image_path): logger.error(f"Runway Gen-4 needs input image. Path invalid: {input_image_path}"); return None
         image_data_uri = self._image_to_data_uri(input_image_path)
+        if not image_data_uri: return None
+        runway_duration = 10 if target_duration_seconds > 7 else 5
         runway_ratio_str = self._map_resolution_to_runway_ratio(self.video_frame_size[0], self.video_frame_size[1])
         output_video_filename = scene_identifier_filename_base.replace(".png", f"_runway_gen4_d{runway_duration}s.mp4")
         output_video_filepath = os.path.join(self.output_dir, output_video_filename)
+        logger.info(f"Runway Gen-4 task: motion='{text_prompt_for_motion[:100]}...', img='{os.path.basename(input_image_path)}', dur={runway_duration}s, ratio='{runway_ratio_str}'")
         try:
+            task = self.runway_client.image_to_video.create(model='gen4_turbo', prompt_image=image_data_uri, prompt_text=text_prompt_for_motion, duration=runway_duration, ratio=runway_ratio_str)
+            logger.info(f"Runway Gen-4 task ID: {task.id}. Polling...")
+            poll_interval=10; max_polls=36 # Max 6 mins
             for _ in range(max_polls):
+                time.sleep(poll_interval); task_details = self.runway_client.tasks.retrieve(id=task.id)
                 logger.info(f"Runway task {task.id} status: {task_details.status}")
                 if task_details.status == 'SUCCEEDED':
+                    arts = getattr(task_details, 'artifacts', None)
+                    output_url = getattr(getattr(task_details, 'output', None), 'url', None) \
+                                 or (arts and getattr(arts[0], 'url', None)) \
+                                 or (arts and getattr(arts[0], 'download_url', None)) or None
+                    if not output_url: logger.error(f"Runway task {task.id} SUCCEEDED, but no output URL in details: {vars(task_details) if hasattr(task_details, '__dict__') else task_details}"); return None
+                    logger.info(f"Runway task {task.id} SUCCEEDED. Downloading from: {output_url}")
+                    video_response = requests.get(output_url, stream=True, timeout=300); video_response.raise_for_status()
                     with open(output_video_filepath, 'wb') as f:
+                        for chunk in video_response.iter_content(chunk_size=8192): f.write(chunk)
+                    logger.info(f"Runway Gen-4 video saved: {output_video_filepath}"); return output_video_filepath
                 elif task_details.status in ['FAILED', 'ABORTED']:
+                    em = getattr(task_details,'error_message',None) or getattr(getattr(task_details,'output',None),'error', "Unknown error")
+                    logger.error(f"Runway task {task.id} status: {task_details.status}. Error: {em}"); return None
+            logger.warning(f"Runway task {task.id} timed out."); return None
+        except AttributeError as ae: logger.error(f"RunwayML SDK AttributeError: {ae}. SDK/methods might differ.", exc_info=True); return None
+        except Exception as e: logger.error(f"Runway Gen-4 API error: {e}", exc_info=True); return None
 
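Because the output-URL field is explicitly a guess (see the getattr probing above), a quick inspection sketch for verifying the real response shape against a live task, reusing only calls already present in this method (the task id is a placeholder):

    t = engine.runway_client.tasks.retrieve(id="<task-id>")
    print(t.status)   # 'SUCCEEDED', 'FAILED', 'ABORTED', ...
    print(vars(t))    # look for output.url, artifacts[0].url, or artifacts[0].download_url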
+    def _create_placeholder_video_content(self, td, fn, dur=4, sz=None): # Generic placeholder if input_image not available
         if sz is None: sz = self.video_frame_size; fp = os.path.join(self.output_dir, fn); tc = None
+        try: tc = TextClip(td, fontsize=50, color='white', font=self.video_overlay_font, bg_color='black', size=sz, method='caption').set_duration(dur); tc.write_videofile(fp, fps=24, codec='libx264', preset='ultrafast', logger=None, threads=2); logger.info(f"Generic placeholder video: {fp}"); return fp
         except Exception as e: logger.error(f"Generic placeholder video error {fp}: {e}", exc_info=True); return None
         finally:
             if tc and hasattr(tc, 'close'): tc.close()
 
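One operational note on the generic placeholder above: TextClip with method='caption' renders through ImageMagick in MoviePy 1.x, so a pre-flight check along these lines may save a confusing failure (assumes MoviePy 1.x configuration):

    from moviepy.config import get_setting
    print(get_setting("IMAGEMAGICK_BINARY"))  # must resolve to a real ImageMagick binary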
     def generate_scene_asset(self, image_generation_prompt_text, motion_prompt_text_for_video,
                              scene_data, scene_identifier_filename_base,
                              generate_as_video_clip=False, runway_target_duration=5):
+        # (Logic mostly as before, ensuring the base image is robustly generated first)
         base_name, _ = os.path.splitext(scene_identifier_filename_base)
+        asset_info = {'path': None, 'type': 'none', 'error': True, 'prompt_used': image_generation_prompt_text, 'error_message': 'Asset generation init failed'}
         input_image_for_runway_path = None
         base_image_filename = base_name + ("_base_for_video.png" if generate_as_video_clip else ".png")
         base_image_filepath = os.path.join(self.output_dir, base_image_filename)
 
+        # Attempt base image generation
+        if self.USE_AI_IMAGE_GENERATION and self.openai_api_key: # DALL-E
+            max_r, att_n = 2,0
             for att_n in range(max_r):
+                try:
+                    logger.info(f"Att {att_n+1} DALL-E (base img): {image_generation_prompt_text[:70]}...")
+                    cl=openai.OpenAI(api_key=self.openai_api_key,timeout=90.0)
+                    r=cl.images.generate(model=self.dalle_model,prompt=image_generation_prompt_text,n=1,size=self.image_size_dalle3,quality="hd",response_format="url",style="vivid")
+                    iu=r.data[0].url;rp=getattr(r.data[0],'revised_prompt',None)
+                    if rp:logger.info(f"DALL-E revised: {rp[:70]}...")
+                    ir=requests.get(iu,timeout=120);ir.raise_for_status();id_img=Image.open(io.BytesIO(ir.content))
+                    if id_img.mode!='RGB':id_img=id_img.convert('RGB')
+                    id_img.save(base_image_filepath);logger.info(f"DALL-E base img saved: {base_image_filepath}");input_image_for_runway_path=base_image_filepath;asset_info={'path':base_image_filepath,'type':'image','error':False,'prompt_used':image_generation_prompt_text,'revised_prompt':rp};break
+                except openai.RateLimitError as e:logger.warning(f"OpenAI RateLimit {att_n+1}:{e}.Retry...");time.sleep(5*(att_n+1));asset_info['error_message']=str(e)
+                except Exception as e:logger.error(f"DALL-E base img error:{e}",exc_info=True);asset_info['error_message']=str(e);break
+            if asset_info['error']:logger.warning(f"DALL-E failed after {att_n+1} attempts for base img.")
 
+        if asset_info['error'] and self.USE_PEXELS: # Pexels Fallback
+            logger.info("Trying Pexels for base img.");pqt=scene_data.get('pexels_search_query_감독',f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}");pp=self._search_pexels_image(pqt,base_image_filename)
+            if pp:input_image_for_runway_path=pp;asset_info={'path':pp,'type':'image','error':False,'prompt_used':f"Pexels:{pqt}"}
+            else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Pexels failed for base.").strip()
+
+        if asset_info['error']: # Placeholder Fallback
+            logger.warning("Base img (DALL-E/Pexels) failed. Using placeholder.");ppt=asset_info.get('prompt_used',image_generation_prompt_text);php=self._create_placeholder_image_content(f"[Base Placeholder]{ppt[:70]}...",base_image_filename)
+            if php:input_image_for_runway_path=php;asset_info={'path':php,'type':'image','error':False,'prompt_used':ppt}
+            else:current_em=asset_info.get('error_message',"");asset_info['error_message']=(current_em+" Base placeholder failed.").strip()
+
+        if generate_as_video_clip: # Now attempt RunwayML if requested
+            if not input_image_for_runway_path:logger.error("RunwayML video: base img failed.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"")+" Base img miss, Runway abort.").strip();asset_info['type']='none';return asset_info
             if self.USE_RUNWAYML:
+                logger.info(f"Runway Gen-4 video for {base_name} using base: {input_image_for_runway_path}")
+                video_path=self._generate_video_clip_with_runwayml(motion_prompt_text_for_video,input_image_for_runway_path,base_name,runway_target_duration)
+                if video_path and os.path.exists(video_path):asset_info={'path':video_path,'type':'video','error':False,'prompt_used':motion_prompt_text_for_video,'base_image_path':input_image_for_runway_path}
+                else:logger.warning(f"RunwayML video failed for {base_name}. Fallback to base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML video fail; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
+            else:logger.warning("RunwayML selected but disabled. Use base img.");asset_info['error']=True;asset_info['error_message']=(asset_info.get('error_message',"Base img ok.")+" RunwayML disabled; use base img.").strip();asset_info['path']=input_image_for_runway_path;asset_info['type']='image';asset_info['prompt_used']=image_generation_prompt_text
+        return asset_info # Return image info if not video, or the video result
 
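Call sketch for the asset entry point above; all values are illustrative, and scene_data only needs the keys the method reads:

    info = engine.generate_scene_asset(
        image_generation_prompt_text="A rain-soaked neon alley at night, cinematic",
        motion_prompt_text_for_video="slow push-in, flickering signage",
        scene_data={"emotional_beat": "tension", "setting_description": "neon alley"},
        scene_identifier_filename_base="scene_01.png",
        generate_as_video_clip=True, runway_target_duration=5)
    # info -> {'path': ..., 'type': 'video'|'image'|'none', 'error': bool, 'prompt_used': ..., ...}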
     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"):
+        # (Keep as before)
         if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate: logger.info("11L skip."); return None
         afp=os.path.join(self.output_dir,output_filename)
         try:
             logger.info(f"11L audio (Voice:{self.elevenlabs_voice_id}): {text_to_narrate[:70]}..."); asm=None
             if hasattr(self.elevenlabs_client,'text_to_speech')and hasattr(self.elevenlabs_client.text_to_speech,'stream'):asm=self.elevenlabs_client.text_to_speech.stream;logger.info("Using 11L .text_to_speech.stream()")
 
         except Exception as e:logger.error(f"11L audio error: {e}",exc_info=True);return None
 
     def assemble_animatic_from_assets(self, asset_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24):
+        # (Keep as in the version with robust image processing, C-contiguous arrays, and debug image saves)
         if not asset_data_list: logger.warning("No assets for animatic."); return None
+        processed_clips = []; narration_clip = None; final_clip = None
         logger.info(f"Assembling from {len(asset_data_list)} assets. Frame: {self.video_frame_size}.")
 
         for i, asset_info in enumerate(asset_data_list):
             elif asset_type == 'video':
                 src_clip=None
                 try:
+                    src_clip=VideoFileClip(asset_path,target_resolution=(self.video_frame_size[1],self.video_frame_size[0])if self.video_frame_size else None, audio=False)
                     tmp_clip=src_clip
                     if src_clip.duration!=scene_dur:
                         if src_clip.duration>scene_dur:tmp_clip=src_clip.subclip(0,scene_dur)
                         else:
                             if scene_dur/src_clip.duration > 1.5 and src_clip.duration>0.1:tmp_clip=src_clip.loop(duration=scene_dur)
                             else:tmp_clip=src_clip.set_duration(src_clip.duration);logger.info(f"S{scene_num} Video clip ({src_clip.duration:.2f}s) shorter than target ({scene_dur:.2f}s).")
+                    current_scene_mvpy_clip=tmp_clip.set_duration(scene_dur) # Ensure target duration for concatenation
                     if current_scene_mvpy_clip.size!=list(self.video_frame_size):current_scene_mvpy_clip=current_scene_mvpy_clip.resize(self.video_frame_size)
                 except Exception as e:logger.error(f"S{scene_num} Video load error '{asset_path}':{e}",exc_info=True);continue
                 finally:
             try:
                 to_dur=min(current_scene_mvpy_clip.duration-0.5,current_scene_mvpy_clip.duration*0.8)if current_scene_mvpy_clip.duration>0.5 else current_scene_mvpy_clip.duration
                 to_start=0.25
+                if to_dur > 0:
                     txt_c=TextClip(f"Scene {scene_num}\n{key_action}",fontsize=self.video_overlay_font_size,color=self.video_overlay_font_color,font=self.video_overlay_font,bg_color='rgba(10,10,20,0.7)',method='caption',align='West',size=(self.video_frame_size[0]*0.9,None),kerning=-1,stroke_color='black',stroke_width=1.5).set_duration(to_dur).set_start(to_start).set_position(('center',0.92),relative=True)
                     current_scene_mvpy_clip=CompositeVideoClip([current_scene_mvpy_clip,txt_c],size=self.video_frame_size,use_bgclip=True)
+                else: logger.warning(f"S{scene_num}: Text overlay duration is zero. Skip text.")
             except Exception as e:logger.error(f"S{scene_num} TextClip error:{e}. No text.",exc_info=True)
             if current_scene_mvpy_clip:processed_clips.append(current_scene_mvpy_clip);logger.info(f"S{scene_num} Processed. Dur:{current_scene_mvpy_clip.duration:.2f}s.")
         except Exception as e:logger.error(f"MAJOR Error S{scene_num} ({asset_path}):{e}",exc_info=True)
         finally:
+            if current_scene_mvpy_clip and hasattr(current_scene_mvpy_clip,'close'):
+                try: current_scene_mvpy_clip.close()
+                except: pass
 
         if not processed_clips:logger.warning("No clips processed. Abort.");return None
         td=0.75
         except Exception as e:logger.error(f"Video write error:{e}",exc_info=True);return None
         finally:
             logger.debug("Closing all MoviePy clips in `assemble_animatic_from_assets` finally block.")
+            all_clips_to_close = processed_clips + ([narration_clip] if narration_clip else []) + ([final_clip] if final_clip else []) # Corrected variable name
+            for clip_obj_to_close in all_clips_to_close:
+                if clip_obj_to_close and hasattr(clip_obj_to_close, 'close'):
+                    try: clip_obj_to_close.close()
+                    except Exception as e_close: logger.warning(f"Ignoring error while closing a clip: {type(clip_obj_to_close).__name__} - {e_close}")
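
Finally, a hedged end-to-end sketch of how the pieces above compose (asset dicts as returned by generate_scene_asset; narration is optional):

    narration = engine.generate_narration_audio("Opening narration text.")
    final_path = engine.assemble_animatic_from_assets(
        asset_data_list=[info],            # e.g., results from generate_scene_asset
        overall_narration_path=narration,  # may be None
        output_filename="final_video.mp4", fps=24)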