# core/visual_engine.py
from PIL import Image, ImageDraw, ImageFont, ImageOps # Pillow should be >= 10.1.0
# --- MONKEY PATCH FOR Image.ANTIALIAS ---
# This is applied at module load time.
try:
if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
if not hasattr(Image, 'ANTIALIAS'):
Image.ANTIALIAS = Image.Resampling.LANCZOS
print("INFO: Monkey-patched PIL.Image.ANTIALIAS with Image.Resampling.LANCZOS.")
elif hasattr(Image, 'LANCZOS'): # Pillow 8 used Image.LANCZOS directly
if not hasattr(Image, 'ANTIALIAS'):
Image.ANTIALIAS = Image.LANCZOS
print("INFO: Monkey-patched PIL.Image.ANTIALIAS with Image.LANCZOS.")
except AttributeError:
print("WARNING: Could not determine Pillow version or attributes for ANTIALIAS monkey-patch. Video effects might fail.")
# --- END MONKEY PATCH ---
from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
CompositeVideoClip, AudioFileClip)
import moviepy.video.fx.all as vfx
import numpy as np
import os
import openai
import requests
import io
import time
import random
import subprocess
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ELEVENLABS_CLIENT_IMPORTED = False
ElevenLabsAPIClient = None; Voice = None; VoiceSettings = None
try:
from elevenlabs.client import ElevenLabs as ImportedElevenLabsClient
from elevenlabs import Voice as ImportedVoice, VoiceSettings as ImportedVoiceSettings
ElevenLabsAPIClient = ImportedElevenLabsClient; Voice = ImportedVoice; VoiceSettings = ImportedVoiceSettings
ELEVENLABS_CLIENT_IMPORTED = True; logger.info("Successfully imported ElevenLabs client components.")
except ImportError as e: logger.warning(f"ElevenLabs client import failed: {e}. Audio disabled.")
except Exception as e: logger.warning(f"General ElevenLabs import error: {e}. Audio disabled.")
class VisualEngine:
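    """
    Produces the media assets for a CineGen run: scene images via DALL-E
    (falling back to Pexels photos, then to text placeholder frames),
    optional ElevenLabs narration, and a final MP4 assembled with MoviePy.
    API keys are supplied after construction through the set_*_api_key() methods.
    """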
def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
self.font_filename = "arial.ttf"; self.font_path_in_container = f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
self.font_size_pil = 20; self.video_overlay_font_size = 30; self.video_overlay_font_color = 'white'; self.video_overlay_font = 'Liberation-Sans-Bold' # Or 'Arial-Bold'
try: self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil); logger.info(f"Placeholder font loaded: {self.font_path_in_container}.")
except IOError: logger.warning(f"Placeholder font '{self.font_path_in_container}' fail. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10
self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"; self.video_frame_size = (1280, 720)
self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
self.elevenlabs_voice_id = default_elevenlabs_voice_id
if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
else: self.elevenlabs_voice_settings = None
self.pexels_api_key = None; self.USE_PEXELS = False
logger.info("VisualEngine initialized.")
def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E {self.dalle_model} {'Ready.' if k else 'Disabled.'}")
def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
self.elevenlabs_api_key=api_key
if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret; logger.info(f"ElevenLabs Voice ID set from config: {self.elevenlabs_voice_id}")
if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
try:
self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
if self.elevenlabs_client: self.USE_ELEVENLABS=True; logger.info(f"ElevenLabs Client Ready (Voice: {self.elevenlabs_voice_id}).")
else: self.USE_ELEVENLABS=False; logger.warning("ElevenLabs client is None post-init.")
except Exception as e: logger.error(f"Error initializing ElevenLabs client: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client = None
        else: self.USE_ELEVENLABS=False; self.elevenlabs_client = None; logger.info("ElevenLabs disabled (no API key or client library unavailable).")
def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")
def _get_text_dimensions(self,text_content,font_obj): # Same
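        """Return (width, height) of text_content for font_obj, supporting both the newer
        getbbox() and older getsize() Pillow APIs, with a rough estimate as a last resort."""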
if not text_content: return 0,self.font_size_pil
try:
if hasattr(font_obj,'getbbox'): bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1];return w, h if h > 0 else self.font_size_pil
elif hasattr(font_obj,'getsize'): w,h=font_obj.getsize(text_content);return w, h if h > 0 else self.font_size_pil
else: return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
        except Exception: return int(len(text_content)*self.font_size_pil*0.6), int(self.font_size_pil*1.2)  # rough fallback estimate if font metrics fail
def _create_placeholder_image_content(self,text_description,filename,size=None): # Same
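        """Render text_description onto a dark placeholder frame (word-wrapped and centred)
        and save it under self.output_dir; returns the file path, or None on failure."""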
if size is None: size = self.video_frame_size
img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
if not text_description: text_description="(Placeholder: No prompt text)"
words=text_description.split();current_line=""
for word in words:
test_line=current_line+word+" ";
if self._get_text_dimensions(test_line,self.font)[0] <= max_w: current_line=test_line
            else:
                if current_line: lines.append(current_line.strip())
                current_line = word + " "  # begin a new line with the overflowing word
if current_line: lines.append(current_line.strip())
if not lines: lines.append("(Text error or too long for placeholder)")
_,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2))
y_text=padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
for i in range(max_lines_to_display):
line_content=lines[i];line_w,_=self._get_text_dimensions(line_content,self.font);x_text=(size[0]-line_w)/2.0
d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180));y_text+=single_line_h+2
if i==6 and max_lines_to_display > 7: d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180));break
filepath=os.path.join(self.output_dir,filename);
try:img.save(filepath);return filepath
except Exception as e:logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True);return None
def _search_pexels_image(self, query, output_filename_base): # Same
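        """Query the Pexels API for a landscape photo matching `query` (trimmed to its first
        five words), download the large2x rendition, and save it as a JPEG in self.output_dir.
        Returns the saved path, or None if Pexels is disabled or no photo is found."""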
if not self.USE_PEXELS or not self.pexels_api_key: return None
headers = {"Authorization": self.pexels_api_key}; params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg")
filepath = os.path.join(self.output_dir, pexels_filename)
try:
logger.info(f"Searching Pexels for: '{query}'"); effective_query = " ".join(query.split()[:5]); params["query"] = effective_query
response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
response.raise_for_status(); data = response.json()
if data.get("photos") and len(data["photos"]) > 0:
photo_url = data["photos"][0]["src"]["large2x"]
image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
img_data = Image.open(io.BytesIO(image_response.content))
if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
img_data.save(filepath); logger.info(f"Pexels image saved: {filepath}"); return filepath
else: logger.info(f"No photos on Pexels for query: '{effective_query}'")
except Exception as e: logger.error(f"Pexels search/download for query '{query}': {e}", exc_info=True)
return None
def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename): # Same
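        """Generate the visual for one scene: try DALL-E first (with retries on rate limits),
        then fall back to a Pexels photo, and finally to a text placeholder image.
        Returns the path of whichever asset was produced."""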
filepath = os.path.join(self.output_dir, scene_identifier_filename)
if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
max_retries = 2
for attempt in range(max_retries):
try:
logger.info(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
response = client.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
image_url = response.data[0].url; revised_prompt = getattr(response.data[0], 'revised_prompt', None)
if revised_prompt: logger.info(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
image_response = requests.get(image_url, timeout=120); image_response.raise_for_status()
img_data = Image.open(io.BytesIO(image_response.content));
if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
img_data.save(filepath); logger.info(f"AI Image (DALL-E) saved: {filepath}"); return filepath
except openai.RateLimitError as e:
logger.warning(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s..."); time.sleep(5 * (attempt + 1))
if attempt == max_retries - 1: logger.error("Max retries for RateLimitError."); break
else: continue
except openai.APIError as e: logger.error(f"OpenAI API Error: {e}"); break
except requests.exceptions.RequestException as e: logger.error(f"Requests Error (DALL-E download): {e}"); break
except Exception as e: logger.error(f"Generic error (DALL-E gen): {e}", exc_info=True); break
logger.warning("DALL-E generation failed. Trying Pexels fallback...")
pexels_query_text = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
if pexels_path: return pexels_path
logger.warning("Pexels also failed/disabled. Using placeholder.")
return self._create_placeholder_image_content(f"[AI/Pexels Failed] {image_prompt_text[:100]}...", scene_identifier_filename)
else:
return self._create_placeholder_image_content(image_prompt_text, scene_identifier_filename)
def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"): # Corrected client call
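        """Synthesise narration with ElevenLabs, preferring text_to_speech.stream() and falling
        back to generate() with a Voice object, depending on the installed SDK surface.
        Returns the path to the saved MP3, or None if audio is disabled or generation fails."""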
if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
logger.info("ElevenLabs conditions not met. Skipping audio generation.")
return None
audio_filepath = os.path.join(self.output_dir, output_filename)
try:
logger.info(f"Generating ElevenLabs audio (Voice ID: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")
# Use the client's text_to_speech.stream() method
if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
logger.info("Using elevenlabs_client.text_to_speech.stream()")
audio_data_iterator = self.elevenlabs_client.text_to_speech.stream(
text=text_to_narrate,
voice_id=str(self.elevenlabs_voice_id), # Must be string ID for this method
model_id="eleven_multilingual_v2",
# voice_settings=self.elevenlabs_voice_settings # Pass VoiceSettings if API supports it here
)
# Fallback if .generate() with Voice object is preferred by a slightly different client version
elif hasattr(self.elevenlabs_client, 'generate') and Voice and self.elevenlabs_voice_settings:
logger.info("Using elevenlabs_client.generate() with Voice object as fallback.")
voice_param = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings)
audio_data_iterator = self.elevenlabs_client.generate(
text=text_to_narrate, voice=voice_param, model="eleven_multilingual_v2")
else:
logger.error("No recognized audio generation method found on ElevenLabs client (tried text_to_speech.stream and generate).")
return None
with open(audio_filepath, "wb") as f:
for chunk in audio_data_iterator:
if chunk: f.write(chunk)
logger.info(f"ElevenLabs audio saved: {audio_filepath}")
return audio_filepath
except AttributeError as ae:
logger.error(f"AttributeError with ElevenLabs client: {ae}. SDK method/structure might be different.", exc_info=True)
except Exception as e:
logger.error(f"Error generating ElevenLabs audio: {e}", exc_info=True)
return None
def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
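        """Assemble the final video: letterbox each image onto self.video_frame_size, apply a
        subtle Ken Burns zoom and an optional caption overlay, concatenate the clips with a
        short overlap plus an overall fade in/out, attach narration audio if provided, and
        write an H.264/AAC MP4. Returns the output path, or None on failure."""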
if not image_data_list: logger.warning("No image data for video."); return None
processed_clips=[]; narration_audio_clip=None; final_video_clip_obj=None
logger.info(f"Preparing {len(image_data_list)} clips. Target frame: {self.video_frame_size}. Duration/img: {duration_per_image}s.")
for i, data in enumerate(image_data_list):
img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
if not (img_path and os.path.exists(img_path)): logger.warning(f"Img not found: {img_path}"); continue
try:
pil_img = Image.open(img_path)
if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
img_copy = pil_img.copy()
# Using modern Resampling.LANCZOS
img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,5), random.randint(0,5), random.randint(0,5)))
xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
canvas.paste(img_copy, (xo,yo))
frame_np = np.array(canvas)
img_clip_base = ImageClip(frame_np).set_duration(duration_per_image)
                # Ken Burns effect via vfx.resize. Older MoviePy releases reference the removed
                # Image.ANTIALIAS constant inside this effect; the module-level monkey patch above
                # restores that alias. If the AttributeError still occurs, the inner try/except
                # below drops the zoom for this clip and uses the static image instead.
try:
end_scale = random.uniform(1.03, 1.08)
img_clip = img_clip_base.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration_per_image))
img_clip = img_clip.set_position('center')
except AttributeError as e_alias: # Specifically catch ANTIALIAS here if it happens
if 'ANTIALIAS' in str(e_alias):
logger.error(f"ANTIALIAS error during vfx.resize for {img_path}. Disabling Ken Burns for this clip. Error: {e_alias}")
img_clip = img_clip_base # Use the clip without the resize effect
else:
raise # Re-raise other AttributeErrors
except Exception as e_fx: # Catch other errors from fx
logger.error(f"Error applying vfx.resize for {img_path}. Using base clip. Error: {e_fx}")
img_clip = img_clip_base
if key_action:
txt_clip = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size,
color=self.video_overlay_font_color, font=self.video_overlay_font,
bg_color='rgba(10,10,20,0.8)', method='caption', align='West',
size=(self.video_frame_size[0]*0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5
).set_duration(duration_per_image-1.0).set_start(0.5).set_position(('center',0.92),relative=True)
final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size, use_bgclip=True, bg_color=(0,0,0))
else: final_scene_clip = img_clip
processed_clips.append(final_scene_clip)
except Exception as e: logger.error(f"Creating video clip for {img_path}: {e}", exc_info=True)
if not processed_clips: logger.warning("No clips processed for video."); return None
transition = 0.75
try:
final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
if final_video_clip_obj.duration > transition*2:
final_video_clip_obj = final_video_clip_obj.fx(vfx.fadein, transition).fx(vfx.fadeout, transition)
if overall_narration_path and os.path.exists(overall_narration_path):
try:
narration_audio_clip = AudioFileClip(overall_narration_path)
if narration_audio_clip.duration < final_video_clip_obj.duration:
final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip); logger.info("Overall narration added.")
except Exception as e: logger.error(f"Adding overall narration: {e}", exc_info=True)
output_path = os.path.join(self.output_dir, output_filename); logger.info(f"Writing final video to: {output_path}")
final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
audio_codec='aac',
temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
logger.info(f"Video successfully created: {output_path}"); return output_path
except Exception as e: logger.error(f"Writing video file: {e}", exc_info=True); return None
finally:
for c_item in processed_clips:
if hasattr(c_item, 'close'): c_item.close()
if narration_audio_clip and hasattr(narration_audio_clip, 'close'): narration_audio_clip.close()
if final_video_clip_obj and hasattr(final_video_clip_obj, 'close'): final_video_clip_obj.close()
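# --- Minimal usage sketch (illustrative only) ---
# A rough example of how this engine can be driven end-to-end. The environment variable
# names and the demo scene data below are assumptions for illustration, not part of this
# module's API; the real app injects API keys and scene data from its own configuration.
if __name__ == "__main__":
    engine = VisualEngine(output_dir="temp_cinegen_media")
    engine.set_openai_api_key(os.getenv("OPENAI_API_KEY"))          # hypothetical env var name
    engine.set_elevenlabs_api_key(os.getenv("ELEVENLABS_API_KEY"))  # hypothetical env var name
    engine.set_pexels_api_key(os.getenv("PEXELS_API_KEY"))          # hypothetical env var name
    demo_scene = {"emotional_beat": "quiet resolve", "setting_description": "rain-soaked neon alley"}
    img_path = engine.generate_image_visual(
        "A lone figure under neon signs in a rain-soaked alley, cinematic lighting",
        demo_scene, "scene_1_demo.png")
    narration_path = engine.generate_narration_audio("The city never sleeps, and neither does she.")
    if img_path:
        engine.create_video_from_images(
            [{"path": img_path, "scene_num": 1, "key_action": "She steps into the rain."}],
            overall_narration_path=narration_path,
            output_filename="demo_video.mp4")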