# core/visual_engine.py
from PIL import Image, ImageDraw, ImageFont
from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
CompositeVideoClip)
import moviepy.video.fx.all as vfx
import numpy as np
import os
import openai
import requests
import io
import time # For adding slight delay if API rate limits are hit
class VisualEngine:
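    """Generate scene visuals (DALL-E 3 images or text placeholder cards) and
    assemble them into an MP4 with a Ken Burns zoom and caption overlays."""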
def __init__(self, output_dir="temp_cinegen_media"):
self.output_dir = output_dir
os.makedirs(self.output_dir, exist_ok=True)
self.font_filename = "arial.ttf"
self.font_path_in_container = f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
self.font_size_pil = 20 # Slightly smaller for placeholder text to fit more
self.video_overlay_font_size = 32
self.video_overlay_font_color = 'white'
self.video_overlay_font = 'Arial-Bold' # Try specific variant; ensure ImageMagick can find it or use full path
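        # Note: moviepy's TextClip.list('font') prints the font names ImageMagick
        # exposes, which is a quick way to check that 'Arial-Bold' resolves here.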
try:
self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil)
print(f"Successfully loaded font: {self.font_path_in_container} for placeholders.")
except IOError:
print(f"Warning: Could not load font from '{self.font_path_in_container}'. Placeholders will use default font.")
self.font = ImageFont.load_default()
self.font_size_pil = 10 # Default font size estimate
self.openai_api_key = None
self.USE_AI_IMAGE_GENERATION = False
self.dalle_model = "dall-e-3"
# DALL-E 3 standard size for highest quality generally. Other options: "1792x1024", "1024x1792"
self.image_size_dalle3 = "1792x1024" # Landscape, good for cinematic
self.video_frame_size = (1280, 720) # 16:9 aspect ratio for video output
def set_openai_api_key(self, api_key):
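        """Store the OpenAI API key and enable/disable AI image generation."""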
if api_key:
self.openai_api_key = api_key
self.USE_AI_IMAGE_GENERATION = True
print(f"OpenAI API key set. AI Image Generation Enabled with {self.dalle_model}.")
else:
self.USE_AI_IMAGE_GENERATION = False
print("OpenAI API key not provided. AI Image Generation Disabled. Using placeholders.")
    def _get_text_dimensions(self, text_content, font_obj):
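        """Return (width, height) in pixels for text_content rendered in font_obj,
        falling back to rough estimates if the font object can't measure text."""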
        if not text_content:
            return 0, self.font_size_pil
        try:
            if hasattr(font_obj, 'getbbox'):  # Newer Pillow
                bbox = font_obj.getbbox(text_content)
                width = bbox[2] - bbox[0]
                height = bbox[3] - bbox[1]
                return width, height if height > 0 else self.font_size_pil
            elif hasattr(font_obj, 'getsize'):  # Older Pillow
                width, height = font_obj.getsize(text_content)
                return width, height if height > 0 else self.font_size_pil
            else:
                # Crude estimate when the font object can't measure text.
                return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2)
        except Exception:
            return int(len(text_content) * self.font_size_pil * 0.6), int(self.font_size_pil * 1.2)
def _create_placeholder_image_content(self, text_description, filename, size=(1280, 720)): # Default to video_frame_size
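        """Render text_description onto a dark card, wrapped and centred, and
        save it under output_dir. Returns the file path, or None on failure."""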
        img = Image.new('RGB', size, color=(20, 20, 40))  # Dark navy background
draw = ImageDraw.Draw(img)
padding = 25
max_text_width = size[0] - (2 * padding)
lines = []
if not text_description: text_description = "(Placeholder: No prompt provided)"
# Simplified text wrapping for placeholder
words = text_description.split()
current_line = ""
for word in words:
test_line = current_line + word + " "
if self._get_text_dimensions(test_line, self.font)[0] <= max_text_width:
current_line = test_line
else:
if current_line: lines.append(current_line.strip())
current_line = word + " "
if current_line: lines.append(current_line.strip())
if not lines: lines.append("(Text too long or unrenderable for placeholder)")
_, single_line_height = self._get_text_dimensions("Ay", self.font)
if single_line_height == 0: single_line_height = self.font_size_pil + 2
num_lines_to_display = min(len(lines), (size[1] - 2 * padding) // (single_line_height + 2)) # Max lines based on height
y_text = padding + (size[1] - 2*padding - num_lines_to_display * (single_line_height + 2)) / 2.0
for i in range(num_lines_to_display):
line = lines[i]
line_width, _ = self._get_text_dimensions(line, self.font)
x_text = (size[0] - line_width) / 2.0
draw.text((x_text, y_text), line, font=self.font, fill=(200, 200, 180))
y_text += single_line_height + 2 # Line spacing
if i == 6 and num_lines_to_display > 7: # Show ellipsis if more text
draw.text((x_text, y_text), "...", font=self.font, fill=(200, 200, 180))
break
filepath = os.path.join(self.output_dir, filename)
        try:
            img.save(filepath)
            return filepath
        except Exception as e:
            print(f"Error saving placeholder: {e}")
            return None
def generate_image_visual(self, image_prompt_text, scene_identifier_filename):
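        """Generate one scene image with DALL-E, retrying on rate limits, and
        fall back to a placeholder card if generation is disabled or fails.
        Returns the saved image path, or None."""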
filepath = os.path.join(self.output_dir, scene_identifier_filename)
if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
max_retries = 2
for attempt in range(max_retries):
try:
print(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:120]}...")
client = openai.OpenAI(api_key=self.openai_api_key, timeout=60.0) # Timeout for client
response = client.images.generate(
model=self.dalle_model,
prompt=image_prompt_text,
n=1,
size=self.image_size_dalle3,
quality="hd", # Use "hd" for DALL-E 3 for better detail, "standard" for faster/cheaper
response_format="url",
style="vivid" # "vivid" or "natural" for DALL-E 3
)
image_url = response.data[0].url
revised_prompt = getattr(response.data[0], 'revised_prompt', None)
if revised_prompt: print(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
image_response = requests.get(image_url, timeout=90) # Increased download timeout
image_response.raise_for_status()
img_data = Image.open(io.BytesIO(image_response.content))
if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
img_data.save(filepath)
print(f"AI Image (DALL-E) saved: {filepath}")
return filepath
                except openai.RateLimitError as e:
                    print(f"OpenAI Rate Limit Error: {e}. Retrying after delay...")
                    if attempt < max_retries - 1:
                        time.sleep(5 * (attempt + 1))  # Backoff: 5s, then 10s
                        continue
                    else:
                        print("Max retries reached for RateLimitError.")
                        break
                except openai.APIError as e:
                    print(f"OpenAI API Error: {e}")
                    break
                except requests.exceptions.RequestException as e:
                    print(f"Requests Error (DALL-E image download): {e}")
                    break
                except Exception as e:
                    print(f"Generic error (DALL-E image gen): {e}")
                    break
print("DALL-E generation failed after retries. Falling back to placeholder.")
return self._create_placeholder_image_content(
f"[AI Gen Failed] Prompt: {image_prompt_text[:100]}...",
scene_identifier_filename, size=self.video_frame_size
)
else:
return self._create_placeholder_image_content(
image_prompt_text, scene_identifier_filename, size=self.video_frame_size
)
def create_video_from_images(self, image_data_list, output_filename="final_video.mp4", fps=24, duration_per_image=4):
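        """Assemble an MP4 from dicts holding 'path', 'scene_num' and 'key_action',
        letterboxing each image, then adding a Ken Burns zoom and caption overlays.
        Returns the video path, or None on failure."""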
if not image_data_list: return None
print(f"Creating video from {len(image_data_list)} image sets.")
processed_clips = []
for i, data in enumerate(image_data_list):
            img_path = data.get('path')
            scene_num = data.get('scene_num', i + 1)
            key_action = data.get('key_action', '')
            if not (img_path and os.path.exists(img_path)):
                print(f"Image not found: {img_path}. Skipping.")
                continue
try:
pil_img_orig = Image.open(img_path)
if pil_img_orig.mode != 'RGB': pil_img_orig = pil_img_orig.convert('RGB')
# Resize and letterbox/pillarbox to video_frame_size
img_for_frame = pil_img_orig.copy()
img_for_frame.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
canvas = Image.new('RGB', self.video_frame_size, (0,0,0))
x_offset = (self.video_frame_size[0] - img_for_frame.width) // 2
y_offset = (self.video_frame_size[1] - img_for_frame.height) // 2
canvas.paste(img_for_frame, (x_offset, y_offset))
frame_np = np.array(canvas)
img_clip = ImageClip(frame_np).set_duration(duration_per_image)
# Ken Burns: zoom from 100% to 110%
img_clip = img_clip.fx(vfx.resize, lambda t: 1 + 0.1 * (t / duration_per_image))
img_clip = img_clip.set_position('center')
# Text Overlay
if key_action:
overlay_text = f"Scene {scene_num}\n{key_action}"
                    txt_clip = TextClip(overlay_text, fontsize=self.video_overlay_font_size,
                                        color=self.video_overlay_font_color, font=self.video_overlay_font,
                                        bg_color='rgba(0,0,0,0.7)', method='caption', align='West',
                                        size=(int(self.video_frame_size[0] * 0.85), None),  # ImageMagick expects an integer width
                                        kerning=-1, stroke_color='black', stroke_width=0.5
                                        ).set_duration(duration_per_image - 1.0).set_start(0.5)  # Show for duration-1s, starting 0.5s in
txt_clip = txt_clip.set_position(('center', 0.88), relative=True)
final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size)
                else:
                    # Wrap the bare clip too, so the Ken Burns zoom can't push the
                    # frame beyond the target size on scenes without overlays.
                    final_scene_clip = CompositeVideoClip([img_clip], size=self.video_frame_size)
processed_clips.append(final_scene_clip)
            except Exception as e:
                print(f"Error processing clip for {img_path}: {e}. Skipping.")
        if not processed_clips:
            print("No clips processed for video.")
            return None
transition_duration = 0.75 # Crossfade duration
final_video = concatenate_videoclips(processed_clips, padding=-transition_duration, method="compose")
if final_video.duration > transition_duration*2: # Ensure enough duration for fades
final_video = final_video.fx(vfx.fadein, transition_duration).fx(vfx.fadeout, transition_duration)
output_path = os.path.join(self.output_dir, output_filename)
try:
final_video.write_videofile(output_path, fps=fps, codec='libx264', preset='medium', audio_codec='aac',
temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
remove_temp=True, threads=os.cpu_count() or 2, logger='bar')
print(f"Video created: {output_path}"); return output_path
except Exception as e: print(f"Error writing video file: {e}"); return None
finally:
for clip in processed_clips: clip.close()
if hasattr(final_video, 'close'): final_video.close()
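

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the CineGen pipeline. It assumes an
    # OPENAI_API_KEY environment variable; without one the engine just renders
    # placeholder cards instead of DALL-E images. The prompt text is illustrative.
    engine = VisualEngine(output_dir="temp_cinegen_media")
    engine.set_openai_api_key(os.getenv("OPENAI_API_KEY"))
    img = engine.generate_image_visual(
        "A rain-soaked neon alley at night, cinematic wide shot", "scene_1.png")
    if img:
        video = engine.create_video_from_images(
            [{'path': img, 'scene_num': 1, 'key_action': "Hero enters the alley"}],
            output_filename="demo.mp4")
        print(f"Demo video: {video}")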