Update core/visual_engine.py

core/visual_engine.py  (+150 -108)  CHANGED

Previous version (removed lines are marked with "-"):
@@ -1,11 +1,32 @@
  # core/visual_engine.py
-
-
  from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
                              CompositeVideoClip, AudioFileClip)
  import moviepy.video.fx.all as vfx
  import numpy as np
- import os

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
@@ -22,208 +43,229 @@ except Exception as e: logger.warning(f"General ElevenLabs import error: {e}. Au


  class VisualEngine:
-     def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
-
-         self.
-         self.
-         try: self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil); # logger.info(f"Placeholder font: {self.font_path_in_container}.")
          except IOError: logger.warning(f"Placeholder font '{self.font_path_in_container}' fail. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10

          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
-         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"; self.video_frame_size = (1280, 720)
-
-         self.
-         self.elevenlabs_client = None
-         self.elevenlabs_voice_id = default_elevenlabs_voice_id # Use passed default or "Rachel"
          if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
              self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
          self.pexels_api_key = None; self.USE_PEXELS = False
          logger.info("VisualEngine initialized.")

-     def set_openai_api_key(self,k):
-
-         logger.info(f"DALL-E ({self.dalle_model}) {'Ready.' if k else 'Disabled.'}")
-
-     def set_elevenlabs_api_key(self, api_key, voice_id_from_secret=None): # Modified to accept voice_id
          self.elevenlabs_api_key=api_key
-         if voice_id_from_secret:
-             self.elevenlabs_voice_id = voice_id_from_secret
-             logger.info(f"ElevenLabs Voice ID set from secret/config: {self.elevenlabs_voice_id}")
-
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
              try:
                  self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
                  if self.elevenlabs_client: self.USE_ELEVENLABS=True; logger.info(f"ElevenLabs Client Ready (Voice: {self.elevenlabs_voice_id}).")
                  else: self.USE_ELEVENLABS=False; logger.warning("ElevenLabs client is None post-init.")
-             except Exception as e:
-
-
-
-                 self.USE_ELEVENLABS=False; self.elevenlabs_client = None
-         if not (ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient):pass
-         else: logger.info("ElevenLabs API Key not provided. Disabled.")
-
-     def set_pexels_api_key(self,k): # Same
-         self.pexels_api_key=k; self.USE_PEXELS=bool(k)
-         logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")

-
-
-     def _get_text_dimensions(self,t,f): # Same
-         if not t: return 0,self.font_size_pil
          try:
-             if hasattr(
-             elif hasattr(
-             else: return int(len(
-         except: return int(len(

-     def _create_placeholder_image_content(self,
-         if
-         img=Image.new('RGB',
-         if not
-
-         for
-
-             if self._get_text_dimensions(
              else:
-                 if
-         if
-         if not
          _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
-
-
-         for i in range(
-
-             d.text((
-             if i==6 and
-
-         try:img.save(
-         except Exception as e:logger.error(f"Saving placeholder image {

      def _search_pexels_image(self, query, output_filename_base): # Same
          if not self.USE_PEXELS or not self.pexels_api_key: return None
          headers = {"Authorization": self.pexels_api_key}; params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
          pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg")
-
          try:
-             logger.info(f"Searching Pexels for: '{query}'");
-
-
              if data.get("photos") and len(data["photos"]) > 0:
-
-
-
-                 if
-
-             else: logger.info(f"No photos on Pexels for: '{
-         except Exception as e: logger.error(f"Pexels
          return None

      def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename): # Same
-
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
-
-             for attempt in range(
                  try:
                      logger.info(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
                      client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
-
-
-                     if
-
-
-                     if
-
                  except openai.RateLimitError as e:
                      logger.warning(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s..."); time.sleep(5 * (attempt + 1))
-                     if attempt ==
                      else: continue
                  except openai.APIError as e: logger.error(f"OpenAI API Error: {e}"); break
                  except requests.exceptions.RequestException as e: logger.error(f"Requests Error (DALL-E download): {e}"); break
                  except Exception as e: logger.error(f"Generic error (DALL-E gen): {e}", exc_info=True); break
-             logger.warning("DALL-E failed. Trying Pexels fallback...")
-
-
-             if
              logger.warning("Pexels also failed/disabled. Using placeholder.")
              return self._create_placeholder_image_content(f"[AI/Pexels Failed] {image_prompt_text[:100]}...", scene_identifier_filename)
          else:
              return self._create_placeholder_image_content(image_prompt_text, scene_identifier_filename)

-     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"): #
          if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
              logger.info("ElevenLabs conditions not met. Skipping audio generation.")
              return None

          audio_filepath = os.path.join(self.output_dir, output_filename)
          try:
-             # self.elevenlabs_voice_id is now set during set_elevenlabs_api_key or by UI
              logger.info(f"Generating ElevenLabs audio (Voice ID: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")

              if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
                  logger.info("Using elevenlabs_client.text_to_speech.stream()")
                  audio_data_iterator = self.elevenlabs_client.text_to_speech.stream(
                      text=text_to_narrate,
-                     voice_id=str(self.elevenlabs_voice_id), #
                      model_id="eleven_multilingual_v2",
                  )
-
-
-
                  audio_data_iterator = self.elevenlabs_client.generate(
                      text=text_to_narrate, voice=voice_param, model="eleven_multilingual_v2")
              else:
-                 logger.error("No recognized audio generation method on ElevenLabs client.")

              with open(audio_filepath, "wb") as f:
                  for chunk in audio_data_iterator:
                      if chunk: f.write(chunk)
              logger.info(f"ElevenLabs audio saved: {audio_filepath}")
              return audio_filepath
-         except AttributeError as ae:
-
          return None

-     def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
          if not image_data_list: logger.warning("No image data for video."); return None
          processed_clips=[]; narration_audio_clip=None; final_video_clip_obj=None
          logger.info(f"Preparing {len(image_data_list)} clips. Target frame: {self.video_frame_size}. Duration/img: {duration_per_image}s.")
          for i, data in enumerate(image_data_list):
              img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
              if not (img_path and os.path.exists(img_path)): logger.warning(f"Img not found: {img_path}"); continue
              try:
-                 pil_img = Image.open(img_path)
                  if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
-
                  canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,5), random.randint(0,5), random.randint(0,5)))
-                 xo,yo=(self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
-                 canvas.paste(img_copy, (xo,yo))
                  img_clip_base = ImageClip(frame_np).set_duration(duration_per_image)
-
-
                  if key_action:
-                     txt_clip = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size,
-
                      ).set_duration(duration_per_image-1.0).set_start(0.5).set_position(('center',0.92),relative=True)
                      final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size, use_bgclip=True, bg_color=(0,0,0))
                  else: final_scene_clip = img_clip
                  processed_clips.append(final_scene_clip)
              except Exception as e: logger.error(f"Creating video clip for {img_path}: {e}", exc_info=True)
          if not processed_clips: logger.warning("No clips processed for video."); return None
          transition = 0.75
          try:
              final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
-             if final_video_clip_obj.duration > transition*2:
              if overall_narration_path and os.path.exists(overall_narration_path):
                  try:
                      narration_audio_clip = AudioFileClip(overall_narration_path)
-                     if narration_audio_clip.duration < final_video_clip_obj.duration:
                      final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip); logger.info("Overall narration added.")
                  except Exception as e: logger.error(f"Adding overall narration: {e}", exc_info=True)
              output_path = os.path.join(self.output_dir, output_filename); logger.info(f"Writing final video to: {output_path}")
-             final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
                  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
                  remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
              logger.info(f"Video successfully created: {output_path}"); return output_path
Updated version (added lines are marked with "+"):

  # core/visual_engine.py
+ from PIL import Image, ImageDraw, ImageFont, ImageOps # Pillow should be >= 10.1.0
+ # --- MONKEY PATCH FOR Image.ANTIALIAS ---
+ # This is applied at module load time.
+ try:
+     if hasattr(Image, 'Resampling') and hasattr(Image.Resampling, 'LANCZOS'): # Pillow 9+
+         if not hasattr(Image, 'ANTIALIAS'):
+             Image.ANTIALIAS = Image.Resampling.LANCZOS
+             print("INFO: Monkey-patched PIL.Image.ANTIALIAS with Image.Resampling.LANCZOS.")
+     elif hasattr(Image, 'LANCZOS'): # Pillow 8 used Image.LANCZOS directly
+         if not hasattr(Image, 'ANTIALIAS'):
+             Image.ANTIALIAS = Image.LANCZOS
+             print("INFO: Monkey-patched PIL.Image.ANTIALIAS with Image.LANCZOS.")
+ except AttributeError:
+     print("WARNING: Could not determine Pillow version or attributes for ANTIALIAS monkey-patch. Video effects might fail.")
+ # --- END MONKEY PATCH ---
+
  from moviepy.editor import (ImageClip, concatenate_videoclips, TextClip,
                              CompositeVideoClip, AudioFileClip)
  import moviepy.video.fx.all as vfx
  import numpy as np
+ import os
+ import openai
+ import requests
+ import io
+ import time
+ import random
+ import subprocess
+ import logging

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)

  ... (unchanged lines 33-42 not shown) ...


  class VisualEngine:
+     def __init__(self, output_dir="temp_cinegen_media", default_elevenlabs_voice_id="Rachel"):
          self.output_dir = output_dir; os.makedirs(self.output_dir, exist_ok=True)
+         self.font_filename = "arial.ttf"; self.font_path_in_container = f"/usr/local/share/fonts/truetype/mycustomfonts/{self.font_filename}"
+         self.font_size_pil = 20; self.video_overlay_font_size = 30; self.video_overlay_font_color = 'white'; self.video_overlay_font = 'Liberation-Sans-Bold' # Or 'Arial-Bold'
+         try: self.font = ImageFont.truetype(self.font_path_in_container, self.font_size_pil); logger.info(f"Placeholder font loaded: {self.font_path_in_container}.")
          except IOError: logger.warning(f"Placeholder font '{self.font_path_in_container}' fail. Default."); self.font = ImageFont.load_default(); self.font_size_pil = 10

          self.openai_api_key = None; self.USE_AI_IMAGE_GENERATION = False
+         self.dalle_model = "dall-e-3"; self.image_size_dalle3 = "1792x1024"; self.video_frame_size = (1280, 720)
+         self.elevenlabs_api_key = None; self.USE_ELEVENLABS = False; self.elevenlabs_client = None
+         self.elevenlabs_voice_id = default_elevenlabs_voice_id
          if VoiceSettings and ELEVENLABS_CLIENT_IMPORTED:
              self.elevenlabs_voice_settings = VoiceSettings(stability=0.60, similarity_boost=0.80, style=0.15, use_speaker_boost=True)
          else: self.elevenlabs_voice_settings = None
          self.pexels_api_key = None; self.USE_PEXELS = False
          logger.info("VisualEngine initialized.")

+     def set_openai_api_key(self,k): self.openai_api_key=k; self.USE_AI_IMAGE_GENERATION=bool(k); logger.info(f"DALL-E {self.dalle_model} {'Ready.' if k else 'Disabled.'}")
+     def set_elevenlabs_api_key(self,api_key, voice_id_from_secret=None):
          self.elevenlabs_api_key=api_key
+         if voice_id_from_secret: self.elevenlabs_voice_id = voice_id_from_secret; logger.info(f"ElevenLabs Voice ID set from config: {self.elevenlabs_voice_id}")
          if api_key and ELEVENLABS_CLIENT_IMPORTED and ElevenLabsAPIClient:
              try:
                  self.elevenlabs_client = ElevenLabsAPIClient(api_key=api_key)
                  if self.elevenlabs_client: self.USE_ELEVENLABS=True; logger.info(f"ElevenLabs Client Ready (Voice: {self.elevenlabs_voice_id}).")
                  else: self.USE_ELEVENLABS=False; logger.warning("ElevenLabs client is None post-init.")
+             except Exception as e: logger.error(f"Error initializing ElevenLabs client: {e}. Disabled.", exc_info=True); self.USE_ELEVENLABS=False; self.elevenlabs_client = None
+         else: self.USE_ELEVENLABS=False; self.elevenlabs_client = None;
+
+     def set_pexels_api_key(self,k): self.pexels_api_key=k; self.USE_PEXELS=bool(k); logger.info(f"Pexels Search {'Ready.' if k else 'Disabled.'}")

+     def _get_text_dimensions(self,text_content,font_obj): # Same
+         if not text_content: return 0,self.font_size_pil
          try:
+             if hasattr(font_obj,'getbbox'): bbox=font_obj.getbbox(text_content);w=bbox[2]-bbox[0];h=bbox[3]-bbox[1];return w, h if h > 0 else self.font_size_pil
+             elif hasattr(font_obj,'getsize'): w,h=font_obj.getsize(text_content);return w, h if h > 0 else self.font_size_pil
+             else: return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2 if self.font_size_pil*1.2>0 else self.font_size_pil)
+         except: return int(len(text_content)*self.font_size_pil*0.6),int(self.font_size_pil*1.2)

+     def _create_placeholder_image_content(self,text_description,filename,size=None): # Same
+         if size is None: size = self.video_frame_size
+         img=Image.new('RGB',size,color=(20,20,40));d=ImageDraw.Draw(img);padding=25;max_w=size[0]-(2*padding);lines=[];
+         if not text_description: text_description="(Placeholder: No prompt text)"
+         words=text_description.split();current_line=""
+         for word in words:
+             test_line=current_line+word+" ";
+             if self._get_text_dimensions(test_line,self.font)[0] <= max_w: current_line=test_line
              else:
+                 if current_line: lines.append(current_line.strip()); current_line=word+" "
+         if current_line: lines.append(current_line.strip())
+         if not lines: lines.append("(Text error or too long for placeholder)")
          _,single_line_h=self._get_text_dimensions("Ay",self.font); single_line_h = single_line_h if single_line_h > 0 else self.font_size_pil + 2
+         max_lines_to_display=min(len(lines),(size[1]-(2*padding))//(single_line_h+2))
+         y_text=padding + (size[1]-(2*padding) - max_lines_to_display*(single_line_h+2))/2.0
+         for i in range(max_lines_to_display):
+             line_content=lines[i];line_w,_=self._get_text_dimensions(line_content,self.font);x_text=(size[0]-line_w)/2.0
+             d.text((x_text,y_text),line_content,font=self.font,fill=(200,200,180));y_text+=single_line_h+2
+             if i==6 and max_lines_to_display > 7: d.text((x_text,y_text),"...",font=self.font,fill=(200,200,180));break
+         filepath=os.path.join(self.output_dir,filename);
+         try:img.save(filepath);return filepath
+         except Exception as e:logger.error(f"Saving placeholder image {filepath}: {e}", exc_info=True);return None

      def _search_pexels_image(self, query, output_filename_base): # Same
          if not self.USE_PEXELS or not self.pexels_api_key: return None
          headers = {"Authorization": self.pexels_api_key}; params = {"query": query, "per_page": 1, "orientation": "landscape", "size": "large"}
          pexels_filename = output_filename_base.replace(".png", f"_pexels_{random.randint(1000,9999)}.jpg")
+         filepath = os.path.join(self.output_dir, pexels_filename)
          try:
+             logger.info(f"Searching Pexels for: '{query}'"); effective_query = " ".join(query.split()[:5]); params["query"] = effective_query
+             response = requests.get("https://api.pexels.com/v1/search", headers=headers, params=params, timeout=20)
+             response.raise_for_status(); data = response.json()
              if data.get("photos") and len(data["photos"]) > 0:
+                 photo_url = data["photos"][0]["src"]["large2x"]
+                 image_response = requests.get(photo_url, timeout=60); image_response.raise_for_status()
+                 img_data = Image.open(io.BytesIO(image_response.content))
+                 if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
+                 img_data.save(filepath); logger.info(f"Pexels image saved: {filepath}"); return filepath
+             else: logger.info(f"No photos on Pexels for query: '{effective_query}'")
+         except Exception as e: logger.error(f"Pexels search/download for query '{query}': {e}", exc_info=True)
          return None

      def generate_image_visual(self, image_prompt_text, scene_data, scene_identifier_filename): # Same
+         filepath = os.path.join(self.output_dir, scene_identifier_filename)
          if self.USE_AI_IMAGE_GENERATION and self.openai_api_key:
+             max_retries = 2
+             for attempt in range(max_retries):
                  try:
                      logger.info(f"Attempt {attempt+1}: DALL-E ({self.dalle_model}) for: {image_prompt_text[:100]}...")
                      client = openai.OpenAI(api_key=self.openai_api_key, timeout=90.0)
+                     response = client.images.generate(model=self.dalle_model, prompt=image_prompt_text, n=1, size=self.image_size_dalle3, quality="hd", response_format="url", style="vivid")
+                     image_url = response.data[0].url; revised_prompt = getattr(response.data[0], 'revised_prompt', None)
+                     if revised_prompt: logger.info(f"DALL-E 3 revised_prompt: {revised_prompt[:100]}...")
+                     image_response = requests.get(image_url, timeout=120); image_response.raise_for_status()
+                     img_data = Image.open(io.BytesIO(image_response.content));
+                     if img_data.mode != 'RGB': img_data = img_data.convert('RGB')
+                     img_data.save(filepath); logger.info(f"AI Image (DALL-E) saved: {filepath}"); return filepath
                  except openai.RateLimitError as e:
                      logger.warning(f"OpenAI Rate Limit: {e}. Retrying after {5*(attempt+1)}s..."); time.sleep(5 * (attempt + 1))
+                     if attempt == max_retries - 1: logger.error("Max retries for RateLimitError."); break
                      else: continue
                  except openai.APIError as e: logger.error(f"OpenAI API Error: {e}"); break
                  except requests.exceptions.RequestException as e: logger.error(f"Requests Error (DALL-E download): {e}"); break
                  except Exception as e: logger.error(f"Generic error (DALL-E gen): {e}", exc_info=True); break
+             logger.warning("DALL-E generation failed. Trying Pexels fallback...")
+             pexels_query_text = scene_data.get('pexels_search_query_감독', f"{scene_data.get('emotional_beat','')} {scene_data.get('setting_description','')}")
+             pexels_path = self._search_pexels_image(pexels_query_text, scene_identifier_filename)
+             if pexels_path: return pexels_path
              logger.warning("Pexels also failed/disabled. Using placeholder.")
              return self._create_placeholder_image_content(f"[AI/Pexels Failed] {image_prompt_text[:100]}...", scene_identifier_filename)
          else:
              return self._create_placeholder_image_content(image_prompt_text, scene_identifier_filename)

+     def generate_narration_audio(self, text_to_narrate, output_filename="narration_overall.mp3"): # Corrected client call
          if not self.USE_ELEVENLABS or not self.elevenlabs_client or not text_to_narrate:
              logger.info("ElevenLabs conditions not met. Skipping audio generation.")
              return None

          audio_filepath = os.path.join(self.output_dir, output_filename)
          try:
              logger.info(f"Generating ElevenLabs audio (Voice ID: {self.elevenlabs_voice_id}) for: {text_to_narrate[:70]}...")

+             # Use the client's text_to_speech.stream() method
              if hasattr(self.elevenlabs_client, 'text_to_speech') and hasattr(self.elevenlabs_client.text_to_speech, 'stream'):
                  logger.info("Using elevenlabs_client.text_to_speech.stream()")
                  audio_data_iterator = self.elevenlabs_client.text_to_speech.stream(
                      text=text_to_narrate,
+                     voice_id=str(self.elevenlabs_voice_id), # Must be string ID for this method
                      model_id="eleven_multilingual_v2",
+                     # voice_settings=self.elevenlabs_voice_settings # Pass VoiceSettings if API supports it here
                  )
+             # Fallback if .generate() with Voice object is preferred by a slightly different client version
+             elif hasattr(self.elevenlabs_client, 'generate') and Voice and self.elevenlabs_voice_settings:
+                 logger.info("Using elevenlabs_client.generate() with Voice object as fallback.")
+                 voice_param = Voice(voice_id=str(self.elevenlabs_voice_id), settings=self.elevenlabs_voice_settings)
                  audio_data_iterator = self.elevenlabs_client.generate(
                      text=text_to_narrate, voice=voice_param, model="eleven_multilingual_v2")
              else:
+                 logger.error("No recognized audio generation method found on ElevenLabs client (tried text_to_speech.stream and generate).")
+                 return None

              with open(audio_filepath, "wb") as f:
                  for chunk in audio_data_iterator:
                      if chunk: f.write(chunk)
              logger.info(f"ElevenLabs audio saved: {audio_filepath}")
              return audio_filepath
+         except AttributeError as ae:
+             logger.error(f"AttributeError with ElevenLabs client: {ae}. SDK method/structure might be different.", exc_info=True)
+         except Exception as e:
+             logger.error(f"Error generating ElevenLabs audio: {e}", exc_info=True)
          return None

+     def create_video_from_images(self, image_data_list, overall_narration_path=None, output_filename="final_video.mp4", fps=24, duration_per_image=4.5):
          if not image_data_list: logger.warning("No image data for video."); return None
          processed_clips=[]; narration_audio_clip=None; final_video_clip_obj=None
          logger.info(f"Preparing {len(image_data_list)} clips. Target frame: {self.video_frame_size}. Duration/img: {duration_per_image}s.")
+
          for i, data in enumerate(image_data_list):
              img_path, scene_num, key_action = data.get('path'), data.get('scene_num', i+1), data.get('key_action', '')
              if not (img_path and os.path.exists(img_path)): logger.warning(f"Img not found: {img_path}"); continue
              try:
+                 pil_img = Image.open(img_path)
                  if pil_img.mode != 'RGB': pil_img = pil_img.convert('RGB')
+
+                 img_copy = pil_img.copy()
+                 # Using modern Resampling.LANCZOS
+                 img_copy.thumbnail(self.video_frame_size, Image.Resampling.LANCZOS)
+
                  canvas = Image.new('RGB', self.video_frame_size, (random.randint(0,5), random.randint(0,5), random.randint(0,5)))
+                 xo, yo = (self.video_frame_size[0]-img_copy.width)//2, (self.video_frame_size[1]-img_copy.height)//2
+                 canvas.paste(img_copy, (xo,yo))
+                 frame_np = np.array(canvas)
+
                  img_clip_base = ImageClip(frame_np).set_duration(duration_per_image)
+
+                 # Ken Burns Effect (vfx.resize) - THIS IS THE LINE THAT CAUSED ANTIALIAS ERROR
+                 # If ANTIALIAS error persists even with monkey patch and correct Pillow/MoviePy versions,
+                 # this specific effect might need to be disabled or re-implemented differently.
+                 # For now, we assume the monkey patch or correct versions will fix it.
+                 try:
+                     end_scale = random.uniform(1.03, 1.08)
+                     img_clip = img_clip_base.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration_per_image))
+                     img_clip = img_clip.set_position('center')
+                 except AttributeError as e_alias: # Specifically catch ANTIALIAS here if it happens
+                     if 'ANTIALIAS' in str(e_alias):
+                         logger.error(f"ANTIALIAS error during vfx.resize for {img_path}. Disabling Ken Burns for this clip. Error: {e_alias}")
+                         img_clip = img_clip_base # Use the clip without the resize effect
+                     else:
+                         raise # Re-raise other AttributeErrors
+                 except Exception as e_fx: # Catch other errors from fx
+                     logger.error(f"Error applying vfx.resize for {img_path}. Using base clip. Error: {e_fx}")
+                     img_clip = img_clip_base
+
+
                  if key_action:
+                     txt_clip = TextClip(f"Scene {scene_num}\n{key_action}", fontsize=self.video_overlay_font_size,
+                                         color=self.video_overlay_font_color, font=self.video_overlay_font,
+                                         bg_color='rgba(10,10,20,0.8)', method='caption', align='West',
+                                         size=(self.video_frame_size[0]*0.9, None), kerning=-1, stroke_color='black', stroke_width=1.5
                      ).set_duration(duration_per_image-1.0).set_start(0.5).set_position(('center',0.92),relative=True)
                      final_scene_clip = CompositeVideoClip([img_clip, txt_clip], size=self.video_frame_size, use_bgclip=True, bg_color=(0,0,0))
                  else: final_scene_clip = img_clip
                  processed_clips.append(final_scene_clip)
              except Exception as e: logger.error(f"Creating video clip for {img_path}: {e}", exc_info=True)
+
          if not processed_clips: logger.warning("No clips processed for video."); return None
+
          transition = 0.75
          try:
              final_video_clip_obj = concatenate_videoclips(processed_clips, padding=-transition, method="compose")
+             if final_video_clip_obj.duration > transition*2:
+                 final_video_clip_obj = final_video_clip_obj.fx(vfx.fadein, transition).fx(vfx.fadeout, transition)
+
              if overall_narration_path and os.path.exists(overall_narration_path):
                  try:
                      narration_audio_clip = AudioFileClip(overall_narration_path)
+                     if narration_audio_clip.duration < final_video_clip_obj.duration:
+                         final_video_clip_obj = final_video_clip_obj.subclip(0, narration_audio_clip.duration)
                      final_video_clip_obj = final_video_clip_obj.set_audio(narration_audio_clip); logger.info("Overall narration added.")
                  except Exception as e: logger.error(f"Adding overall narration: {e}", exc_info=True)
+
              output_path = os.path.join(self.output_dir, output_filename); logger.info(f"Writing final video to: {output_path}")
+             final_video_clip_obj.write_videofile(output_path, fps=fps, codec='libx264', preset='medium',
+                                                  audio_codec='aac',
                  temp_audiofile=os.path.join(self.output_dir, f'temp-audio-{os.urandom(4).hex()}.m4a'),
                  remove_temp=True, threads=os.cpu_count() or 2, logger='bar', bitrate="5000k")
              logger.info(f"Video successfully created: {output_path}"); return output_path
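
Pillow 10 removed the long-deprecated Image.ANTIALIAS constant, while MoviePy 1.x's vfx.resize still references it; the module-level monkey patch in this change restores the alias before any frames are resized. A minimal, standalone sketch of the same shim (the sample image and sizes below are illustrative, not from this repo):

from PIL import Image

# Restore Image.ANTIALIAS on Pillow >= 10, where it was removed.
# Pillow >= 9.1 exposes Image.Resampling.LANCZOS; older releases expose Image.LANCZOS.
if not hasattr(Image, "ANTIALIAS"):
    Image.ANTIALIAS = getattr(getattr(Image, "Resampling", Image), "LANCZOS")

img = Image.new("RGB", (1920, 1080), (10, 10, 30))
img.thumbnail((1280, 720), Image.ANTIALIAS)  # the same call now works on old and new Pillow
print(img.size)  # (1280, 720)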
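
The Ken Burns zoom in create_video_from_images is built on vfx.resize taking a function of time, so each frame is scaled slightly more than the previous one. A standalone sketch of the same effect, assuming MoviePy 1.x; "frame.png" and the output filename are placeholders:

import random
from moviepy.editor import ImageClip
import moviepy.video.fx.all as vfx

duration = 4.5
end_scale = random.uniform(1.03, 1.08)  # end the clip 3-8% larger than it starts

clip = ImageClip("frame.png").set_duration(duration)
# vfx.resize accepts a callable of t (seconds) returning the scale factor for that frame.
clip = clip.fx(vfx.resize, lambda t: 1 + (end_scale - 1) * (t / duration)).set_position("center")
clip.write_videofile("ken_burns_demo.mp4", fps=24, codec="libx264")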
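
_create_placeholder_image_content wraps the prompt text greedily, measuring each candidate line with the loaded font rather than guessing from character counts. A minimal sketch of that wrapping strategy using only Pillow (the text, card size, and colours are illustrative):

from PIL import Image, ImageDraw, ImageFont

def wrap_text(draw, text, font, max_width):
    # Greedy wrap: keep appending words until the rendered line would overflow.
    lines, current = [], ""
    for word in text.split():
        candidate = current + word + " "
        if draw.textbbox((0, 0), candidate, font=font)[2] <= max_width:
            current = candidate
        else:
            if current:
                lines.append(current.strip())
            current = word + " "
    if current:
        lines.append(current.strip())
    return lines

img = Image.new("RGB", (1280, 720), (20, 20, 40))
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()
y = 25
for line in wrap_text(draw, "A long placeholder prompt describing the scene in some detail.", font, 1230):
    draw.text((25, y), line, font=font, fill=(200, 200, 180))
    y += 16
img.save("placeholder_demo.png")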
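
Narration stays optional: generate_narration_audio returns None whenever the ElevenLabs client is not configured or the SDK call fails, and create_video_from_images simply skips the audio track in that case. A hedged usage sketch, assuming this module is importable as core.visual_engine; the API key value is a placeholder and "Rachel" is just the class default:

from core.visual_engine import VisualEngine

engine = VisualEngine(output_dir="temp_cinegen_media")
engine.set_elevenlabs_api_key("YOUR_ELEVENLABS_API_KEY", voice_id_from_secret="Rachel")

narration_path = engine.generate_narration_audio(
    "A lone figure walks into the neon rain.",
    output_filename="narration_overall.mp3",
)
print(narration_path)  # path to the MP3 on success, None if narration was skipped or failed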
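
Taken together, the class can also run entirely offline: with no API keys set, generate_image_visual falls back to the PIL placeholder card and the clips are still assembled into a video. A hedged end-to-end sketch under those assumptions (the scene data is illustrative, and the TextClip overlays additionally require ImageMagick to be installed):

from core.visual_engine import VisualEngine

engine = VisualEngine(output_dir="temp_cinegen_media")  # no API keys -> placeholder images

scenes = [
    {"scene_num": 1, "key_action": "Hero enters the city",
     "prompt": "Rain-soaked neon street, cinematic wide shot"},
    {"scene_num": 2, "key_action": "The chase begins",
     "prompt": "Rooftop pursuit at dusk, dramatic rim lighting"},
]

image_data = []
for scene in scenes:
    path = engine.generate_image_visual(scene["prompt"], scene, f"scene_{scene['scene_num']}.png")
    if path:
        image_data.append({"path": path, "scene_num": scene["scene_num"], "key_action": scene["key_action"]})

video_path = engine.create_video_from_images(
    image_data, overall_narration_path=None, output_filename="final_video.mp4", fps=24)
print(video_path)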