Spaces:
Running
Running
File size: 12,968 Bytes
15a5b8a 68b40f6 15a5b8a 0ed501e b4a28c6 15a5b8a 44d5e00 15a5b8a 68b40f6 e326181 ca63f09 15a5b8a 44d5e00 68b40f6 0ed501e 126385a ca63f09 15a5b8a 44d5e00 15a5b8a 44d5e00 15a5b8a b4a28c6 15a5b8a 68b40f6 15a5b8a 68b40f6 0ed501e 15a5b8a ca63f09 15a5b8a ca63f09 15a5b8a ca63f09 15a5b8a 68b40f6 15a5b8a e326181 68b40f6 e326181 68b40f6 15a5b8a 68b40f6 126385a 15a5b8a 0ed501e 15a5b8a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 |
"""
project @ images_to_video
created @ 2024-12-17
author @ github.com/ishworrsubedii
"""
import os
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.VideoClip import ImageClip, ColorClip, TextClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.compositing.concatenate import concatenate_videoclips
from moviepy.video.compositing.transitions import slide_in, crossfadein, make_loopable, crossfadeout, slide_out
from moviepy.video.fx.resize import resize
from moviepy.video.io.VideoFileClip import VideoFileClip
class EachVideoCreator:
    """Assemble a 1080x1080 promotional video for a set of necklaces.

    The final video is built by concatenating, in order: an optional intro
    video, per-necklace preview clips on colored backgrounds, grouped
    NTO / CTO / makeup image clips with labeled text overlays, and an
    optional outro video. A per-clip transition may be applied, and a
    background audio track may be overlaid before rendering.
    """

    def __init__(self, necklace_title, backgrounds: list[tuple], nto_title, cto_title, makeup_title,
                 intro_video_path=None, necklace_image=None,
                 nto_outputs=None,
                 nto_cto_outputs=None, makeup_outputs=None, font_path=None, output_path=None,
                 audio_path=None, image_display_duration=2.5, box_color=(131, 42, 48), box_opacity=0.8,
                 font_size=28, text_color="white", fps=1, outro_title=None, logo_image=None, address=None,
                 phone_numbers=None, transition_duration=0.5, transition_type='None', direction='left',
                 outro_video_path: str = "", skip_necklace_video_portion=False):
        """Store all configuration for the video build.

        Args:
            necklace_title: Per-necklace labels, indexed alongside ``necklace_image``.
            backgrounds: List of RGB tuples used as backdrops for necklace previews.
            nto_title / cto_title / makeup_title: Nested label lists, one inner
                list per necklace (indexed as ``title[idx][idj]``).
            intro_video_path / outro_video_path: Optional video file paths.
            necklace_image: List of necklace image paths (defaults to empty).
            nto_outputs / nto_cto_outputs / makeup_outputs: Nested lists of
                image paths, one inner list (group) per necklace.
            font_path: Font file used for all text overlays.
            output_path: Destination path for the rendered video.
            audio_path: Optional background audio file path.
            image_display_duration: Seconds each still clip is shown.
            box_color / box_opacity / font_size / text_color: Overlay styling.
            fps: Output frame rate passed to ``write_videofile``.
            outro_title / logo_image / address / phone_numbers: Content for
                the contact card built by ``create_last_clip``.
            transition_duration: Seconds for each transition effect.
            transition_type: One of 'crossfadein', 'crossfadeout', 'slideout',
                'slidein', 'loop'; any other value applies no transition.
            direction: 'left' or anything else; controls slide side mapping.
            skip_necklace_video_portion: When True, skip the necklace preview
                clips (NTO/CTO/makeup clips are still produced).
        """
        self.intro_video_path = intro_video_path
        # Fall back to empty lists so iteration below is always safe.
        self.necklace_images = necklace_image if necklace_image else []
        self.nto_outputs = nto_outputs if nto_outputs else []
        self.nto_cto_outputs = nto_cto_outputs if nto_cto_outputs else []
        self.makeup_outputs = makeup_outputs if makeup_outputs else []
        self.output_video_path = output_path
        self.font_path = font_path
        self.audio_path = audio_path
        self.image_display_duration = image_display_duration
        self.box_color = box_color
        self.box_opacity = box_opacity
        self.font_size = font_size
        self.text_color = text_color
        self.fps = fps
        self.necklace_title = necklace_title
        self.nto_title = nto_title
        self.cto_title = cto_title
        self.makeup_title = makeup_title
        self.backgrounds = backgrounds
        self.outro_title = outro_title
        self.logo_image = logo_image
        self.address = address
        self.phone_numbers = phone_numbers
        self.transition_duration = transition_duration
        self.transition_type = transition_type
        self.direction = direction
        self.outro_video_path = outro_video_path
        self.skip_necklace_video_portion = skip_necklace_video_portion

    def create_necklace_clips(self, necklace_image, index, label):
        """Build one labeled preview clip of *necklace_image* per background.

        Returns a list of CompositeVideoClip (one per entry in
        ``self.backgrounds``), or an empty list when no image was provided.
        """
        if not necklace_image:
            print(f"Skipping necklace {index + 1}: No image provided.")
            return []
        necklace_clips = []
        for bg_color in self.backgrounds:
            bg_clip = ColorClip((1080, 1080), col=bg_color, duration=self.image_display_duration)
            # Necklace is scaled to a fixed 500px width and centered on the backdrop.
            necklace = resize(ImageClip(necklace_image), width=500)
            necklace = necklace.set_duration(self.image_display_duration).set_position('center')
            txt_overlay = self.create_text_overlay(f"{label}")
            final_clip = CompositeVideoClip([bg_clip, necklace, txt_overlay.set_position(('center', 'bottom'))])
            necklace_clips.append(final_clip)
        return necklace_clips

    def create_grouped_clips(self, grouped_images, label):
        """Build one labeled 1080x1080 clip per existing image in *grouped_images*.

        Only paths that exist on disk and end in .png/.jpg/.jpeg are used;
        anything else is silently skipped. Returns a (possibly empty) list.
        """
        clips = []
        for group in grouped_images:
            for img_path in group:
                if os.path.exists(img_path) and img_path.lower().endswith(('.png', '.jpg', '.jpeg')):
                    print(f"Processing {label} image: {img_path}")
                    img_clip = resize(ImageClip(img_path), (1080, 1080))
                    txt_overlay = self.create_text_overlay(f"{label}")
                    final_clip = CompositeVideoClip([
                        img_clip.set_duration(self.image_display_duration),
                        txt_overlay.set_position(('center', 'bottom'))
                    ])
                    clips.append(final_clip)
        return clips

    def create_text_overlay(self, text, duration=None):
        """Return *text* centered on a semi-transparent 1080x80 banner.

        *duration* defaults to ``self.image_display_duration``.
        """
        box = ColorClip((1080, 80), col=self.box_color, duration=duration or self.image_display_duration)
        box = box.set_opacity(self.box_opacity)
        txt = TextClip(text, font=self.font_path, fontsize=self.font_size, color=self.text_color)
        return CompositeVideoClip([box, txt.set_position('center')])

    def create_last_clip(self, title, logo_image, address, phone_numbers, font_path):
        """Build the closing contact card: logo, title, address and phone lines.

        All elements are laid out at fixed vertical offsets on a light-gray
        1080x1080 background and shown for ``self.image_display_duration``.
        """
        # Background clip (1080x1080, light gray color)
        bg_clip = ColorClip((1080, 1080), col=(245, 245, 245), duration=self.image_display_duration)
        # Resize logo to fit well within the 1080x1080 frame
        logo = resize(ImageClip(logo_image), width=400)
        logo = logo.set_duration(self.image_display_duration)
        logo = logo.set_position(lambda t: ('center', 200))  # Place logo near top
        # Title overlay text
        txt_overlay_title = TextClip(title, fontsize=50, color='black', font=font_path)
        txt_overlay_title = txt_overlay_title.set_duration(self.image_display_duration)
        txt_overlay_title = txt_overlay_title.set_position(lambda t: ('center', 600))  # Below logo
        # Address overlay text
        txt_overlay_address = TextClip(address, fontsize=30, color='black', font=font_path)
        txt_overlay_address = txt_overlay_address.set_duration(self.image_display_duration)
        txt_overlay_address = txt_overlay_address.set_position(lambda t: ('center', 680))  # Below title
        # Phone number overlay text
        txt_overlay_phone = TextClip(phone_numbers, fontsize=30, color='black', font=font_path)
        txt_overlay_phone = txt_overlay_phone.set_duration(self.image_display_duration)
        txt_overlay_phone = txt_overlay_phone.set_position(lambda t: ('center', 730))  # Below address
        # Combine everything into the final clip
        final_clip = CompositeVideoClip([bg_clip, logo, txt_overlay_title, txt_overlay_address, txt_overlay_phone])
        return final_clip

    def apply_slideout_transition(self, clip, direction='left'):
        """Slide *clip* out over ``self.transition_duration``.

        BUG FIX: this previously called ``slide_in``, so the 'slideout'
        transition type actually slid clips in (and vice versa in
        ``apply_slidein_transition``). The implementations were swapped
        to match their names.
        """
        if direction == 'left':
            return slide_out(clip, duration=self.transition_duration, side='right')
        return slide_out(clip, duration=self.transition_duration, side='left')

    def apply_slidein_transition(self, clip, direction='left'):
        """Slide *clip* in over ``self.transition_duration``.

        BUG FIX: previously called ``slide_out``; see
        ``apply_slideout_transition``.
        """
        if direction == 'left':
            return slide_in(clip, duration=self.transition_duration, side='right')
        return slide_in(clip, duration=self.transition_duration, side='left')

    def apply_loopable_transition(self, clip):
        """Make *clip* loop seamlessly over ``self.transition_duration``."""
        return make_loopable(clip, duration=self.transition_duration)

    def apply_crossfadein_transition(self, clip):
        """Cross-fade *clip* in over ``self.transition_duration``."""
        return crossfadein(clip, self.transition_duration)

    def apply_crossfadeout_transition(self, clip):
        """Cross-fade *clip* out over ``self.transition_duration``."""
        return crossfadeout(clip, self.transition_duration)

    def create_final_video(self):
        """Assemble, transition, score and render the final video.

        Writes the result to ``self.output_video_path``. All failures are
        caught and printed rather than raised (best-effort pipeline).
        """
        try:
            print("Starting video creation...")
            clips = []
            # Step 1: Process Intro Video
            if self.intro_video_path and os.path.exists(self.intro_video_path):
                print(f"Adding intro video from path: {self.intro_video_path}")
                intro_clip = resize(VideoFileClip(self.intro_video_path), (1080, 1080))
                clips.append(intro_clip)
            else:
                print("Skipping intro video: Path not provided or invalid.")
            # Step 2: Process Necklaces and Associated Outputs
            for idx, necklace_image in enumerate(self.necklace_images):
                if not self.skip_necklace_video_portion:
                    print(f"Processing Necklace {idx + 1}...")
                    # Necklace preview clips
                    necklace_clips = self.create_necklace_clips(necklace_image, idx, self.necklace_title[idx])
                    if necklace_clips:
                        clips.extend(necklace_clips)
                    else:
                        print(f"Skipping Necklace {idx + 1} preview: No valid clips created.")
                # NTO outputs
                # NOTE(review): each idj iteration passes the WHOLE group, so every
                # title repeats all images of the group — confirm this is intended
                # rather than pairing image idj with title idj.
                if idx < len(self.nto_outputs):
                    for idj in range(len(self.nto_outputs[idx])):
                        print(f"Total NTO outputs{len(self.nto_outputs)}")
                        print(f"Adding NTO outputs for Necklace {idx + 1}")
                        nto_clips = self.create_grouped_clips([self.nto_outputs[idx]], self.nto_title[idx][idj])
                        if nto_clips:
                            clips.extend(nto_clips)
                        else:
                            # Consistency: CTO/makeup branches report empty results too.
                            print(f"No valid NTO clips for Necklace {idx + 1}")
                if idx < len(self.nto_cto_outputs):
                    for idj in range(len(self.nto_cto_outputs[idx])):
                        print(f"Total CTO outputs{len(self.nto_cto_outputs)}")
                        print(f"Adding CTO outputs for Necklace {idx + 1}")
                        cto_clips = self.create_grouped_clips([self.nto_cto_outputs[idx]], self.cto_title[idx][idj])
                        if cto_clips:
                            clips.extend(cto_clips)
                        else:
                            print(f"No valid CTO clips for Necklace {idx + 1}")
                if idx < len(self.makeup_outputs):
                    for idj in range(len(self.makeup_outputs[idx])):
                        print(f"Total Makeup outputs{len(self.makeup_outputs)}")
                        print(f"Adding Makeup outputs for Necklace {idx + 1}")
                        makeup_clips = self.create_grouped_clips([self.makeup_outputs[idx]],
                                                                 self.makeup_title[idx][idj])
                        if makeup_clips:
                            clips.extend(makeup_clips)
                        else:
                            print(f"No valid Makeup clips for Necklace {idx + 1}")
            # Step 3: Process Outro Video
            if self.outro_video_path and os.path.exists(self.outro_video_path):
                print(f"Adding outro video from path: {self.outro_video_path}")
                outro_clip = resize(VideoFileClip(self.outro_video_path), (1080, 1080))
                clips.append(outro_clip)
            # Step 4: Apply the configured transition to every clip.
            final_clips = []
            for clip in clips:
                # NOTE(review): this forces EVERY clip (including intro/outro
                # videos) to image_display_duration, truncating longer videos —
                # confirm that is the intended behavior.
                clip = clip.set_duration(self.image_display_duration)
                try:
                    if self.transition_type == 'crossfadein':
                        clip_with_transition = self.apply_crossfadein_transition(clip)
                    elif self.transition_type == 'crossfadeout':
                        clip_with_transition = self.apply_crossfadeout_transition(clip)
                    elif self.transition_type == 'slideout':
                        clip_with_transition = self.apply_slideout_transition(clip, self.direction)
                    elif self.transition_type == 'slidein':
                        clip_with_transition = self.apply_slidein_transition(clip, self.direction)
                    elif self.transition_type == 'loop':
                        clip_with_transition = self.apply_loopable_transition(clip)
                    else:
                        clip_with_transition = clip
                    final_clips.append(clip_with_transition)
                except Exception as e:
                    # Best-effort: keep the untransitioned clip on failure.
                    print(f"Error applying transition: {e}")
                    final_clips.append(clip)
            clips = final_clips
            if not clips:
                print("No valid clips to combine. Exiting.")
                return
            print(f"Total clips to concatenate: {len(clips)}")
            final_video = concatenate_videoclips(clips, method="compose")
            # Step 5: Optional background audio, trimmed to the video length.
            if self.audio_path and os.path.exists(self.audio_path):
                print(f"Adding background audio from path: {self.audio_path}")
                try:
                    audio = AudioFileClip(self.audio_path).subclip(0, final_video.duration)
                    final_video = final_video.set_audio(audio)
                except Exception as e:
                    print(f"Error adding audio: {e}")
            else:
                print("Skipping background audio: Path not provided or invalid.")
            # Step 6: Render.
            print("Rendering final video...")
            final_video.write_videofile(
                self.output_video_path,
                fps=self.fps,
                codec="libx264",
                audio_codec="aac",
                threads=4,
                preset="ultrafast"
            )
            print(f"Video successfully saved to: {self.output_video_path}")
        except Exception as e:
            print(f"An error occurred during video creation: {e}")
|