Spaces: Running
nalin0503
committed on
Commit · 963b1a5
Parent(s): 9f0a675
Upload code from latest repo
Browse files
- FILM.py +102 -0
- Image-Morpher +1 -1
- Makefile +51 -0
- app.py +4 -1
- metamorphLogo_nobg.png +0 -0
- requirements.txt +4 -1
- run_morphing.py +248 -0
FILM.py
ADDED
@@ -0,0 +1,102 @@
"""
FILM-based Video Frame Interpolation

This script uses the FILM (Frame Interpolation for Large Motion) model to generate
smooth transitions between keyframes, creating a high-quality interpolated video.
It takes a series of PNG images as input and produces an MP4 video as output.

The script performs the following main steps:
1. Loads keyframes from a specified input folder
2. Preprocesses the images
3. Applies recursive frame interpolation using the FILM model
4. Generates a video from the interpolated frames
5. Saves the output video with a unique timestamp

Usage:
    Set 'input_folder' to the directory containing your PNG keyframes
    Set 'output_folder' to the desired location for the generated video
    Adjust the 'fps' and 'num_recursions' parameters as needed
"""
import os
import tensorflow as tf
import tensorflow_hub as hub
import cv2
import numpy as np
from glob import glob
from datetime import datetime
import time

# Load the FILM model
model = hub.load('https://tfhub.dev/google/film/1')

def preprocess_image(image_path):
    """Load and preprocess an image for the FILM model."""
    img = tf.io.read_file(image_path)
    img = tf.image.decode_png(img, channels=3)  # drop any alpha channel
    img = tf.image.convert_image_dtype(img, tf.float32)
    return img

class Interpolator:
    """Wrapper class for the FILM model to perform frame interpolation."""
    def __init__(self, align=64):
        self._model = model
        self._align = align

    def __call__(self, x0, x1, dt):
        """Interpolate between two frames at a given time step."""
        inputs = {'x0': x0, 'x1': x1, 'time': dt[..., np.newaxis]}  # prepare input: two frames and a timestep
        result = self._model(inputs, training=False)  # FILM call for the interpolated frame
        return result['image'].numpy()

def _recursive_generator(frame1, frame2, num_recursions, interpolator):
    """Recursively generate interpolated frames between two input frames."""
    if num_recursions == 0:
        yield frame1  # exit condition
    else:
        time = np.full(shape=(1,), fill_value=0.5, dtype=np.float32)
        mid_frame = interpolator(
            np.expand_dims(frame1, axis=0), np.expand_dims(frame2, axis=0), time)[0]
        yield from _recursive_generator(frame1, mid_frame, num_recursions - 1, interpolator)  # first half
        yield from _recursive_generator(mid_frame, frame2, num_recursions - 1, interpolator)  # second half

def interpolate_recursively(frames, num_recursions, interpolator):
    """Apply recursive interpolation to a list of input frames."""
    n = len(frames)
    for i in range(1, n):
        yield from _recursive_generator(frames[i - 1], frames[i], num_recursions, interpolator)
    yield frames[-1]

def process_keyframes(input_folder, output_folder, fps=30, num_recursions=3):
    """Process keyframes to create an interpolated video, using the functions above."""
    keyframes = sorted(glob(os.path.join(input_folder, '*.png')))
    frames = [preprocess_image(frame).numpy() for frame in keyframes]

    interpolator = Interpolator()
    interpolated_frames = list(interpolate_recursively(frames, num_recursions, interpolator))

    os.makedirs(output_folder, exist_ok=True)  # make sure the output folder exists
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")  # for a unique output filename
    output_video = os.path.join(output_folder, f'output_video_{timestamp}.mp4')

    # Set up the video writer for fusing frames into a morphing video
    first_frame = cv2.imread(keyframes[0])
    height, width, _ = first_frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    for frame in interpolated_frames:
        frame_bgr = cv2.cvtColor((frame * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)
        out.write(frame_bgr)  # write the frame to the video

    out.release()
    print(f'Video created with {len(interpolated_frames)} frames: {output_video}')

# Usage
# input_folder = 'sample_keyframes'
# output_folder = 'FILM_Results'

# start_time = time.time()
# process_keyframes(input_folder, output_folder, fps=30, num_recursions=3)
# end_time = time.time()

# total_execution_time = end_time - start_time
# print(f'Total script execution time: {total_execution_time:.2f} seconds')
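Note on frame counts: `num_recursions` controls how densely FILM fills in between each pair of keyframes; each level doubles the number of segments, so n keyframes with r recursions yield (n - 1) * 2^r + 1 frames in the final video. A minimal sketch of calling the module directly on an existing keyframe folder (the folder names here are placeholders, not paths from this repo):

from FILM import process_keyframes

keyframe_dir = "./results/keyframes"   # hypothetical folder of PNG keyframes
video_dir = "./FILM_Results"

# With 16 keyframes and num_recursions=3, expect (16 - 1) * 2**3 + 1 = 121 frames.
process_keyframes(keyframe_dir, video_dir, fps=30, num_recursions=3)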
Image-Morpher
CHANGED
@@ -1 +1 @@
-Subproject commit
+Subproject commit 678bc8264fd301eb17cd1150ee25d44cd5f3dd6b
Makefile
ADDED
@@ -0,0 +1,51 @@
# Variables
PYTHON = python # or python3
MAIN_SCRIPT = run_morphing.py
OUTPUT_DIR = ./results
FILM_OUTPUT = ./FILM_Results

# Default target: run the entire morphing pipeline
all: submodule morph

# Ensure the DiffMorpher submodule is initialized and updated
submodule:
	@echo "Initializing and updating submodules..."
	git submodule init
	git submodule sync
	git submodule update --remote


# Initialize the environment by installing dependencies and (optionally) submodules
init: submodule
	@echo "Installing required Python packages..."
	$(PYTHON) -m pip install -r requirements.txt

# Run the morphing pipeline with FILM
# (TODO: configure the CLI so that `make morph FRAMES=30 FILM=true` works)
morph:
	$(PYTHON) $(MAIN_SCRIPT) \
		--image_path_0 ./assets/Trump.jpg \
		--prompt_0 "A photo of an American man" \
		--image_path_1 ./assets/Biden.jpg \
		--prompt_1 "A photo of an American man" \
		--output_path $(OUTPUT_DIR) \
		--use_adain \
		--use_reschedule \
		--save_inter \
		--num_frames 16 \
		--duration 100 \
		--use_film \
		--film_fps 30 \
		--film_num_recursions 3

### Add more targets here later (e.g. make install)

# Help message
help:
	@echo "Makefile for Image Morphing Project"
	@echo "Available commands:"
	@echo "  make           - Initialize submodules and run the entire morphing pipeline"
	@echo "  make init      - Install dependencies and set up the environment"
	@echo "  make submodule - Initialize and update Git submodules"
	@echo "  make morph     - Run the morphing pipeline with FILM"
	@echo "  make help      - Show this help message"
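The `morph` target is a thin wrapper around `run_morphing.py` below. For readers who prefer to skip make, here is a sketch of the equivalent invocation from Python, assuming the repo root as the working directory and the same asset paths used in the Makefile:

import subprocess
import sys

# Mirror the `make morph` recipe by calling run_morphing.py directly.
cmd = [
    sys.executable, "run_morphing.py",
    "--image_path_0", "./assets/Trump.jpg",
    "--prompt_0", "A photo of an American man",
    "--image_path_1", "./assets/Biden.jpg",
    "--prompt_1", "A photo of an American man",
    "--output_path", "./results",
    "--use_adain", "--use_reschedule", "--save_inter",
    "--num_frames", "16",
    "--duration", "100",
    "--use_film",
    "--film_fps", "30",
    "--film_num_recursions", "3",
]
subprocess.run(cmd, check=True)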
app.py
CHANGED
@@ -1,3 +1,6 @@
+"""
+Cleaned up version, Close-to-Final UI features and functionality logic.
+"""
 import os
 import sys
 import subprocess
@@ -114,7 +117,7 @@ def main():
     )
 
     # ---------------- HEADER & LOGO ----------------
-    logo_path =
+    logo_path = "metamorphLogo_nobg.png"
     if os.path.exists(logo_path):
         try:
             logo = Image.open(logo_path)
metamorphLogo_nobg.png
ADDED
requirements.txt
CHANGED
@@ -13,4 +13,7 @@ transformers==4.34.1
 torch
 torchvision
 lpips
-# peft
+# peft
+tensorflow==2.18.0
+tensorflow_hub==0.16.1
+
run_morphing.py
ADDED
@@ -0,0 +1,248 @@
# T.B.C.

import os
import sys
import time
import subprocess
import argparse

from FILM import process_keyframes

def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Orchestrate DiffMorpher || LCM-LoRA || LCM, and FILM for smooth morphing between two images.")

    # ------------------- KEYFRAME METHOD SELECTION -------------------
    # TODO: multiple methods are not supported yet
    # parser.add_argument(
    #     "--keyframe_method", type=str, default="diffmorpher",
    #     choices=["diffmorpher", "lcm_lora", "diffmorpher_lcm"],
    #     help="Choose which method to use for keyframe generation."
    # )
    # This would select between the models:
    #   stable diffusion 2-1 (without LCM)
    #   stable diffusion 1-5 (has LCM support)
    #   dreamshaper 7, a fine-tuned 1-5, more glam (has LCM support)

    # ------------------- DIFFMORPHER ARGS -------------------
    parser.add_argument(
        "--model_path", type=str, default="stabilityai/stable-diffusion-2-1-base",  # TODO: this needs to be changed
        help="Pretrained model to use for DiffMorpher (default: %(default)s)"
    )
    parser.add_argument(
        "--image_path_0", type=str, required=True,
        help="Path of the first image"
    )
    parser.add_argument(
        "--prompt_0", type=str, default="",
        help="Prompt describing the first image (default: %(default)s)"
    )
    parser.add_argument(
        "--image_path_1", type=str, required=True,
        help="Path of the second image"
    )
    parser.add_argument(
        "--prompt_1", type=str, default="",
        help="Prompt describing the second image (default: %(default)s)"
    )
    parser.add_argument(
        "--output_path", type=str, default="./results",
        help="Output folder for DiffMorpher keyframes/gif (default: %(default)s)"
    )
    parser.add_argument(
        "--save_lora_dir", type=str, default="./lora",
        help="Directory to save LoRA outputs (default: %(default)s)"
    )
    parser.add_argument(
        "--load_lora_path_0", type=str, default="",
        help="Path to LoRA checkpoint for image 0 (default: %(default)s)"
    )
    parser.add_argument(
        "--load_lora_path_1", type=str, default="",
        help="Path to LoRA checkpoint for image 1 (default: %(default)s)"
    )
    parser.add_argument(
        "--use_adain", action="store_true",
        help="Use AdaIN in DiffMorpher pipeline"
    )
    parser.add_argument(
        "--use_reschedule", action="store_true",
        help="Use reschedule sampling in DiffMorpher"
    )
    parser.add_argument(
        "--lamb", type=float, default=0.6,
        help="Lambda for self-attention replacement in DiffMorpher (default: %(default)s)"
    )
    parser.add_argument(
        "--fix_lora_value", type=float, default=None,
        help="Fix LoRA value in DiffMorpher (default: LoRA interpolation)"
    )
    parser.add_argument(
        "--save_inter", action="store_true",
        help="Save intermediate frames as individual images (e.g. .png) in DiffMorpher"
    )
    parser.add_argument(
        "--num_frames", type=int, default=16,
        help="Number of keyframes to generate (default: %(default)s)"
    )
    parser.add_argument(
        "--duration", type=int, default=100,
        help="Duration of each keyframe in the final .gif (default: %(default)s ms)"
    )
    parser.add_argument(
        "--no_lora", action="store_true",
        help="Disable LoRA usage in DiffMorpher"
    )

    # ------------------- FILM ARGS -------------------
    parser.add_argument(
        "--use_film", action="store_true",
        help="Flag to indicate whether to run FILM after generating keyframes"
    )
    parser.add_argument(
        "--film_input_folder", type=str, default="",
        help="Folder containing keyframes for FILM. If empty, the DiffMorpher output folder is used."
    )
    parser.add_argument(
        "--film_output_folder", type=str, default="./FILM_Results",
        help="Folder where FILM's final interpolated video is saved (default: %(default)s)"
    )
    parser.add_argument(
        "--film_fps", type=int, default=40,
        help="FPS for the final video - a 'pseudo playback speed', since the total frame count stays the same (default: %(default)s)"
    )
    parser.add_argument(
        "--film_num_recursions", type=int, default=3,
        help="Number of recursive interpolations to perform in FILM (default: %(default)s)"
    )

    return parser.parse_args()

def run_diffmorpher(args):
    """
    Calls DiffMorpher's main.py via subprocess using the CLI arguments.
    Expects `DiffMorpher/` to be a submodule in the current repo.
    """
    diffmorpher_script = os.path.join("DiffMorpher", "main.py")

    cmd = [
        sys.executable, diffmorpher_script,
        "--model_path", args.model_path,
        "--image_path_0", args.image_path_0,
        "--prompt_0", args.prompt_0,
        "--image_path_1", args.image_path_1,
        "--prompt_1", args.prompt_1,
        "--output_path", args.output_path,
        "--save_lora_dir", args.save_lora_dir,
        "--lamb", str(args.lamb),
        "--num_frames", str(args.num_frames),
        "--duration", str(args.duration),
    ]

    if args.load_lora_path_0:
        cmd += ["--load_lora_path_0", args.load_lora_path_0]
    if args.load_lora_path_1:
        cmd += ["--load_lora_path_1", args.load_lora_path_1]
    if args.use_adain:
        cmd.append("--use_adain")
    if args.use_reschedule:
        cmd.append("--use_reschedule")
    if args.fix_lora_value is not None:
        cmd += ["--fix_lora_value", str(args.fix_lora_value)]
    if args.save_inter:
        cmd.append("--save_inter")
    if args.no_lora:
        cmd.append("--no_lora")

    print("[INFO] Running DiffMorpher with command:")
    print(" ".join(cmd))

    start = time.time()
    subprocess.run(cmd, check=True)
    end = time.time()
    print(f"[INFO] DiffMorpher completed in {end - start:.2f} seconds.")

def run_lcm_lora(args):
    """
    Run the LCM-LoRA accelerated keyframe generator.
    """
    # This would be a flag passed to DiffMorpher instead.
    pass


def create_simple_video_from_keyframes(keyframes_folder, output_folder, fps=40):
    """
    If the user does NOT want FILM, we still make a basic video from the keyframes.
    Assumes frames are saved as .png or .jpg in keyframes_folder.
    """
    import cv2
    from glob import glob
    import os
    from datetime import datetime

    os.makedirs(output_folder, exist_ok=True)

    images = sorted(glob(os.path.join(keyframes_folder, "*.png")))
    if not images:
        images = sorted(glob(os.path.join(keyframes_folder, "*.jpg")))
    if not images:
        print(f"[WARN] No .png or .jpg frames found in {keyframes_folder}.")
        return

    # Prepare the video writer
    first_frame = cv2.imread(images[0])
    height, width, _ = first_frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_video_path = os.path.join(output_folder, f"simple_morph_{timestamp}.mp4")
    out = cv2.VideoWriter(out_video_path, fourcc, fps, (width, height))

    for img_path in images:
        frame = cv2.imread(img_path)
        out.write(frame)

    out.release()
    print(f"[INFO] Basic morphing video saved at: {out_video_path}")

def main():
    args = parse_arguments()
    overall_start_time = time.time()

    # 1) Run DiffMorpher to generate keyframes
    run_diffmorpher(args)

    # 2) Determine the folder containing the keyframes.
    #    If the user didn't explicitly give `--film_input_folder`, use `args.output_path`.
    keyframes_folder = args.film_input_folder if args.film_input_folder else args.output_path

    # 3) If the user wants FILM, perform high-quality interpolation on the keyframes
    if args.use_film:
        print("[INFO] Running FILM to enhance the keyframes...")

        start_film_time = time.time()
        # from FILM.py:
        process_keyframes(
            input_folder=keyframes_folder,
            output_folder=args.film_output_folder,
            fps=args.film_fps,
            num_recursions=args.film_num_recursions
        )
        end_film_time = time.time()
        print(f"[INFO] FILM interpolation completed in {end_film_time - start_film_time:.2f} seconds.")

    else:
        # 4) If the user does NOT want FILM, create a simple .mp4 from the keyframes
        print("[INFO] Skipping FILM interpolation. Creating a basic video from DiffMorpher keyframes...")
        create_simple_video_from_keyframes(
            keyframes_folder=keyframes_folder,
            output_folder=args.film_output_folder,
            fps=args.film_fps
        )

    # 5) Print total execution time
    overall_end_time = time.time()
    print(f"[INFO] Entire pipeline completed in {overall_end_time - overall_start_time:.2f} seconds.")

if __name__ == "__main__":
    main()
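The non-FILM fallback path can also be exercised on its own when keyframes already exist. A minimal sketch, assuming a folder of previously generated keyframes (the folder names are placeholders):

from run_morphing import create_simple_video_from_keyframes

# Stitch existing DiffMorpher keyframes into an .mp4 without FILM interpolation.
create_simple_video_from_keyframes(
    keyframes_folder="./results",      # hypothetical keyframe folder
    output_folder="./FILM_Results",
    fps=40,
)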