Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -69,13 +69,14 @@ def resize_image_to_bucket(image: Union[Image.Image, np.ndarray], bucket_reso: T
|
|
69 |
"""
|
70 |
Resize the image to the bucket resolution.
|
71 |
"""
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
|
|
77 |
if bucket_reso == (image_width, image_height):
|
78 |
-
return
|
79 |
bucket_width, bucket_height = bucket_reso
|
80 |
scale_width = bucket_width / image_width
|
81 |
scale_height = bucket_height / image_height
|
@@ -83,32 +84,31 @@ def resize_image_to_bucket(image: Union[Image.Image, np.ndarray], bucket_reso: T
|
|
83 |
image_width = int(image_width * scale + 0.5)
|
84 |
image_height = int(image_height * scale + 0.5)
|
85 |
if scale > 1:
|
86 |
-
image = Image.fromarray(image)
|
87 |
image = image.resize((image_width, image_height), Image.LANCZOS)
|
88 |
image = np.array(image)
|
89 |
else:
|
90 |
-
image = np.array(image) if is_pil_image else image
|
91 |
image = cv2.resize(image, (image_width, image_height), interpolation=cv2.INTER_AREA)
|
92 |
# crop the image to the bucket resolution
|
93 |
crop_left = (image_width - bucket_width) // 2
|
94 |
crop_top = (image_height - bucket_height) // 2
|
95 |
-
image = image[crop_top
|
96 |
return image
|
97 |
|
98 |
|
99 |
def generate_video(pipe, prompt: str, frame1: Image.Image, frame2: Image.Image, guidance_scale: float, num_frames: int, num_inference_steps: int) -> bytes:
|
100 |
-
#
|
101 |
-
|
102 |
-
|
103 |
|
104 |
# Load and preprocess frames
|
105 |
-
cond_frame1 = frame1
|
106 |
-
cond_frame2 = frame2
|
107 |
height, width = 720, 1280
|
108 |
cond_frame1 = resize_image_to_bucket(cond_frame1, bucket_reso=(width, height))
|
109 |
cond_frame2 = resize_image_to_bucket(cond_frame2, bucket_reso=(width, height))
|
110 |
cond_video = np.zeros(shape=(num_frames, height, width, 3))
|
111 |
-
cond_video[0], cond_video[-1] =
|
112 |
cond_video = torch.from_numpy(cond_video.copy()).permute(0, 3, 1, 2)
|
113 |
cond_video = torch.stack([video_transforms(x) for x in cond_video], dim=0).unsqueeze(0)
|
114 |
with torch.no_grad():
|
@@ -324,7 +324,7 @@ def main():
|
|
324 |
)
|
325 |
|
326 |
# Launch the Gradio app
|
327 |
-
iface.launch()
|
328 |
|
329 |
if __name__ == "__main__":
|
330 |
main()
|
|
|
def resize_image_to_bucket(image: Union[Image.Image, np.ndarray], bucket_reso: Tuple[int, int]) -> np.ndarray:
    """Resize (cover-fit) and centre-crop an image to the bucket resolution.

    The image is scaled so it fully covers ``bucket_reso`` (aspect ratio
    preserved), then centre-cropped to exactly that size.

    Args:
        image: Input image as a PIL ``Image`` or a NumPy array of shape
            ``(H, W, C)`` (or ``(H, W)``).
        bucket_reso: Target resolution as ``(width, height)``.

    Returns:
        A NumPy array of shape ``(bucket_height, bucket_width, ...)``.

    Raises:
        ValueError: If ``image`` is neither a PIL Image nor a NumPy array.
    """
    # Normalise the input to a NumPy array up front so the rest of the
    # function only has to deal with one representation.
    if isinstance(image, Image.Image):
        image = np.array(image)
    elif not isinstance(image, np.ndarray):
        raise ValueError("Image must be a PIL Image or NumPy array")

    image_height, image_width = image.shape[:2]
    if bucket_reso == (image_width, image_height):
        # Already exactly the bucket size -- nothing to do.
        return image

    bucket_width, bucket_height = bucket_reso
    scale_width = bucket_width / image_width
    scale_height = bucket_height / image_height
    # NOTE(review): this line was an elided context line in the diff view;
    # max() is the cover-fit choice implied by the centre-crop below
    # (both dimensions must reach at least the bucket size) -- confirm
    # against the full file.
    scale = max(scale_width, scale_height)
    # Round half-up to the nearest integer pixel size.
    image_width = int(image_width * scale + 0.5)
    image_height = int(image_height * scale + 0.5)

    if scale > 1:
        # Upscaling: use PIL's LANCZOS filter for quality.
        # (Image.fromarray assumes uint8 pixel data -- TODO confirm callers.)
        image = Image.fromarray(image)
        image = image.resize((image_width, image_height), Image.LANCZOS)
        image = np.array(image)
    else:
        # Downscaling: OpenCV's INTER_AREA is the recommended filter.
        image = cv2.resize(image, (image_width, image_height), interpolation=cv2.INTER_AREA)

    # crop the image to the bucket resolution (centre crop)
    crop_left = (image_width - bucket_width) // 2
    crop_top = (image_height - bucket_height) // 2
    image = image[crop_top:crop_top + bucket_height, crop_left:crop_left + bucket_width]
    return image
97 |
|
98 |
|
99 |
def generate_video(pipe, prompt: str, frame1: Image.Image, frame2: Image.Image, guidance_scale: float, num_frames: int, num_inference_steps: int) -> bytes:
|
100 |
+
# Debugging print statements
|
101 |
+
print(f"Frame 1 Type: {type(frame1)}")
|
102 |
+
print(f"Frame 2 Type: {type(frame2)}")
|
103 |
|
104 |
# Load and preprocess frames
|
105 |
+
cond_frame1 = np.array(frame1)
|
106 |
+
cond_frame2 = np.array(frame2)
|
107 |
height, width = 720, 1280
|
108 |
cond_frame1 = resize_image_to_bucket(cond_frame1, bucket_reso=(width, height))
|
109 |
cond_frame2 = resize_image_to_bucket(cond_frame2, bucket_reso=(width, height))
|
110 |
cond_video = np.zeros(shape=(num_frames, height, width, 3))
|
111 |
+
cond_video[0], cond_video[-1] = cond_frame1, cond_frame2
|
112 |
cond_video = torch.from_numpy(cond_video.copy()).permute(0, 3, 1, 2)
|
113 |
cond_video = torch.stack([video_transforms(x) for x in cond_video], dim=0).unsqueeze(0)
|
114 |
with torch.no_grad():
|
|
|
324 |
)
|
325 |
|
326 |
# Launch the Gradio app
|
327 |
+
iface.launch(show_error=True)
|
328 |
|
329 |
if __name__ == "__main__":
|
330 |
main()
|