Update worker_runpod.py
Changed file: worker_runpod.py (+5 additions, −12 deletions)
@@ -130,17 +130,11 @@ def generate(input):
 130      closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
 131      height, width = [int(x / 16) * 16 for x in closest_size]
 132      sample_size = [height, width]
 133 -    if
 134 -
 135 -        # ... (existing logic for partial video generation)
 136 -    else:
 137 -        # Standard video generation
 138 -        video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
 139 -        input_video, input_video_mask, clip_image = get_image_to_video_latent(downloaded_image_path, validation_image_end, video_length=video_length, sample_size=sample_size)
 140
 141 -
 142 -
 143 -            prompt=prompt,
 144              num_frames=video_length,
 145              negative_prompt=negative_prompt,
 146              height=sample_size[0],
@@ -149,8 +143,7 @@ def generate(input):
 149              guidance_scale=guidance_scale,
 150              num_inference_steps=num_inference_steps,
 151              video=input_video,
 152 -            mask_video=input_video_mask
 153 -        ).videos
 154
 155      if not os.path.exists(save_path):
 156          os.makedirs(save_path, exist_ok=True)
|
|
Resulting code (after change):
 130      closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
 131      height, width = [int(x / 16) * 16 for x in closest_size]
 132      sample_size = [height, width]
 133 +    video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
 134 +    input_video, input_video_mask, clip_image = get_image_to_video_latent(downloaded_image_path, validation_image_end, video_length=video_length, sample_size=sample_size)
 135
 136 +    with torch.no_grad():
 137 +        sample = pipeline(prompt=prompt,
 138              num_frames=video_length,
 139              negative_prompt=negative_prompt,
 140              height=sample_size[0],
 ...
 143              guidance_scale=guidance_scale,
 144              num_inference_steps=num_inference_steps,
 145              video=input_video,
 146 +            mask_video=input_video_mask).videos
 147
 148      if not os.path.exists(save_path):
 149          os.makedirs(save_path, exist_ok=True)