Spaces: Running on Zero
slothfulxtx committed · 36f5cb8
Parent(s): dd86648

fix input range bug sent to moge
- app.py +1 -1
- geometrycrafter/determ_ppl.py +1 -1
- geometrycrafter/diff_ppl.py +1 -1
app.py
CHANGED
@@ -137,7 +137,7 @@ def infer_geometry(
 
     frames, height, width, fps = read_video_frames(video, process_length, max_res)
     aspect_ratio = width / height
-    assert 0.5 <= aspect_ratio and aspect_ratio <= 2.0
+    assert 0.5 <= aspect_ratio and aspect_ratio <= 2.0, "Error! The aspect ratio of video must fall in range [0.5, 2]."
     frames_tensor = torch.tensor(frames.astype("float32"), device='cuda').float().permute(0, 3, 1, 2)
     window_size = min(window_size, len(frames))
     if window_size == len(frames):
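The app.py change keeps the existing aspect-ratio guard but attaches a human-readable message, so out-of-range videos now fail with an explanation instead of a bare AssertionError. A minimal sketch of the guard's behavior (the standalone check_aspect_ratio helper is hypothetical; width and height are as returned by read_video_frames):

def check_aspect_ratio(width: int, height: int) -> None:
    # Same condition as in infer_geometry; the chained comparison
    # 0.5 <= r <= 2.0 is equivalent to the original two-clause form.
    aspect_ratio = width / height
    assert 0.5 <= aspect_ratio <= 2.0, \
        "Error! The aspect ratio of video must fall in range [0.5, 2]."

check_aspect_ratio(1920, 1080)    # ok: 16:9 gives ~1.78
# check_aspect_ratio(3840, 1080)  # raises: ~3.56 is outside [0.5, 2]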
geometrycrafter/determ_ppl.py
CHANGED
@@ -109,7 +109,7 @@ class GeometryCrafterDetermPipeline(StableVideoDiffusionPipeline):
     @torch.inference_mode()
     def produce_priors(self, prior_model, frame, chunk_size=8):
         T, _, H, W = frame.shape
-        frame = (frame + 1) / 2
+        # frame = (frame + 1) / 2
         pred_point_maps = []
         pred_masks = []
         for i in range(0, len(frame), chunk_size):
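This is the input-range bug named in the commit message: judging by the fix, the frames reaching produce_priors are already in [0, 1], so the (frame + 1) / 2 remap (appropriate for [-1, 1] inputs) was compressing them into [0.5, 1] before they were sent to the MoGe prior model. A small illustration under that assumption:

import torch

frames = torch.rand(4, 3, 64, 64)   # frames assumed already in [0, 1]
remapped = (frames + 1) / 2         # the removed line: squeezes values into [0.5, 1.0]
print(remapped.min().item(), remapped.max().item())  # ~0.5 ... ~1.0, half the range lost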
geometrycrafter/diff_ppl.py
CHANGED
@@ -109,7 +109,7 @@ class GeometryCrafterDiffPipeline(StableVideoDiffusionPipeline):
     @torch.inference_mode()
     def produce_priors(self, prior_model, frame, chunk_size=8):
         T, _, H, W = frame.shape
-        frame = (frame + 1) / 2
+        # frame = (frame + 1) / 2
         pred_point_maps = []
         pred_masks = []
         for i in range(0, len(frame), chunk_size):
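geometrycrafter/diff_ppl.py receives the identical one-line fix in its own produce_priors. For context, that method runs the prior model over the video in fixed-size chunks; a self-contained sketch of the pattern (the prior_model call and its two-tensor return are assumptions, not the actual API):

import torch

@torch.inference_mode()
def produce_priors_sketch(prior_model, frame, chunk_size=8):
    # frame: (T, C, H, W) video tensor, passed through unscaled after the fix
    pred_point_maps, pred_masks = [], []
    for i in range(0, len(frame), chunk_size):
        chunk = frame[i:i + chunk_size]
        point_maps, masks = prior_model(chunk)  # hypothetical return signature
        pred_point_maps.append(point_maps)
        pred_masks.append(masks)
    return torch.cat(pred_point_maps), torch.cat(pred_masks)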