added consistency script
8010ada
#!/usr/bin/env python3
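"""Temporal consistency of per-frame semantic segmentation predictions.

Warps each frame's argmaxed semantic prediction towards its neighbors at t-delta and t+delta
using RAFT optical flow and measures the pixel-wise agreement with those frames' predictions,
appending one score per frame to a csv file.

Example invocation (paths are illustrative):
    ./consistency.py video.mp4 preds/ --frames 0..100 --batch_size 4 -o scores.csv
"""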
import torch as tr
import numpy as np
from functools import lru_cache
from pathlib import Path
from torch.nn import functional as F
from argparse import ArgumentParser, Namespace
from tqdm import tqdm
from vre.utils import clip, AtomicOpen
from vre import FFmpegVideo
from vre_repository.optical_flow.raft import FlowRaft

device = "cuda" if tr.cuda.is_available() else "cpu"

def _vre_inference(model: "Representation", video: "Video", ixs: list[int]) -> np.ndarray:
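    """Reset the representation's cached data, run compute() on the given frame indices and return the raw output."""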
    model.data = None
    model.compute(video, ixs)
    return model.data.output

def warp_image_torch(rgb_t_numpy: np.ndarray, flow_numpy: np.ndarray) -> np.ndarray:
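    """Backward-warp a batch of images (B, H, W, C) with a flow field (B, H, W, 2) using grid_sample."""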
    image = tr.tensor(rgb_t_numpy).permute(0, 3, 1, 2).float().to(device)
    flow = tr.tensor(flow_numpy).float().to(device)
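    # note: the flow is subtracted directly from the normalized grid below, so it is assumed to
    # already be expressed in the same normalized [-1, 1] coordinates, not in raw pixels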
    H, W = image.shape[-2:]
    # Create normalized meshgrid [-1,1] for grid_sample
    grid_x, grid_y = tr.meshgrid(
        tr.linspace(-1, 1, W, device=image.device),
        tr.linspace(-1, 1, H, device=image.device),
        indexing="xy",
    )
    grid = tr.stack((grid_x, grid_y), dim=-1)  # (H, W, 2), normalized [-1, 1]
    # minus sign: grid_sample does backward warping, i.e. the output at pixel x samples the
    # input at new_grid[x], so each target pixel is pulled back against the flow to its source
    new_grid = grid - flow
    # Warp image using grid_sample (out-of-bounds samples are zero-padded by default)
    warped = F.grid_sample(image, new_grid, mode="bilinear", align_corners=True)
    warped_numpy = warped.permute(0, 2, 3, 1).cpu().numpy()
    return warped_numpy

@lru_cache(maxsize=100)
def _npload(pth: str) -> np.ndarray:
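    """Load an argmaxed .npz prediction; cached, since neighboring batches reload the same frames."""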
    return np.load(pth)["arr_0"]

def get_args() -> Namespace:
    parser = ArgumentParser()
    parser.add_argument("video_path", type=Path)
    parser.add_argument("semantic_preds_path", type=Path, help="Path to 0.npz,..., N.npz argmaxed predictions")
    parser.add_argument("--frames", type=str, help="Optional 'start..end' frame range, e.g. 0..100")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--delta", type=int, default=1)
    parser.add_argument("--output_path", "-o", type=Path, required=True, help="Path to output csv file")
    args = parser.parse_args()
    assert args.delta >= 1, args.delta
    assert args.batch_size >= 1, args.batch_size
    assert args.output_path.suffix == ".csv", args.output_path
    assert args.semantic_preds_path.exists(), args.semantic_preds_path
    args.frames = list(range(*map(int, args.frames.split("..")))) if args.frames is not None else None
    return args

def main(args: Namespace):
    video = FFmpegVideo(args.video_path)
    h, w = video.shape[1:3]
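    # two RAFT instances: one computing flow towards frame t+delta ("right"), one towards t-delta ("left")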
    raft_r = FlowRaft(name="flow_raft", dependencies=[], inference_width=w, inference_height=h, iters=5,
                      small=False, delta=args.delta)
    raft_l = FlowRaft(name="flow_raft", dependencies=[], inference_width=w, inference_height=h, iters=5,
                      small=False, delta=-args.delta)
    raft_r.device = raft_l.device = device
    if raft_r.setup_called is False:
        raft_r.vre_setup()
    if raft_l.setup_called is False:
        raft_l.vre_setup()
    frames = list(range(len(video))) if args.frames is None else args.frames
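    # resume support: frames already present in the output csv are skipped; otherwise start a new csv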
    if args.output_path.exists():
        with AtomicOpen(args.output_path, "r") as f:
            data = f.readlines()[1:]
        done_frames = list(map(int, [x.split(",")[0] for x in data]))
        b4 = len(frames)
        frames = [f for f in frames if f not in done_frames]
        print(f"Eliminating previously computed frames. Before: {b4} frames. After: {len(frames)} frames left")
    else:
        with AtomicOpen(args.output_path, "w") as f:
            f.write("frame, delta, score\n")
    batches = [frames[i:i + args.batch_size] for i in range(0, len(frames), args.batch_size)]
    assert all((args.semantic_preds_path / f"{f}.npz").exists() for f in frames)
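    # per batch: warp frame t's prediction with both flows and compare against the t-delta / t+delta predictions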
    for ixs in tqdm(batches):
        ixs_l = [clip(ix + raft_l.delta, 0, len(video) - 1) for ix in ixs]
        ixs_r = [clip(ix + raft_r.delta, 0, len(video) - 1) for ix in ixs]
        rgb = video[ixs]
        sema = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs])
        sema_l = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs_l])
        sema_r = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs_r])
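        # grid_sample zero-pads out-of-bounds samples, so all-zero warped rgb pixels flag invalid warps;
        # label maps are warped bilinearly and rounded back to integer class ids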
        flow_l = _vre_inference(raft_l, video, ixs)
        rgb_warp_l = warp_image_torch(rgb, flow_l)
        mask_l = rgb_warp_l.sum(axis=-1) != 0
        sema_warp_l = warp_image_torch(sema[..., None], flow_l)[..., 0].round().astype(np.uint8)
        diff_sema_l = (sema_l != sema_warp_l).astype(int)
        flow_r = _vre_inference(raft_r, video, ixs)
        rgb_warp_r = warp_image_torch(rgb, flow_r)
        mask_r = rgb_warp_r.sum(axis=-1) != 0
        sema_warp_r = warp_image_torch(sema[..., None], flow_r)[..., 0].round().astype(np.uint8)
        diff_sema_r = (sema_r != sema_warp_r).astype(int)
        # best score = 1 (all agree). Worst score = 0 (none agree). 1/2 means either left or right flow agrees.
        score = 1 - (diff_sema_l + diff_sema_r) / 2
        mask = mask_l * mask_r
        score_valid_perc = [100 * (score[i] * mask[i]).sum() / mask[i].sum() for i in range(len(ixs))]
        with AtomicOpen(args.output_path, "a+") as f:
            for i in range(len(ixs)):
                f.write(f"{ixs[i]}, {args.delta}, {score_valid_perc[i]:.2f}\n")

if __name__ == "__main__":
    main(get_args())