Meehai committed
Commit 8010ada · 1 Parent(s): 701721b

added consistency script

scripts/collage_comparison/consistency.py ADDED
@@ -0,0 +1,126 @@
+ #!/usr/bin/env python3
+ import os
+ import torch as tr
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from functools import lru_cache
+ from omegaconf import OmegaConf
+ from pathlib import Path
+ from torch.nn import functional as F
+ from argparse import ArgumentParser, Namespace
+ from tqdm import tqdm
+
+ from vre.utils import clip, AtomicOpen
+ from vre import FFmpegVideo
+ from vre_repository.optical_flow.raft import FlowRaft
+ from matplotlib.cm import hot
+
+ device = "cuda" if tr.cuda.is_available() else "cpu"
+
+ def _vre_inference(model: "Representation", video: "Video", ixs: list[int]) -> np.ndarray:
+     model.data = None
+     model.compute(video, ixs)
+     return model.data.output
+
+ def warp_image_torch(rgb_t_numpy: np.ndarray, flow_numpy: np.ndarray) -> np.ndarray:
+     image = tr.tensor(rgb_t_numpy).permute(0, 3, 1, 2).float().to(device)
+     flow = tr.tensor(flow_numpy).float().to(device)
+
+     H, W = image.shape[-2:]
+
+     # Create normalized meshgrid [-1, 1] for grid_sample
+     grid_x, grid_y = tr.meshgrid(
+         tr.linspace(-1, 1, W, device=image.device),
+         tr.linspace(-1, 1, H, device=image.device),
+         indexing="xy",
+     )
+     grid = tr.stack((grid_x, grid_y), dim=-1)  # (H, W, 2), normalized [-1, 1]
+
+     new_grid = grid - flow  # why minus ?
+
+     # Warp image using grid_sample
+     warped = F.grid_sample(image, new_grid, mode="bilinear", align_corners=True)
+     warped_numpy = warped.permute(0, 2, 3, 1).cpu().numpy()
+     return warped_numpy
+
+ @lru_cache(maxsize=100)
+ def _npload(pth: str) -> np.ndarray:
+     return np.load(pth)["arr_0"]
+
+ def get_args() -> Namespace:
+     parser = ArgumentParser()
+     parser.add_argument("video_path", type=Path)
+     parser.add_argument("semantic_preds_path", type=Path, help="Path to 0.npz, ..., N.npz argmaxed predictions")
+     parser.add_argument("--frames", type=str)
+     parser.add_argument("--batch_size", type=int, default=1)
+     parser.add_argument("--delta", type=int, default=1)
+     parser.add_argument("--output_path", "-o", type=Path, help="Path to output csv file")
+     args = parser.parse_args()
+     assert args.delta >= 1, args.delta
+     assert args.batch_size >= 1, args.batch_size
+     assert args.output_path.suffix == ".csv", args.output_path
+     assert args.semantic_preds_path.exists(), args.semantic_preds_path
+     args.frames = list(range(*map(int, args.frames.split("..")))) if args.frames is not None else None
+     return args
+
+ def main(args: Namespace):
+     video = FFmpegVideo(args.video_path)
+     h, w = video.shape[1:3]
+     raft_r = FlowRaft(name="flow_raft", dependencies=[], inference_width=w, inference_height=h, iters=5,
+                       small=False, delta=args.delta)
+     raft_l = FlowRaft(name="flow_raft", dependencies=[], inference_width=w, inference_height=h, iters=5,
+                       small=False, delta=-args.delta)
+     raft_r.device = raft_l.device = device
+     raft_r.vre_setup() if raft_r.setup_called is False else None
+     raft_l.vre_setup() if raft_l.setup_called is False else None
+
+     frames = list(range(len(video))) if args.frames is None else args.frames
+
+     if args.output_path.exists():
+         with AtomicOpen(args.output_path, "r") as f:
+             data = f.readlines()[1:]
+         done_frames = list(map(int, [x.split(",")[0] for x in data]))
+         b4 = len(frames)
+         frames = [f for f in frames if f not in done_frames]
+         print(f"Eliminating previously computed frames. Before: {b4} frames. After: {len(frames)} frames left")
+     else:
+         with AtomicOpen(args.output_path, "w") as f:
+             f.write("frame, delta, score\n")
+
+     batches = [frames[i:i + args.batch_size] for i in range(0, len(frames), args.batch_size)]
+     assert all((args.semantic_preds_path / f"{f}.npz").exists() for f in frames)
+
+     for ixs in tqdm(batches):
+         ixs_l = [clip(ix + raft_l.delta, 0, len(video) - 1) for ix in ixs]
+         ixs_r = [clip(ix + raft_r.delta, 0, len(video) - 1) for ix in ixs]
+
+         rgb = video[ixs]
+         rgb_l = video[ixs_l]
+         rgb_r = video[ixs_r]
+         sema = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs])
+         sema_l = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs_l])
+         sema_r = np.array([_npload(str(args.semantic_preds_path / f"{ix}.npz")) for ix in ixs_r])
+
+         flow_l = _vre_inference(raft_l, video, ixs)
+         rgb_warp_l = warp_image_torch(rgb, flow_l)
+         mask_l = rgb_warp_l.sum(axis=-1) != 0
+         sema_warp_l = warp_image_torch(sema[..., None], flow_l)[..., 0].round().astype(np.uint8)
+         diff_sema_l = (sema_l != sema_warp_l).astype(int)
+
+         flow_r = _vre_inference(raft_r, video, ixs)
+         rgb_warp_r = warp_image_torch(rgb, flow_r)
+         mask_r = rgb_warp_r.sum(axis=-1) != 0
+         sema_warp_r = warp_image_torch(sema[..., None], flow_r)[..., 0].round().astype(np.uint8)
+         diff_sema_r = (sema_r != sema_warp_r).astype(int)
+
+         # Best score = 1 (all agree). Worst score = 0 (none agree). 1/2 means either left or right flow agrees.
+         score = 1 - (diff_sema_l + diff_sema_r) / 2
+         mask = mask_l * mask_r
+         score_valid_perc = [100 * (score[i] * mask[i]).sum() / mask[i].sum() for i in range(len(ixs))]
+
+         with AtomicOpen(args.output_path, "a+") as f:
+             for i in range(len(ixs)):
+                 f.write(f"{ixs[i]}, {args.delta}, {score_valid_perc[i]:.2f}\n")
+
+ if __name__ == "__main__":
+     main(get_args())
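A note on the `new_grid = grid - flow` line above (the script's own "why minus ?" comment): `F.grid_sample` backward-samples, i.e. `output[x] = input[new_grid[x]]`, so subtracting the flow from the identity grid places the content of frame t at `p + flow(p)` in the warped output, which is what gets compared against the neighbouring frame's predictions. A minimal standalone sketch (not part of the commit) checking that convention, assuming the flow is already expressed in normalized [-1, 1] grid units as warp_image_torch expects:

    import torch as tr
    from torch.nn import functional as F

    N, H, W = 1, 4, 8
    image = tr.zeros(N, 1, H, W)
    image[0, 0, :, 2] = 1.0  # a vertical line at column x=2

    # identity grid, built exactly like warp_image_torch does
    grid_x, grid_y = tr.meshgrid(tr.linspace(-1, 1, W), tr.linspace(-1, 1, H), indexing="xy")
    grid = tr.stack((grid_x, grid_y), dim=-1)  # (H, W, 2)

    # constant flow of +2 pixels in x; one pixel is 2/(W-1) grid units with align_corners=True
    flow = tr.zeros(N, H, W, 2)
    flow[..., 0] = 2 * (2 / (W - 1))

    warped = F.grid_sample(image, grid - flow, mode="bilinear", align_corners=True)
    print(warped[0, 0, 0])  # tensor([0., 0., 0., 0., 1., 0., 0., 0.]): the line moved to x=4

The 2-pixel shift lands exactly on column 4, confirming that the minus sign pushes the source frame along the flow rather than against it.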
scripts/collage_comparison/wip.py CHANGED
@@ -3,6 +3,7 @@ import os
  os.environ["VRE_LOGLEVEL"] = "0"
  from argparse import ArgumentParser, Namespace
  import torch as tr
+ from torch.nn import functional as F
  import numpy as np
  from pathlib import Path
  import sys
@@ -14,9 +15,9 @@ from lightning_module_enhanced.utils import to_device
  from omegaconf import DictConfig
  from loggez import loggez_logger as logger
  from vre.readers import MultiTaskDataset
- from functools import partial
  from vre.utils import collage_fn, image_add_title, colorize_semantic_segmentation, lo, image_resize, image_write
  from vre import FFmpegVideo
+ from functools import partial
  from PIL import Image
  import subprocess
  from tqdm import tqdm
@@ -112,6 +113,7 @@ def inference(model: LME | str, batch: dict, n_ens: int | None = None) -> np.nda
      item = acc_sema[0]
      return item.permute(1, 2, 0).numpy().argmax(-1).astype(np.uint8)

+
  def get_args() -> Namespace:
      parser = ArgumentParser()
      parser.add_argument("video_path", type=Path)
@@ -148,7 +150,7 @@ def main(args: Namespace):
      # # print(" ".join(args))
      # subprocess.run(args=args, env={**os.environ.copy(), **{"VRE_DEVICE": "cuda" if tr.cuda.is_available() else "cpu", "CUDA_VISIBLE_DEVICES": "7"}})

-     assert (vre_dir / "rgb").exists(), vre_dir
+     assert (vre_dir / "rgb").exists(), vre_dir  # run vre otherwise
      for frame in frames:
          assert (vre_dir / f"rgb/npz/{frame}.npz").exists(), frame
      weights_path = "/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/ckpts/safeuav/sema/mae-4M-ext/epoch=37-val_semantic_output_mean_iou=0.470.ckpt"
@@ -159,13 +161,12 @@ def main(args: Namespace):
      task_types = build_representations((cfg := model_mae.hparams.cfg).data.dataset)
      stats = model_mae.hparams.stats

+     h, w = video.shape[1:3]
+
      test_base_reader2 = MultiTaskDataset(vre_dir, task_types={k: task_types[k] for k in {"semantic_mask2former_r50_mapillary_converted", "rgb"}},
                                           **{**cfg.data.parameters, "task_names": ["rgb", "semantic_mask2former_r50_mapillary_converted"], "normalization": None},
                                           statistics=stats)
      reader2 = VITMultiTaskDataset(test_base_reader2)
-     plot_fns2 = dict(zip(reader2.task_names, [partial(vre_plot_fn, node=n) for n in reader2.tasks]))
-     fix_plot_fns_(plot_fns2, task_types, stats, cfg["data"]["parameters"]["normalization"])
-
      model_tasks = [t for t in cfg.data.parameters.task_names
                     if t not in cfg.train.algorithm.masking.parameters.excluded_tasks]
      _task_types = {k: v for k, v in task_types.items() if k in model_tasks}
@@ -181,30 +182,32 @@ def main(args: Namespace):
      [(out_dir / x).mkdir(exist_ok=True) for x in ["ens", "m2f", "distil", "collage"]]

      for frame_ix in tqdm(frames):
-         if (out_file := out_dir / f"collage/{frame_ix}.jpg").exists():
-             continue
          batch_m2f = reader2.collate_fn([reader2[frame_ix]])
          batch = reader.collate_fn([reader[frame_ix]])
-         rgb = batch_m2f["data"]["rgb"][0].permute(1, 2, 0).numpy()
+         rgb = video[frame_ix]

-         if not (pth := out_dir / f"m2f/{frame_ix}.npz").exists():
+         if not (pth1 := out_dir / f"m2f/{frame_ix}.npz").exists():
              y = inference("semantic_mask2former_r50_mapillary_converted", batch_m2f)
-             np.savez_compressed(pth, y)
-         m2f_img = colorize_dronescapes(np.load(pth)["arr_0"])
+             np.savez_compressed(pth1, y)
+         m2f = np.load(pth1)["arr_0"]

-         if not (pth := out_dir / f"ens/{frame_ix}.npz").exists():
+         if not (pth2 := out_dir / f"ens/{frame_ix}.npz").exists():
              y = inference(model_mae, batch, n_ens=30)
-             np.savez_compressed(pth, y)
-         ens_img = colorize_dronescapes(np.load(pth)["arr_0"])
+             np.savez_compressed(pth2, y)
+         ens = np.load(pth2)["arr_0"]

-         if not (pth := out_dir / f"distil/{frame_ix}.npz").exists():
+         if not (pth3 := out_dir / f"distil/{frame_ix}.npz").exists():
              y = inference(model_distil, batch)
-             np.savez_compressed(pth, y)
-         distil_img = colorize_dronescapes(np.load(pth)["arr_0"])
-
-         titles = ["RGB", "Mask2Former (216M)", "Ensembles-30 (4M)", "Distillation (4M)"]
-         collage = collage_fn([rgb, m2f_img, ens_img, distil_img], titles=titles, rows_cols=(2, 2), size_px=40)
-         image_write(collage, out_file)
+             np.savez_compressed(pth3, y)
+         distil = np.load(pth3)["arr_0"]
+
+         if not (out_file := out_dir / f"collage/{frame_ix}.jpg").exists():
+             m2f_img = colorize_dronescapes(m2f)
+             ens_img = colorize_dronescapes(ens)
+             distil_img = colorize_dronescapes(distil)
+             titles = ["RGB", "Mask2Former (216M)", "Ensembles-30 (4M)", "Distillation (4M)"]
+             collage = collage_fn([rgb, m2f_img, ens_img, distil_img], titles=titles, rows_cols=(2, 2), size_px=40)
+             image_write(collage, out_file)

  if __name__ == "__main__":
      main(get_args())
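The reworked loop above separates inference caching from collage drawing: each model's argmaxed prediction is computed at most once, written to npz, and always read back from disk, so deleting a collage jpg no longer forces re-running the models. A minimal sketch of that pattern (the helper name `cached_inference` is hypothetical, not from the commit):

    from pathlib import Path
    import numpy as np

    def cached_inference(pth: Path, compute_fn) -> np.ndarray:
        # Run the model only on a cache miss; np.savez_compressed stores a single
        # positional array under the default key "arr_0".
        if not pth.exists():
            np.savez_compressed(pth, compute_fn())
        return np.load(pth)["arr_0"]

    # hypothetical usage mirroring the m2f/ens/distil branches:
    # m2f = cached_inference(out_dir / f"m2f/{frame_ix}.npz",
    #                        lambda: inference("semantic_mask2former_r50_mapillary_converted", batch_m2f))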
scripts/collage_comparison/wip.sh CHANGED
@@ -35,4 +35,4 @@ done
  wait

  # Combine output frames into a video using ffmpeg
- ffmpeg -framerate $fps -i out_"$video_file"/%d.jpg -c:v libx265 -pix_fmt yuv420p out_"$video_file"/collage.mp4
+ ffmpeg -framerate $fps -i out_"$video_file"/collage/%d.jpg -c:v libx265 -pix_fmt yuv420p out_"$video_file"/collage/collage.mp4