Added collage comparison script (scripts/collage_comparison/): VRE config + WIP driver that renders per-frame comparison collages
scripts/collage_comparison/cfg.yaml
ADDED
@@ -0,0 +1,74 @@
+default_io_parameters:
+  binary_format: npz
+  image_format: not-set
+  compress: True
+  output_size: [540, 960]
+
+default_learned_parameters:
+  device: ${oc.env:VRE_DEVICE,cpu}
+
+default_compute_parameters:
+  batch_size: 15
+
+representations:
+  rgb:
+    type: color/rgb
+    dependencies: []
+    parameters: {}
+
+  # opticalflow_rife:
+  #   type: optical-flow/rife
+  #   dependencies: []
+  #   parameters:
+  #     uhd: False
+  #     compute_backward_flow: False
+  #   compute_parameters:
+  #     output_dtype: float16
+
+  semantic_mask2former_coco_47429163_0:
+    type: semantic-segmentation/mask2former
+    dependencies: []
+    parameters:
+      model_id: "47429163_0"
+      disk_data_argmax: True
+    compute_parameters:
+      batch_size: 1
+
+  semantic_mask2former_mapillary_49189528_0:
+    type: semantic-segmentation/mask2former
+    dependencies: []
+    parameters:
+      model_id: "49189528_0"
+      disk_data_argmax: True
+    compute_parameters:
+      batch_size: 1
+
+  semantic_mask2former_mapillary_49189528_1:
+    type: semantic-segmentation/mask2former
+    dependencies: []
+    parameters:
+      model_id: "49189528_1"
+      disk_data_argmax: True
+    compute_parameters:
+      batch_size: 1
+
+  depth_marigold:
+    type: depth/marigold
+    dependencies: []
+    parameters:
+      variant: marigold-lcm-v1-0
+      denoising_steps: 4
+      ensemble_size: 1
+      processing_resolution: 768
+    compute_parameters:
+      batch_size: 5
+
+  normals_svd(depth_marigold):
+    type: normals/depth-svd
+    dependencies: [depth_marigold]
+    parameters:
+      sensor_fov: 75
+      sensor_size: [3840, 2160]
+      window_size: 11
+    io_parameters:
+      output_dtype: float16
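
For context, cfg.yaml above is a VRE config: it sets default I/O, learned (device) and compute parameters, then declares the representations to extract. A minimal sketch of how it might be driven from Python, mirroring the `vre` invocation that is commented out in wip.py below (the video and output paths here are placeholders):

    import os
    import subprocess
    from pathlib import Path

    video_path = Path("my_video.mp4")                       # placeholder input
    vre_dir = Path(f"data_{video_path.name}")               # layout wip.py expects
    cfg_path = Path("scripts/collage_comparison/cfg.yaml")

    subprocess.run(
        ["vre", str(video_path), "--config_path", str(cfg_path), "-o", str(vre_dir),
         "--representations", "rgb", "depth_marigold", "normals_svd(depth_marigold)",
         "--output_dir_exists_mode", "skip_computed"],
        # cfg.yaml resolves its device from ${oc.env:VRE_DEVICE,cpu}
        env={**os.environ, "VRE_DEVICE": "cuda"},
        check=True,
    )
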
scripts/collage_comparison/wip.py
ADDED
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+import os
+os.environ["VRE_LOGLEVEL"] = "0"
+from argparse import ArgumentParser, Namespace
+import torch as tr
+import numpy as np
+from pathlib import Path
+import sys
+import random
+from typing import Callable
+from pprint import pprint
+from lightning_module_enhanced import LME
+from lightning_module_enhanced.utils import to_device
+from omegaconf import DictConfig
+from loggez import loggez_logger as logger
+from vre.readers import MultiTaskDataset
+from functools import partial
+from vre.utils import collage_fn, image_add_title, colorize_semantic_segmentation, lo, image_resize, image_write
+from vre import FFmpegVideo
+from PIL import Image
+import subprocess
+from tqdm import tqdm
+import matplotlib.pyplot as plt
+from contexttimer import Timer
+
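+# repo-local modules (readers/models/plots/algorithms), resolved via the sys.path entry below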
+sys.path.append("/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers")
+from readers import VITMultiTaskDataset, build_representations
+from models import build_model
+from plots import vre_plot_fn
+from algorithms import build_algorithm, ModelAlgorithmOutput
+
+# os.environ["CUDA_VISIBLE_DEVICES"]="0" #"7"
+device = tr.device("cuda") if tr.cuda.is_available() else tr.device("cpu")
+
+def seed(seed: int):
+    random.seed(seed)
+    np.random.seed(seed)
+    tr.random.manual_seed(seed)
+
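+# Zero-fill the tasks the checkpoint was trained with but which are excluded (fully
+# masked) at inference time, so the batch matches the inputs the model expects.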
+def fix_batch_(batch: dict, missing_tasks: list[str]) -> dict:
+    assert len(batch["data"]["rgb"]) == 1, batch["data"]["rgb"]  # inference with bs=1 allowed for now only
+    assert set(missing_tasks).issubset({"semantic_output", "depth_output", "camera_normals_output"}), missing_tasks
+    if "semantic_output" in missing_tasks:
+        batch["data"]["semantic_output"] = [tr.zeros(8, 540, 960)]
+        batch["image_shape"][0]["semantic_output"] = 8
+    if "depth_output" in missing_tasks:
+        batch["data"]["depth_output"] = [tr.zeros(1, 540, 960)]
+        batch["image_shape"][0]["depth_output"] = 1
+    if "camera_normals_output" in missing_tasks:
+        batch["data"]["camera_normals_output"] = [tr.zeros(3, 540, 960)]
+        batch["image_shape"][0]["camera_normals_output"] = 3
+    batch["data"] = {k: batch["data"][k] for k in sorted(batch["data"].keys())}
+    batch["image_shape"] = [{k: batch["image_shape"][0][k] for k in sorted(batch["image_shape"][0].keys())}]
+    return batch
+
+def fix_plot_fns_(plot_fns: dict[str, Callable], task_types: dict[str, "Repr"], stats: dict[str, tuple[list[float]]],
+                  normalization: str):
+    for task_name in ["camera_normals_output", "depth_output", "semantic_output"]:
+        if hasattr(task_types[task_name], "set_normalization"):
+            task_types[task_name].set_normalization(normalization, tuple(stats[task_name]))
+        plot_fns[task_name] = partial(vre_plot_fn, node=task_types[task_name])
+
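+# Rebuild the model and its training algorithm from the cfg stored in the checkpoint;
+# the stored statistics are reused later when building the MultiTaskDataset readers.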
+def load_model_from_path(weights_path):
+    data = tr.load(weights_path, map_location="cpu")
+    cfg = DictConfig(data["hyper_parameters"]["cfg"])
+    cfg.train.algorithm.metrics_only_on_masked = True
+    model = LME(build_model(cfg).to(device).eval())
+    print(f"DEVICE: {device}")
+    model.load_state_dict(data["state_dict"])
+    model.model_algorithm = build_algorithm(cfg.model.type, cfg.train.algorithm, loss_computer=None)
+    model.hparams.cfg = cfg
+    stats = data["hyper_parameters"]["statistics"]
+    model.hparams.stats = stats
+
+    logger.info(f"Loaded '{cfg.model.type}' with {model.num_params} parameters from '{weights_path}'")
+    logger.info(f"Excluded (fully masked) tasks: {cfg.train.algorithm.masking.parameters.excluded_tasks}")
+    return model
+
+
+def colorize_dronescapes(item: np.ndarray) -> np.ndarray:
+    # argmax over the 8 dronescapes classes, then map to RGB via colorize_semantic_segmentation
+    assert len(item.shape) == 3 and item.shape[-1] == 8, item.shape
+    color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
+                 [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
+    classes_8 = ["land", "forest", "residential", "road", "little-objects", "water", "sky", "hill"]
+    return colorize_semantic_segmentation(item[None].argmax(-1), color_map=color_map, classes=classes_8)[0]
+
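+# Three paths: a string selects the precomputed Mask2Former output as-is; a model
+# trained on (rgb, semantic_output) only gets a single forward pass; otherwise the
+# semantic prediction is a running average over n_ens forward passes.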
+@tr.no_grad
+def inference(model: LME | str, batch: dict, n_ens: int | None = None) -> np.ndarray:
+    assert len(batch["name"]) == 1, batch["name"]
+    if isinstance(model, str) and model == "semantic_mask2former_r50_mapillary_converted":
+        item = batch["data"]["semantic_mask2former_r50_mapillary_converted"][0]
+    else:
+        tasks = model.hparams["cfg"]["train"]["algorithm"]["masking"]["parameters"]["tasks"]
+        if len(tasks) == 2:
+            batch = {
+                "data": {"rgb": batch["data"]["rgb"], "semantic_output": [tr.zeros(8, 540, 960)]},
+                "image_shape": [{"rgb": 3, "semantic_output": 8}, *batch["image_shape"][1:]],
+            }
+            y_model: ModelAlgorithmOutput = to_device(model.model_algorithm(model, batch).y, "cpu")
+            item = y_model.pred["semantic_output"][0]
+        else:
+            assert n_ens is not None, n_ens
+            batch = fix_batch_(batch, model.hparams.cfg.train.algorithm.masking.parameters.excluded_tasks)
+            acc_sema = None
+            for i in range(n_ens):
+                y_model: ModelAlgorithmOutput = to_device(model.model_algorithm(model, batch).y, "cpu")
+                curr_sema = y_model.pred["semantic_output"].to("cpu")
+                if acc_sema is None:
+                    acc_sema = curr_sema
+                else:
+                    acc_sema = (acc_sema * i + curr_sema) / (i + 1)
+            item = acc_sema[0]
+    return colorize_dronescapes(item.permute(1, 2, 0).numpy())
+
+def get_args() -> Namespace:
+    parser = ArgumentParser()
+    parser.add_argument("video_path", type=Path)
+    parser.add_argument("--frames")  # "start..end" range, e.g. 100..200; all frames if omitted
+    return parser.parse_args()
+
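+# Expects VRE outputs precomputed under ./data_<video_name>/ (see the commented-out
+# vre invocation below); writes one collage per frame into ./out_<video_name>/.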
+def main(args: Namespace):
+    video = FFmpegVideo(video_path := args.video_path)
+    frames = list(range(len(video)))
+    if args.frames is not None:
+        frames = list(range(*map(int, args.frames.split(".."))))
+
+    (vre_dir := Path.cwd() / f"data_{video_path.name}").mkdir(exist_ok=True)
+    assert (vre_dir / "rgb").exists(), vre_dir
+    for frame in frames:
+        assert (vre_dir / f"rgb/npz/{frame}.npz").exists(), frame
+    cfg_path = Path(__file__).parent / "cfg.yaml"
+    assert cfg_path.exists(), cfg_path
+    # frames = sorted(map(str, set([random.randint(0, N) for _ in range(5)])))
+    representations = ["semantic_mask2former_coco_47429163_0", "semantic_mask2former_mapillary_49189528_0",
+                       "semantic_mask2former_mapillary_49189528_1", "depth_marigold", "normals_svd(depth_marigold)",
+                       "semantic_mask2former_swin_mapillary_converted", "semantic_mask2former_r50_mapillary_converted",
+                       "semantic_mask2former_swin_coco_converted", "semantic_median_expert",
+                       "buildings", "buildings(nearby)", "containing", "rgb", "safe-landing-no-sseg",
+                       "safe-landing-semantics", "sky-and-water", "transportation", "vegetation"]
+
+    # args = ["vre", str(video_path), "--config_path", str(cfg_path),
+    #         "-o", str(vre_dir), "--representations", *representations,
+    #         "-I", f"{Path.cwd().parents[1]}/readers/semantic_mapper.py:get_new_semantic_mapped_tasks",
+    #         "--output_dir_exists_mode", "skip_computed",
+    #         ]
+    # frames = None  # ["5"]
+    # if frames is not None:
+    #     args.extend(["--frames", *frames])
+    # # print(" ".join(args))
+    # subprocess.run(args=args, env={**os.environ.copy(), "VRE_DEVICE": "cuda" if tr.cuda.is_available() else "cpu", "CUDA_VISIBLE_DEVICES": "7"})
+
+    weights_path = "/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/ckpts/safeuav/sema/mae-4M-ext/epoch=37-val_semantic_output_mean_iou=0.470.ckpt"
+    weights_path_distil = "/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/ckpts/safeuav/distil2/sl0-4M-ext2-distil/epoch=24-val_semantic_output_mean_iou=0.472.ckpt"
+
+    model_mae = load_model_from_path(weights_path)
+    model_distil = load_model_from_path(weights_path_distil)
+    task_types = build_representations((cfg := model_mae.hparams.cfg).data.dataset)
+    stats = model_mae.hparams.stats
+
+    test_base_reader2 = MultiTaskDataset(vre_dir, task_types={k: task_types[k] for k in ["rgb", "semantic_mask2former_r50_mapillary_converted"]},
+                                         **{**cfg.data.parameters, "task_names": ["rgb", "semantic_mask2former_r50_mapillary_converted"], "normalization": None},
+                                         statistics=stats)
+    reader2 = VITMultiTaskDataset(test_base_reader2)
+    plot_fns2 = dict(zip(reader2.task_names, [partial(vre_plot_fn, node=n) for n in reader2.tasks]))
+    fix_plot_fns_(plot_fns2, task_types, stats, cfg["data"]["parameters"]["normalization"])
+
+    model_tasks = [t for t in cfg.data.parameters.task_names
+                   if t not in cfg.train.algorithm.masking.parameters.excluded_tasks]
+    _task_types = {k: v for k, v in task_types.items() if k in model_tasks}
+    test_base_reader = MultiTaskDataset(vre_dir, task_types=_task_types,
+                                        **{**cfg.data.parameters, "task_names": list(_task_types)},
+                                        statistics=stats)
+    reader = VITMultiTaskDataset(test_base_reader)
+
+    plot_fns = dict(zip(reader.task_names, [partial(vre_plot_fn, node=n) for n in reader.tasks]))
+    fix_plot_fns_(plot_fns, task_types, stats, cfg["data"]["parameters"]["normalization"])
+
+    (out_dir := Path.cwd() / f"out_{video_path.name}").mkdir(exist_ok=True)
+    for frame_ix in tqdm(frames):
+        if (out_file := out_dir / f"{frame_ix}.jpg").exists():
+            continue
+        batch_m2f = reader2.collate_fn([reader2[frame_ix]])
+        batch = reader.collate_fn([reader[frame_ix]])
+        m2f_img = inference("semantic_mask2former_r50_mapillary_converted", batch_m2f)
+        ens_img = inference(model_mae, batch, n_ens=30)
+        distil_img = inference(model_distil, batch)
+        rgb = batch_m2f["data"]["rgb"][0].permute(1, 2, 0).numpy()
+
+        titles = ["RGB", "Mask2Former (216M)", "Ensembles-30 (4M)", "Distillation (4M)"]
+        collage = collage_fn([rgb, m2f_img, ens_img, distil_img], titles=titles, rows_cols=(2, 2), size_px=40)
+        image_write(collage, out_file)
+
+if __name__ == "__main__":
+    main(get_args())
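
Usage sketch for wip.py (assuming the VRE representations were already computed into ./data_<video_name>/, which the asserts in main() require):

    python scripts/collage_comparison/wip.py my_video.mp4 --frames 100..200

--frames takes a start..end range and defaults to all frames; one 2x2 collage (RGB / Mask2Former / Ensembles-30 / Distillation) is written per frame to ./out_<video_name>/<frame>.jpg, and frames that already exist are skipped.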