#!/usr/bin/env python3
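"""
Consistency script: compares semantic segmentation predictions on a video from three sources --
a pretrained Mask2Former expert, an MAE-style model averaged over several masked forward passes,
and a distilled model -- and writes per-frame collages. Assumes the VRE representations for the
video were pre-computed (see the disabled `vre` invocation in main()).
"""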
import os
os.environ["VRE_LOGLEVEL"] = "0"  # must be set before importing vre
import sys
import random
import subprocess  # only needed if the disabled VRE invocation in main() is re-enabled
from argparse import ArgumentParser, Namespace
from functools import partial
from pathlib import Path
from typing import Callable

import numpy as np
import torch as tr
from tqdm import tqdm
from omegaconf import DictConfig
from loggez import loggez_logger as logger
from lightning_module_enhanced import LME
from lightning_module_enhanced.utils import to_device
from vre import FFmpegVideo
from vre.readers import MultiTaskDataset
from vre.utils import collage_fn, colorize_semantic_segmentation, image_write

sys.path.append("/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers")
from readers import VITMultiTaskDataset, build_representations
from models import build_model
from plots import vre_plot_fn
from algorithms import build_algorithm, ModelAlgorithmOutput
# os.environ["CUDA_VISIBLE_DEVICES"]="0" #"7"
device = tr.device("cuda") if tr.cuda.is_available() else tr.device("cpu")
def seed(seed: int):
    """Seed all RNGs for reproducibility. Defined for convenience; not called by default."""
    random.seed(seed)
    np.random.seed(seed)
    tr.random.manual_seed(seed)
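# The MAE model expects every task it was trained with to be present in the batch. Tasks that were
# fully masked during training ("excluded") are filled in with zero tensors of the right channel
# count. Note the hard-coded 540x960 resolution, which appears to match the input videos.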
def fix_batch_(batch: dict, missing_tasks: list[str]) -> dict:
    assert len(batch["data"]["rgb"]) == 1, batch["data"]["rgb"]  # only inference with bs=1 is supported for now
    assert set(missing_tasks).issubset({"semantic_output", "depth_output", "camera_normals_output"}), missing_tasks
    if "semantic_output" in missing_tasks:
        batch["data"]["semantic_output"] = [tr.zeros(8, 540, 960)]
        batch["image_shape"][0]["semantic_output"] = 8
    if "depth_output" in missing_tasks:
        batch["data"]["depth_output"] = [tr.zeros(1, 540, 960)]
        batch["image_shape"][0]["depth_output"] = 1
    if "camera_normals_output" in missing_tasks:
        batch["data"]["camera_normals_output"] = [tr.zeros(3, 540, 960)]
        batch["image_shape"][0]["camera_normals_output"] = 3
    # keep keys sorted so the model sees tasks in a deterministic order
    batch["data"] = {k: batch["data"][k] for k in sorted(batch["data"].keys())}
    batch["image_shape"] = [{k: batch["image_shape"][0][k] for k in sorted(batch["image_shape"][0].keys())}]
    return batch
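# The reader only builds plot functions for its input tasks, so the three *_output heads are given
# their normalization statistics and a vre_plot_fn here before anything is plotted.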
def fix_plot_fns_(plot_fns: dict[str, Callable], task_types: dict[str, "Repr"],
                  stats: dict[str, tuple[list[float], ...]], normalization: str) -> None:
    for task_name in ["camera_normals_output", "depth_output", "semantic_output"]:
        if hasattr(task_types[task_name], "set_normalization"):
            task_types[task_name].set_normalization(normalization, tuple(stats[task_name]))
        plot_fns[task_name] = partial(vre_plot_fn, node=task_types[task_name])
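# Rebuilds a trained model from a Lightning checkpoint: config, weights, algorithm and the
# normalization statistics stored in its hyper-parameters.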
def load_model_from_path(weights_path: str) -> LME:
    data = tr.load(weights_path, map_location="cpu")
    cfg = DictConfig(data["hyper_parameters"]["cfg"])
    cfg.train.algorithm.metrics_only_on_masked = True
    model = LME(build_model(cfg).to(device).eval())
    logger.info(f"DEVICE: {device}")
    model.load_state_dict(data["state_dict"])
    model.model_algorithm = build_algorithm(cfg.model.type, cfg.train.algorithm, loss_computer=None)
    model.hparams.cfg = cfg
    stats = data["hyper_parameters"]["statistics"]
    model.hparams.stats = stats
    logger.info(f"Loaded '{cfg.model.type}' with {model.num_params} parameters from '{weights_path}'")
    logger.info(f"Excluded (fully masked) tasks: {cfg.train.algorithm.masking.parameters.excluded_tasks}")
    return model
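# Maps an (H, W) class-index map to an RGB image using the 8-class Dronescapes palette.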
def colorize_dronescapes(item: np.ndarray) -> np.ndarray:
    assert len(item.shape) == 2, item.shape
    color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                 [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
    classes_8 = ["land", "forest", "residential", "road", "little-objects", "water", "sky", "hill"]
    return colorize_semantic_segmentation(item[None], color_map=color_map, classes=classes_8)[0]
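# Runs one of three prediction paths for a single frame:
#   1. the pretrained Mask2Former expert: its output is already in the batch, so it is read back;
#   2. a model trained on two tasks (rgb + semantic_output), e.g. the distilled one: one forward pass;
#   3. the MAE model: `n_ens` forward passes with different random masks, combined with a running
#      mean, i.e. acc_i = (acc_{i-1} * i + pred_i) / (i + 1).
# Returns the argmax class map as uint8.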
@tr.no_grad()
def inference(model: LME | str, batch: dict, n_ens: int | None = None) -> np.ndarray:
    assert len(batch["name"]) == 1, batch["name"]
    if isinstance(model, str) and model == "semantic_mask2former_r50_mapillary_converted":
        item = batch["data"]["semantic_mask2former_r50_mapillary_converted"][0]
    else:
        tasks = model.hparams["cfg"]["train"]["algorithm"]["masking"]["parameters"]["tasks"]
        if len(tasks) == 2:  # rgb + semantic_output only: a single forward pass suffices
            batch = {
                "data": {"rgb": batch["data"]["rgb"], "semantic_output": [tr.zeros(8, 540, 960)]},
                "image_shape": [{"rgb": 3, "semantic_output": 8}, *batch["image_shape"][1:]],
            }
            y_model: ModelAlgorithmOutput = to_device(model.model_algorithm(model, batch).y, "cpu")
            item = y_model.pred["semantic_output"][0]
        else:  # ensemble of n_ens differently-masked forward passes, combined with a running mean
            assert n_ens is not None, n_ens
            batch = fix_batch_(batch, model.hparams.cfg.train.algorithm.masking.parameters.excluded_tasks)
            acc_sema = None
            for i in range(n_ens):
                y_model: ModelAlgorithmOutput = to_device(model.model_algorithm(model, batch).y, "cpu")
                curr_sema = y_model.pred["semantic_output"]
                acc_sema = curr_sema if acc_sema is None else (acc_sema * i + curr_sema) / (i + 1)
            item = acc_sema[0]
    return item.permute(1, 2, 0).numpy().argmax(-1).astype(np.uint8)
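# CLI: a positional video path plus an optional "start..end" frame range (end exclusive).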
def get_args() -> Namespace:
    parser = ArgumentParser()
    parser.add_argument("video_path", type=Path)
    parser.add_argument("--frames", help="optional 'start..end' range of frames to process")
    return parser.parse_args()
def main(args: Namespace):
    video = FFmpegVideo(video_path := args.video_path)
    frames = list(range(len(video)))
    if args.frames is not None:
        frames = list(range(*map(int, args.frames.split(".."))))  # "start..end" (end exclusive)
    (vre_dir := Path.cwd() / f"data_{video_path.name}").mkdir(exist_ok=True)
    cfg_path = Path(__file__).parent / "cfg.yaml"
    assert cfg_path.exists(), cfg_path
    # VRE representations expected under vre_dir; computed by the (disabled) invocation below
    representations = ["semantic_mask2former_coco_47429163_0", "semantic_mask2former_mapillary_49189528_0",
                       "semantic_mask2former_mapillary_49189528_1", "depth_marigold", "normals_svd(depth_marigold)",
                       "semantic_mask2former_swin_mapillary_converted", "semantic_mask2former_r50_mapillary_converted",
                       "semantic_mask2former_swin_coco_converted", "semantic_median_expert",
                       "buildings", "buildings(nearby)", "containing", "rgb", "safe-landing-no-sseg",
                       "safe-landing-semantics", "sky-and-water", "transportation", "vegetation"]
    # Disabled: run VRE as a subprocess to pre-compute the representations above. Kept for
    # reference; re-enable if vre_dir has not been populated yet. Renamed to vre_args so it no
    # longer shadows the `args` parameter.
    # vre_args = ["vre", str(video_path), "--config_path", str(cfg_path),
    #             "-o", str(vre_dir), "--representations", *representations,
    #             "-I", f"{Path.cwd().parents[1]}/readers/semantic_mapper.py:get_new_semantic_mapped_tasks",
    #             "--output_dir_exists_mode", "skip_computed"]
    # if frames is not None:
    #     vre_args.extend(["--frames", *map(str, frames)])
    # subprocess.run(args=vre_args, env={**os.environ.copy(),
    #                                    **{"VRE_DEVICE": "cuda" if tr.cuda.is_available() else "cpu",
    #                                       "CUDA_VISIBLE_DEVICES": "7"}})
    assert (vre_dir / "rgb").exists(), vre_dir  # otherwise run vre first (see disabled block above)
    for frame in frames:
        assert (vre_dir / f"rgb/npz/{frame}.npz").exists(), frame
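    # Two checkpoints are compared: the MAE model used as a 30-pass ensemble, and a model distilled
    # from it; both are ~4M parameters (vs the 216M Mask2Former expert, per the collage titles below).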
    weights_path = "/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/ckpts/safeuav/sema/mae-4M-ext/epoch=37-val_semantic_output_mean_iou=0.470.ckpt"
    weights_path_distil = "/export/home/proiecte/aux/mihai_cristian.pirvu/code/neo-transformers/ckpts/safeuav/distil2/sl0-4M-ext2-distil/epoch=24-val_semantic_output_mean_iou=0.472.ckpt"
    model_mae = load_model_from_path(weights_path)
    model_distil = load_model_from_path(weights_path_distil)
    task_types = build_representations((cfg := model_mae.hparams.cfg).data.dataset)
    stats = model_mae.hparams.stats
    h, w = video.shape[1:3]
    # reader2 loads only rgb + the Mask2Former expert output (unnormalized, used as-is)
    test_base_reader2 = MultiTaskDataset(vre_dir,
                                         task_types={k: task_types[k] for k in ("rgb", "semantic_mask2former_r50_mapillary_converted")},
                                         **{**cfg.data.parameters, "task_names": ["rgb", "semantic_mask2former_r50_mapillary_converted"],
                                            "normalization": None},
                                         statistics=stats)
    reader2 = VITMultiTaskDataset(test_base_reader2)
    # the main reader loads every task the MAE model was trained on, except the fully masked ones
    model_tasks = [t for t in cfg.data.parameters.task_names
                   if t not in cfg.train.algorithm.masking.parameters.excluded_tasks]
    _task_types = {k: v for k, v in task_types.items() if k in model_tasks}
    test_base_reader = MultiTaskDataset(vre_dir, task_types=_task_types,
                                        **{**cfg.data.parameters, "task_names": list(_task_types)},
                                        statistics=stats)
    reader = VITMultiTaskDataset(test_base_reader)
    plot_fns = dict(zip(reader.task_names, [partial(vre_plot_fn, node=n) for n in reader.tasks]))
    fix_plot_fns_(plot_fns, task_types, stats, cfg["data"]["parameters"]["normalization"])
    (out_dir := Path.cwd() / f"out_{video_path.name}").mkdir(exist_ok=True)
    for x in ["ens", "m2f", "distil", "collage"]:
        (out_dir / x).mkdir(exist_ok=True)
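    # Per-frame loop. Each prediction is cached as a compressed .npz under out_dir, so re-running
    # the script only computes the missing frames and collages.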
    for frame_ix in tqdm(frames):
        batch_m2f = reader2.collate_fn([reader2[frame_ix]])
        batch = reader.collate_fn([reader[frame_ix]])
        rgb = video[frame_ix]
        if not (pth1 := out_dir / f"m2f/{frame_ix}.npz").exists():
            y = inference("semantic_mask2former_r50_mapillary_converted", batch_m2f)
            np.savez_compressed(pth1, y)
        m2f = np.load(pth1)["arr_0"]
        if not (pth2 := out_dir / f"ens/{frame_ix}.npz").exists():
            y = inference(model_mae, batch, n_ens=30)
            np.savez_compressed(pth2, y)
        ens = np.load(pth2)["arr_0"]
        if not (pth3 := out_dir / f"distil/{frame_ix}.npz").exists():
            y = inference(model_distil, batch)
            np.savez_compressed(pth3, y)
        distil = np.load(pth3)["arr_0"]
        if not (out_file := out_dir / f"collage/{frame_ix}.jpg").exists():
            m2f_img = colorize_dronescapes(m2f)
            ens_img = colorize_dronescapes(ens)
            distil_img = colorize_dronescapes(distil)
            titles = ["RGB", "Mask2Former (216M)", "Ensembles-30 (4M)", "Distillation (4M)"]
            collage = collage_fn([rgb, m2f_img, ens_img, distil_img], titles=titles, rows_cols=(2, 2), size_px=40)
            image_write(collage, out_file)
if __name__ == "__main__":
    main(get_args())
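# Example invocation (hypothetical paths; the script name is an assumption). Requires a cfg.yaml
# next to this script and pre-computed VRE representations under ./data_<video_name>/:
#   python consistency_script.py /path/to/video.mp4 --frames 0..100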