Various script updates. Fixed a bug in the transportation mapping
- data/train_set/.task_statistics.npz +2 -2
- scripts/dronescapes_viewer.ipynb +0 -0
- scripts/dronescapes_viewer/dronescapes_representations.py +37 -0
- scripts/dronescapes_viewer/dronescapes_viewer.ipynb +0 -0
- scripts/dronescapes_viewer/dronescapes_viewer.py +35 -0
- scripts/semantic_mapper/semantic_mapper.py +2 -1
- scripts/world_normals_analysis/convert_w2c.py +60 -0
- scripts/world_normals_analysis/world_to_camera_normals.ipynb +16 -21
data/train_set/.task_statistics.npz
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fd711dff3ed71c2d1c3d4821c81615ed07551a35526e2044f0c5f5182091f378
+size 19190
scripts/dronescapes_viewer.ipynb
DELETED
The diff for this file is too large to render.
scripts/dronescapes_viewer/dronescapes_representations.py
ADDED
@@ -0,0 +1,37 @@
+import sys
+from pathlib import Path
+from vre.representations.cv_representations import (
+    DepthRepresentation, NormalsRepresentation, SemanticRepresentation, ColorRepresentation, HSVRepresentation,
+    EdgesRepresentation, OpticalFlowRepresentation)
+from vre.representations import Representation
+sys.path.append(str(Path(__file__).parents[1] / "semantic_mapper"))
+from semantic_mapper import get_new_semantic_mapped_tasks
+
+def get_gt_tasks() -> dict[str, Representation]:
+    color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
+                 [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
+    classes_8 = ["land", "forest", "residential", "road", "little-objects", "water", "sky", "hill"]
+    tasks = [
+        SemanticRepresentation("semantic_segprop8", classes=classes_8, color_map=color_map),
+        DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
+        NormalsRepresentation("camera_normals_sfm_manual202204"),
+    ]
+    return {t.name: t for t in tasks}
+
+def get_other_tasks() -> dict[str, Representation]:
+    tasks = [
+        rgb := ColorRepresentation("rgb"),
+        # HSVRepresentation("hsv", [rgb]),
+        # DepthRepresentation("depth_dpt", min_depth=0, max_depth=1),
+        # EdgesRepresentation("edges_dexined"),
+        OpticalFlowRepresentation("opticalflow_rife"),
+        DepthRepresentation("depth_marigold", min_depth=0, max_depth=1),
+        NormalsRepresentation("normals_svd(depth_marigold)")
+    ]
+    return {t.name: t for t in tasks}
+
+dronescapes_task_types = {
+    **get_new_semantic_mapped_tasks(),
+    **get_other_tasks(),
+    **get_gt_tasks()
+}
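A quick sanity check of the new registry, as a hedged sketch (assumes the vre package is installed and the working directory is scripts/dronescapes_viewer/; the example output names are illustrative):

from dronescapes_representations import dronescapes_task_types

# the registry merges the mapped semantic tasks, the "other" tasks and the GT tasks
print(len(dronescapes_task_types))     # total number of registered tasks
print(sorted(dronescapes_task_types))  # e.g. "depth_marigold", "rgb", "semantic_segprop8", ...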
scripts/dronescapes_viewer/dronescapes_viewer.ipynb
ADDED
The diff for this file is too large to render.
scripts/dronescapes_viewer/dronescapes_viewer.py
ADDED
@@ -0,0 +1,35 @@
+import sys
+import os
+os.environ["STATS_PBAR"] = "1"
+os.environ["VRE_LOGLEVEL"] = "0"
+from pathlib import Path
+sys.path.append(Path.cwd().parent.__str__())
+from pprint import pprint
+import random
+from vre.readers.multitask_dataset import MultiTaskDataset #, MultiTaskItem
+# from vre.representations import build_representations_from_cfg, add_external_representations, Representation, ReprOut
+from vre.utils import MemoryData, reorder_dict
+from omegaconf import OmegaConf
+import numpy as np
+import torch as tr
+from media_processing_lib.collage_maker import collage_fn
+from media_processing_lib.image import image_add_title, image_write
+import matplotlib.pyplot as plt
+
+from dronescapes_representations import dronescapes_task_types
+
+data_path = "../../data/test_set"
+# data_path = "../vre_dronescapes/atanasie_DJI_0652_full"
+# config_path = "../vre_dronescapes/cfg.yaml"
+# external_path = "../vre_dronescapes/semantic_mapper.py:get_new_semantic_mapped_tasks"
+stats_path = "../../data/train_set/.task_statistics.npz"
+# cfg = OmegaConf.to_container(OmegaConf.load(config_path), resolve=True)
+# representations = build_representations_from_cfg(cfg)
+# representations = add_external_representations(representations, external_path, cfg)
+reader = MultiTaskDataset(data_path, task_names=list(dronescapes_task_types),
+                          task_types=dronescapes_task_types, handle_missing_data="fill_nan",
+                          normalization="min_max", cache_task_stats=True, batch_size_stats=100,
+                          statistics=np.load(stats_path, allow_pickle=True)["arr_0"].item())
+print(reader)
+print("== Shapes ==")
+pprint(reader.data_shape)
scripts/semantic_mapper/semantic_mapper.py
CHANGED
@@ -349,7 +349,8 @@ def get_new_semantic_mapped_tasks(tasks_subset: list[str] | None = None) -> dict
     },
     {
      "others": [c for c in coco_classes if c not in
-                (cls := ["bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat"
+                (cls := ["bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
+                         "road", "railroad", "pavement-merged"])],
      "transportation": cls,
     },
     {
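The fix above adds "road", "railroad" and "pavement-merged" to the transportation bucket and, through the same (cls := ...) expression, removes them from "others". A self-contained sketch of the pattern with a hypothetical, shortened coco_classes list (the real list is defined in semantic_mapper.py):

# hypothetical, shortened class list; the real coco_classes lives in semantic_mapper.py
coco_classes = ["person", "car", "boat", "road", "railroad", "pavement-merged", "tree-merged"]

mapping = {
    # everything that is not a transportation-related class falls into "others"
    "others": [c for c in coco_classes if c not in
               (cls := ["bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
                        "road", "railroad", "pavement-merged"])],
    "transportation": cls,  # the walrus assignment above makes cls reusable here
}
print(mapping["others"])          # ['person', 'tree-merged']
print(mapping["transportation"])  # now also covers road/railroad/pavement-merged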
scripts/world_normals_analysis/convert_w2c.py
ADDED
@@ -0,0 +1,60 @@
+from argparse import ArgumentParser, Namespace
+from pathlib import Path
+from multiprocessing import Pool
+import shutil
+import glob
+from tqdm import tqdm
+from loggez import loggez_logger as logger
+import numpy as np
+
+def w2c(x: np.ndarray, cm: np.ndarray) -> np.ndarray:
+    x1 = (x - 0.5) * 2  # [0:1] -> [-1: 1]
+    x2 = x1 @ np.linalg.inv(cm)  # [-1: 1] -> [-1: 1]
+    return x2.clip(-1, 1)
+
+def load(path: Path) -> np.ndarray:
+    return np.load(path, allow_pickle=True)["arr_0"]
+
+def do_one(args: tuple[Path, Path, Path]):
+    in_path, cm_path, out_path = args
+    in_np, cm_np = load(in_path), load(cm_path)
+    out_np = w2c(in_np.astype(np.float32), cm_np).astype(in_np.dtype)
+    np.savez_compressed(out_path, out_np)
+
+def get_args() -> Namespace:
+    parser = ArgumentParser()
+    parser.add_argument("in_dir", type=Path)
+    parser.add_argument("camera_parameters_dir", type=Path)
+    parser.add_argument("--out_dir", "-o", type=Path, required=True)
+    parser.add_argument("--overwrite", action="store_true")
+    parser.add_argument("--n_workers", type=int, default=0)
+    args = parser.parse_args()
+    assert not args.out_dir.exists() or args.overwrite, f"'{args.out_dir}' exists. Use --overwrite"
+
+    return args
+
+def main(args: Namespace):
+    logger.info(f"- In dir: '{args.in_dir}'")
+    logger.info(f"- Camera Parameters dir: '{args.camera_parameters_dir}'")
+    logger.info(f"- Out dir: '{args.out_dir}'")
+    in_paths = list(map(Path, glob.glob(f"{args.in_dir}/**/*.npz", recursive=True)))
+    out_paths = [args.out_dir / in_path.name for in_path in in_paths]
+    assert len(in_paths) > 0, (args.in_dir, in_paths)
+    logger.info(f"npz files found: {len(in_paths)}")
+    shutil.rmtree(args.out_dir, ignore_errors=True)
+    Path(args.out_dir).mkdir()
+
+    cm_paths = []
+    for path in in_paths:
+        path_split = path.stem.split("_")
+        scene, scene_ix = "_".join(path_split[0:-1]), path_split[-1]
+        cm_path = Path(args.camera_parameters_dir) / scene / f"cameraRotationMatrices/{scene_ix:0>6}.npz"
+        assert cm_path.exists(), (path, cm_path)
+        cm_paths.append(cm_path)
+    logger.info("Found all camera matrices paths")
+
+    map_fn = map if args.n_workers == 0 else Pool(args.n_workers).imap
+    list(map_fn(do_one, tqdm(zip(in_paths, cm_paths, out_paths), total=len(in_paths))))
+
+if __name__ == "__main__":
+    main(get_args())
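For reference, a numpy-only sketch of the conversion w2c() performs, on synthetic data; the array shapes and the rotation matrix below are assumptions, not values from the dataset:

import numpy as np

# hypothetical world-space normals stored in [0, 1], shape H x W x 3
rng = np.random.default_rng(0)
normals_01 = rng.random((4, 5, 3)).astype(np.float32)
theta = np.pi / 6  # hypothetical camera rotation about the Z axis
cm = np.array([[np.cos(theta), -np.sin(theta), 0],
               [np.sin(theta),  np.cos(theta), 0],
               [0,              0,             1]], dtype=np.float32)

x1 = (normals_01 - 0.5) * 2   # [0:1] -> [-1:1], as in w2c()
x2 = x1 @ np.linalg.inv(cm)   # rotate world-space normals into camera space
camera_normals = x2.clip(-1, 1)
print(camera_normals.shape)   # (4, 5, 3)

# CLI equivalent (paths are hypothetical):
#   python convert_w2c.py <in_dir> <camera_parameters_dir> -o <out_dir> [--overwrite] [--n_workers N]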
scripts/world_normals_analysis/world_to_camera_normals.ipynb
CHANGED
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -12,12 +12,7 @@
     "os.environ[\"VRE_LOGLEVEL\"] = \"0\"\n",
     "from pathlib import Path\n",
     "sys.path.append(Path.cwd().parent.__str__())\n",
-    "from pprint import pprint\n",
-    "import random\n",
-    "from vre.readers.multitask_dataset import MultiTaskDataset, MultiTaskItem\n",
-    "from vre.representations import build_representations_from_cfg, add_external_representations, Representation, ReprOut\n",
     "from vre.utils import MemoryData, reorder_dict, lo, FakeVideo\n",
-    "from omegaconf import OmegaConf\n",
     "import numpy as np\n",
     "import torch as tr\n",
     "from media_processing_lib.collage_maker import collage_fn\n",
@@ -66,7 +61,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": null,
    "metadata": {},
    "outputs": [
     {
@@ -83,15 +78,15 @@
     }
    ],
    "source": [
-    "
-    "scene = \"herculane_DJI_0021_full\"\n",
-    "marigolds_path = f\"
-    "cms_path = f\"
-    "
-    "normals_path = \"
+    "scene = \"atanasie_DJI_0652_full\"\n",
+    "# scene = \"herculane_DJI_0021_full\"\n",
+    "marigolds_path = f\"../../vre_dronescapes/{scene}/normals_svd(depth_marigold)/npz\"\n",
+    "cms_path = f\"../../raw_data/camera_matrices/{scene}/cameraRotationMatrices\"\n",
+    "buildings_path = f\"../../vre_dronescapes/{scene}/buildings/npz\"\n",
+    "normals_path = \"../../data\"\n",
     "\n",
     "marigold_data = natsorted(Path(marigolds_path).iterdir(), key=lambda p: p.name)\n",
-    "
+    "buildings_data = natsorted(Path(buildings_path).iterdir(), key=lambda p: p.name)\n",
     "cms_data = natsorted(Path(cms_path).iterdir(), key=lambda p: p.name)\n",
     "\n",
     "normals_all = list(map(Path, glob.glob(f\"{normals_path}/**/normals_sfm_manual202204/**/{scene}*.npz\", recursive=True)))\n",
@@ -269,16 +264,16 @@
     " imgs = [*imgs, C_collage]\n",
     " titles, diffs = [*titles, f\"inv + permute axis {comb}\"], [*diffs, C_diff]\n",
     "\n",
-    "
-    "
-    "
-    "
-    "
+    "display(pd.DataFrame(diffs, index=titles, columns=ixs).sum(1).to_frame().fillna(0).sort_values(0))\n",
+    "sorted_ixs = np.argsort(pd.DataFrame(diffs, index=titles, columns=ixs).sum(1)).values\n",
+    "imgs, titles = [imgs[ix] for ix in sorted_ixs], [titles[ix] for ix in sorted_ixs]\n",
+    "collage = collage_fn(imgs, rows_cols=(len(imgs), 1), titles=titles, size_px=25, pad_to_max=False)\n",
+    "display(Image.fromarray(collage))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": null,
    "metadata": {},
    "outputs": [
     {
@@ -313,7 +308,7 @@
    "print(frames.shape)\n",
    "video = FakeVideo(frames, fps=10)\n",
    "print(video)\n",
-   "video.write(\"
+   "video.write(\"atanasie.mp4\")"
    ]
   },
   {
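The new cell ranks the candidate normal conversions by their summed per-frame difference before assembling the collage. A hedged, self-contained sketch of that ranking step with made-up values (titles, diffs and ixs stand in for the notebook's variables):

import numpy as np
import pandas as pd

titles = ["identity", "inv", "inv + permute axis (0, 2, 1)"]  # hypothetical conversion names
diffs = [[0.9, 0.8], [0.2, 0.3], [0.05, 0.04]]                # hypothetical per-frame differences
ixs = [0, 1]                                                  # frame indices

totals = pd.DataFrame(diffs, index=titles, columns=ixs).sum(1)
sorted_ixs = np.argsort(totals).values    # smallest total difference first
print([titles[ix] for ix in sorted_ixs])  # ['inv + permute axis (0, 2, 1)', 'inv', 'identity']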