updated notebook and added basic dronescapes representations
Browse files
dronescapes_reader/__init__.py
CHANGED
@@ -1,2 +1,3 @@
|
|
1 |
"""init file"""
|
2 |
from .multitask_dataset import MultiTaskDataset
|
|
|
|
1 |
"""init file"""
|
2 |
from .multitask_dataset import MultiTaskDataset
|
3 |
+
from .dronescapes_representations import DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation
|
dronescapes_reader/dronescapes_representations.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Dronescapes representations -- adds various loading/writing/image showing capabilities to dronescapes tasks"""
|
2 |
+
from pathlib import Path
|
3 |
+
import numpy as np
|
4 |
+
import torch as tr
|
5 |
+
import flow_vis
|
6 |
+
from overrides import overrides
|
7 |
+
from matplotlib.cm import hot # pylint: disable=no-name-in-module
|
8 |
+
from .multitask_dataset import NpzRepresentation
|
9 |
+
|
10 |
+
class DepthRepresentation(NpzRepresentation):
    """DepthRepresentation. Implements depth task-specific stuff, like hotmap plotting."""
    def __init__(self, *args, min_depth: float, max_depth: float, **kwargs):
        """
        :param min_depth: Lower clipping bound applied to depth values before normalization.
        :param max_depth: Upper clipping bound applied to depth values before normalization.
        Extra positional/keyword args are forwarded to NpzRepresentation.
        """
        super().__init__(*args, **kwargs)
        self.min_depth = min_depth
        self.max_depth = max_depth

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        """Render a depth map as a uint8 RGB image using matplotlib's 'hot' colormap."""
        x = x.numpy()
        x = np.clip(x, self.min_depth, self.max_depth)
        # Normalize to [0, 1]. nan=0 guards the 0/0 case of a constant map.
        # BUGFIX: the original passed 0 positionally, which bound to nan_to_num's
        # `copy` parameter (making it in-place), not `nan`; the NaN->0 replacement
        # only happened via the default. Pass the keyword explicitly.
        x = np.nan_to_num((x - x.min()) / (x.max() - x.min()), nan=0)
        y = hot(x)[..., 0:3]  # drop the alpha channel, keep RGB
        y = np.uint8(y * 255)
        return y
|
25 |
+
|
26 |
+
class OpticalFlowRepresentation(NpzRepresentation):
    """OpticalFlowRepresentation. Implements optical-flow task-specific stuff, like using flow_vis."""
    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        """Convert a flow field to an RGB visualization (flow_vis expects an (H, W, 2) array)."""
        return flow_vis.flow_to_color(x.numpy())
|
31 |
+
|
32 |
+
class SemanticRepresentation(NpzRepresentation):
    """SemanticRepresentation. Implements semantic-segmentation task-specific stuff: class list and color map."""
    def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
        """
        :param classes: Either the number of classes (names default to 0..n-1) or an explicit list of class names.
        :param color_map: One RGB triple per class, indexed by class id in plot_fn. Must match the class count.
        Extra positional/keyword args are forwarded to NpzRepresentation.
        """
        super().__init__(*args, **kwargs)
        self.classes = list(range(classes)) if isinstance(classes, int) else classes
        self.n_classes = len(self.classes)
        self.color_map = color_map
        assert len(color_map) == self.n_classes, (color_map, self.n_classes)

    @overrides
    def load_from_disk(self, path: Path) -> tr.Tensor:
        """Load a semantic map from disk; only argmaxed (H, W) data is supported, not one-hot/probabilities."""
        res = super().load_from_disk(path)
        assert len(res.shape) == 2, f"Only argmaxed data supported, got: {res.shape}"
        return res

    @overrides
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        """Color each pixel by its class id; ids outside [0, n_classes) are left black."""
        new_images = np.zeros((*x.shape, 3), dtype=np.uint8)
        x = x.numpy()
        for i in range(self.n_classes):
            new_images[x == i] = self.color_map[i]
        return new_images
|
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -109,7 +109,13 @@ class MultiTaskDataset(Dataset):
|
|
109 |
"""
|
110 |
if self._tasks is not None:
|
111 |
return self._tasks
|
112 |
-
self._tasks = [
|
|
|
|
|
|
|
|
|
|
|
|
|
113 |
return self._tasks
|
114 |
|
115 |
def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
|
@@ -122,7 +128,7 @@ class MultiTaskDataset(Dataset):
|
|
122 |
res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names} # float32 always
|
123 |
for i in range(len(items)):
|
124 |
for k in self.task_names:
|
125 |
-
res[k][i] = items[i][0][k] if items[i][0][k] is not None else
|
126 |
return res, items_name, self.task_names
|
127 |
|
128 |
# Private methods
|
|
|
109 |
"""
|
110 |
if self._tasks is not None:
|
111 |
return self._tasks
|
112 |
+
self._tasks = []
|
113 |
+
for task_name in self.task_names:
|
114 |
+
t = self.task_types[task_name]
|
115 |
+
if not isinstance(t, NpzRepresentation):
|
116 |
+
t = t(task_name)
|
117 |
+
self._tasks.append(t)
|
118 |
+
assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self._task_names, self._tasks)
|
119 |
return self._tasks
|
120 |
|
121 |
def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
|
|
|
128 |
res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names} # float32 always
|
129 |
for i in range(len(items)):
|
130 |
for k in self.task_names:
|
131 |
+
res[k][i] = items[i][0][k] if items[i][0][k] is not None else float("nan")
|
132 |
return res, items_name, self.task_names
|
133 |
|
134 |
# Private methods
|
scripts/dronescapes_viewer.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
scripts/dronescapes_viewer.py
CHANGED
@@ -1,14 +1,24 @@
|
|
1 |
#!/usr/bin/env python3
|
2 |
import sys
|
3 |
from pathlib import Path
|
4 |
-
sys.path.append(Path
|
5 |
-
from
|
|
|
6 |
from pprint import pprint
|
7 |
from torch.utils.data import DataLoader
|
8 |
import random
|
9 |
|
10 |
def main():
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
print(reader)
|
13 |
|
14 |
print("== Shapes ==")
|
|
|
1 |
#!/usr/bin/env python3
|
2 |
import sys
|
3 |
from pathlib import Path
|
4 |
+
sys.path.append(Path(__file__).parents[1].__str__())
|
5 |
+
from functools import partial
|
6 |
+
from dronescapes_reader import MultiTaskDataset, DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation
|
7 |
from pprint import pprint
|
8 |
from torch.utils.data import DataLoader
|
9 |
import random
|
10 |
|
11 |
def main():
|
12 |
+
sema_repr = partial(SemanticRepresentation, classes=8, color_map=[[0, 255, 0], [0, 127, 0], [255, 255, 0],
|
13 |
+
[255, 255, 255], [255, 0, 0], [0, 0, 255],
|
14 |
+
[0, 255, 255], [127, 127, 63]])
|
15 |
+
reader = MultiTaskDataset(sys.argv[1], handle_missing_data="fill_none",
|
16 |
+
task_types={"depth_dpt": DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
|
17 |
+
"depth_sfm_manual202204": DepthRepresentation("depth_sfm_manual202204",
|
18 |
+
min_depth=0, max_depth=300),
|
19 |
+
"opticalflow_rife": OpticalFlowRepresentation,
|
20 |
+
"semantic_segprop8": sema_repr,
|
21 |
+
"semantic_mask2former_swin_mapillary_converted": sema_repr})
|
22 |
print(reader)
|
23 |
|
24 |
print("== Shapes ==")
|