refactor multitask gathering to support dependencies
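This commit lets representations declare which tasks' files they actually load: `NpzRepresentation` gains a `dependencies` list (defaulting to the task itself), and `MultiTaskDataset._build_dataset` now gathers files per task by following those dependencies. This replaces the `files_per_repr_overwrites` mechanism, so `hsv`, which is computed from stored RGB frames, is expressed as `HSVRepresentation("hsv", dependencies=["rgb"])` instead of overwriting its file list.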
dronescapes_reader/dronescapes_representations.py
CHANGED
@@ -1,6 +1,7 @@
 """Dronescapes representations -- adds various loading/writing/image showing capabilities to dronescapes tasks"""
 from __future__ import annotations
 from pathlib import Path
+from typing import Callable
 import numpy as np
 import torch as tr
 import flow_vis
@@ -15,8 +16,8 @@ except ImportError:
 from .npz_representation import NpzRepresentation
 
 class RGBRepresentation(NpzRepresentation):
-    def __init__(self, name: str):
-        super().__init__(name, n_channels=3)
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, n_channels=3, **kwargs)
 
 class HSVRepresentation(RGBRepresentation):
     @overrides
@@ -25,13 +26,13 @@ class HSVRepresentation(RGBRepresentation):
         return tr.from_numpy(rgb2hsv(rgb)).float()
 
 class EdgesRepresentation(NpzRepresentation):
-    def __init__(self, name: str):
-        super().__init__(name, n_channels=1)
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, n_channels=1, **kwargs)
 
 class DepthRepresentation(NpzRepresentation):
-    """DepthRepresentation. Implements depth task-specific stuff, like …
-    def __init__(self, name: str, min_depth: float, max_depth: float):
-        super().__init__(name, n_channels=1)
+    """DepthRepresentation. Implements depth task-specific stuff, like spectral map for plots."""
+    def __init__(self, name: str, min_depth: float, max_depth: float, *args, **kwargs):
+        super().__init__(name, n_channels=1, *args, **kwargs)
         self.min_depth = min_depth
         self.max_depth = max_depth
 
@@ -51,13 +52,13 @@ class DepthRepresentation(NpzRepresentation):
         return y.astype(np.uint8)
 
 class NormalsRepresentation(NpzRepresentation):
-    def __init__(self, name: str):
-        super().__init__(name, n_channels=3)
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, n_channels=3, **kwargs)
 
 class OpticalFlowRepresentation(NpzRepresentation):
-    """OpticalFlowRepresentation. Implements …
-    def __init__(self, name: str):
-        super().__init__(name, n_channels=2)
+    """OpticalFlowRepresentation. Implements flow task-specific stuff, like using flow_vis."""
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, n_channels=2, **kwargs)
 
     @overrides
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
@@ -66,7 +67,7 @@ class OpticalFlowRepresentation(NpzRepresentation):
         return flow_vis.flow_to_color(x)
 
 class SemanticRepresentation(NpzRepresentation):
-    """SemanticRepresentation. Implements …
+    """SemanticRepresentation. Implements semantic task-specific stuff, like argmaxing if needed"""
     def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
         self.n_classes = len(list(range(classes)) if isinstance(classes, int) else classes)
         super().__init__(*args, **kwargs, n_channels=self.n_classes)
@@ -92,6 +93,35 @@ class SemanticRepresentation(NpzRepresentation):
             new_images[x_argmax == i] = self.color_map[i]
         return new_images
 
+# class SemanticMapper(SemanticRepresentation):
+#     """Maps one or more semantic segmentations to a final one + a merge fn. Copy-pasta from VRE"""
+#     def __init__(self, *args, original_classes: list[list[str]], mapping: list[dict[str, list[str]]],
+#                  color_map: list[tuple[int, int, int]],
+#                  merge_fn: Callable[[list[np.ndarray]], np.ndarray] | None = None, **kwargs):
+#         super().__init__(*args, classes=list(mapping[0].keys()), color_map=color_map, **kwargs)
+#         assert len(self.dependencies) >= 1, "No dependencies provided. Need at least one semantic segmentation to map."
+#         assert isinstance(mapping, list), type(mapping)
+#         assert len(mapping) == (B := len(self.dependencies)), (len(mapping), B)
+#         assert (A := len(original_classes)) == len(self.dependencies), (A, B)
+#         assert all(m.keys() == mapping[0].keys() for m in mapping), [list(m.keys()) for m in mapping]
+#         assert len(color_map) == len(mapping[0].keys()), (len(color_map), len(mapping[0].keys()))
+#         self.original_classes = original_classes
+#         self.mapping = mapping
+
+#     def _make_one(self, semantic_dep_data: np.ndarray, mapping: dict[str, list[str]],
+#                   original_classes: list[str]) -> np.ndarray:
+#         assert semantic_dep_data.dtype in (np.uint8, np.uint16), semantic_dep_data.dtype
+#         mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v)
+#                                                       for _v in v] for k, v in mapping.items()}
+#         flat_mapping = {}
+#         for k, v in mapping_ix.items():
+#             for _v in v:
+#                 flat_mapping[_v] = k
+#         mapped_data = np.vectorize(flat_mapping.get)(semantic_dep_data).astype(np.uint8)
+#         return mapped_data
+
+
+
 _color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
               [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
 coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
@@ -156,7 +186,7 @@ mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 1
 
 dronescapes_task_types = {  # some pre-baked representations
     "rgb": RGBRepresentation("rgb"),
-    "hsv": HSVRepresentation("hsv"),
+    "hsv": HSVRepresentation("hsv", dependencies=["rgb"]),
     "edges_dexined": EdgesRepresentation("edges_dexined"),
     "edges_gb": EdgesRepresentation("edges_gb"),
     "depth_dpt": DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
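For context, a minimal sketch of what the new `dependencies` argument expresses, using only names visible in this diff (everything else, e.g. the import path, is an assumption):

    # hsv stores no .npz files of its own: it declares rgb as its single dependency,
    # so the dataset hands it rgb's files and the conversion happens at load time via rgb2hsv.
    from dronescapes_reader.dronescapes_representations import RGBRepresentation, HSVRepresentation

    rgb = RGBRepresentation("rgb")                        # no explicit dependencies
    hsv = HSVRepresentation("hsv", dependencies=["rgb"])  # mirrors dronescapes_task_types above

    assert rgb.dependencies == ["rgb"]  # default from NpzRepresentation: a task depends on its own files
    assert hsv.dependencies == ["rgb"]  # hsv is derived entirely from rgb's files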
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -62,7 +62,6 @@ class MultiTaskDataset(Dataset):
                  normalization: str | None | dict[str],
                  handle_missing_data: str = "fill_none",
                  files_suffix: str = "npz",
-                 files_per_repr_overwrites: dict[str, str] | None = None,
                  cache_task_stats: bool = (os.getenv("STATS_CACHE", "0") == "1"),
                  batch_size_stats: int = int(os.getenv("STATS_BATCH_SIZE", "1")),
                  statistics: dict[str, TaskStatistics] | None = None,
@@ -74,8 +73,7 @@ class MultiTaskDataset(Dataset):
         self.path = Path(path).absolute()
         self.handle_missing_data = handle_missing_data
         self.suffix = files_suffix
-        self.files_per_repr_overwrites = files_per_repr_overwrites
-        self.files_per_repr, self.file_names = self._build_dataset()  # filtered by 'drop' or 'fill_*' logic
+        self.files_per_repr, self.file_names = self._build_dataset(task_types, task_names)  # filtered by 'drop' or 'fill_*' logic
         self.cache_task_stats = cache_task_stats
         self.batch_size_stats = batch_size_stats
 
@@ -213,50 +211,38 @@ class MultiTaskDataset(Dataset):
         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
         return in_files
 
-    def _build_dataset(self) -> BuildDatasetTuple:
+    def _build_dataset(self, task_types: dict[str, NpzRepresentation], task_names: list[str] | None) -> BuildDatasetTuple:
         logger.debug(f"Building dataset from: '{self.path}'")
-        … (old line 218 truncated in the render)
+        all_npz_files = self._get_all_npz_files()
+        all_files: dict[str, dict[str, str]] = {k: {_v.name: _v for _v in v} for k, v in all_npz_files.items()}
+        task_names: list[str] = list(all_files.keys()) if task_names is None else task_names
+        relevant_tasks_for_files = set()  # hsv requires only rgb, so we look at dependencies later on
+        for task_name in task_names:
+            relevant_tasks_for_files.update(task_types[task_name].dependencies)
+        assert (diff := relevant_tasks_for_files.difference(all_files)) == set(), diff
+        names_to_tasks: dict[str, list[str]] = {}  # {name: [task]}
+        for task_name in relevant_tasks_for_files:  # just the relevant tasks
+            for path_name in all_files[task_name].keys():
+                names_to_tasks.setdefault(path_name, [])
+                names_to_tasks[path_name].append(task_name)
+
         if self.handle_missing_data == "drop":
-        … (old lines 220-235 truncated in the render)
-            common = common.intersection([f.name for f in in_files[node]])
-        assert len(common) > 0, f"Node '{node}' made the intersection null"
-        common = natsorted(list(common))
-        logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
-        files_per_repr = {node: [name_to_node_path[node][x] for x in common] for node in nodes}
-        assert len(files_per_repr) > 0
-        return files_per_repr, common
-
-    def _build_dataset_fill_missing(self, in_files: dict[str, list[Path]]) -> BuildDatasetTuple:
-        name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
-        all_files = set(x.name for x in next(iter(in_files.values())))
-        nodes = in_files.keys()
-        for node in (nodes := in_files.keys()):
-            all_files = all_files.union([f.name for f in in_files[node]])
-        all_files = natsorted(list(all_files))
-        logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
-
-        files_per_repr = {node: [] for node in nodes}
-        for node in nodes:
-            for file_name in all_files:
-                file_path = name_to_node_path[node].get(file_name, None)
-                files_per_repr[node].append(file_path)
-        assert len(files_per_repr) > 0
-        return files_per_repr, all_files
+            b4 = len(names_to_tasks)
+            names_to_tasks = {k: v for k, v in names_to_tasks.items() if len(v) == len(relevant_tasks_for_files)}
+            logger.debug(f"Dropped {b4 - len(names_to_tasks)} files not in all tasks")
+        all_names: list[str] = natsorted(names_to_tasks.keys())
+        logger.info(f"Total files: {len(names_to_tasks)} per task across {len(task_names)} tasks")
+
+        files_per_task: dict[str, list[str | None] | list[list[str] | None]] = {task: [] for task in task_names}
+        for name in all_names:
+            for task in task_names:
+                all_deps_exist = set(deps := task_types[task].dependencies).issubset(names_to_tasks[name])
+                if not all_deps_exist:
+                    files_per_task[task].append(None)  # if any of the deps don't exist for this task, skip it.
+                else:
+                    paths = [all_files[dep][name] for dep in deps]
+                    files_per_task[task].append(paths if len(deps) > 1 else paths[0])
+        return files_per_task, all_names
 
     def _compute_statistics(self) -> dict[str, TaskStatistics]:
         cache_path = self.path / f".task_statistics.npz"
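To make the new gathering logic easier to check in isolation, here is a self-contained sketch of the same algorithm; the helper name `build_files_per_task` and the toy file names are illustrative, not part of the repo:

    from natsort import natsorted

    def build_files_per_task(all_files: dict[str, dict[str, str]],
                             task_deps: dict[str, list[str]],
                             drop_missing: bool = False):
        # all_files: {task: {file_name: path}}; task_deps: {task: task_types[task].dependencies}
        relevant = {dep for deps in task_deps.values() for dep in deps}
        names_to_tasks: dict[str, list[str]] = {}
        for task in relevant:
            for name in all_files[task]:
                names_to_tasks.setdefault(name, []).append(task)
        if drop_missing:  # "drop" mode: keep only names present for every relevant task
            names_to_tasks = {k: v for k, v in names_to_tasks.items() if len(v) == len(relevant)}
        all_names = natsorted(names_to_tasks)
        files_per_task = {task: [] for task in task_deps}
        for name in all_names:
            for task, deps in task_deps.items():
                if not set(deps).issubset(names_to_tasks[name]):
                    files_per_task[task].append(None)  # some dependency is missing for this frame
                else:
                    paths = [all_files[dep][name] for dep in deps]
                    files_per_task[task].append(paths if len(deps) > 1 else paths[0])
        return files_per_task, all_names

    # hsv depends only on rgb, so it gets rgb's path for every frame rgb has;
    # depth is missing frame 2.npz, which becomes None in "fill" mode.
    files, names = build_files_per_task(
        all_files={"rgb": {"1.npz": "rgb/1.npz", "2.npz": "rgb/2.npz"}, "depth": {"1.npz": "depth/1.npz"}},
        task_deps={"rgb": ["rgb"], "hsv": ["rgb"], "depth": ["depth"]})
    assert files["hsv"] == ["rgb/1.npz", "rgb/2.npz"]
    assert files["depth"] == ["depth/1.npz", None]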
dronescapes_reader/npz_representation.py
CHANGED
@@ -6,9 +6,10 @@ import torch as tr
 
 class NpzRepresentation:
     """Generic Task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well"""
-    def __init__(self, name: str, n_channels: int):
+    def __init__(self, name: str, n_channels: int, dependencies: list[str] | None = None):
         self.name = name
         self.n_channels = n_channels
+        self.dependencies: list[str] = [name] if dependencies is None else dependencies
         self.classes: list[str] | None = None
         self.normalization: str | None = None
         self.min: tr.Tensor | None = None
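A small sketch of the new default, assuming nothing beyond the constructor shown above:

    from dronescapes_reader.npz_representation import NpzRepresentation

    plain = NpzRepresentation("edges_gb", n_channels=1)
    assert plain.dependencies == ["edges_gb"]   # [name] if dependencies is None: a task reads its own files

    derived = NpzRepresentation("hsv", n_channels=3, dependencies=["rgb"])
    assert derived.dependencies == ["rgb"]      # reads rgb's files instead of its own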
scripts/dronescapes_viewer.ipynb
CHANGED
The diff for this file is too large to render. See the raw diff.
scripts/dronescapes_viewer.py
CHANGED
@@ -9,9 +9,10 @@ from torch.utils.data import DataLoader
 import random
 
 def main():
+    assert len(sys.argv) == 2, f"Usage ./dronescapes_viewer.py /path/to/dataset"
     reader = MultiTaskDataset(sys.argv[1], task_names=list(dronescapes_task_types.keys()),
                               task_types=dronescapes_task_types,
-                              … (old line 14 truncated in the render)
+                              handle_missing_data="fill_nan",
                               normalization="min_max", cache_task_stats=True)
     print(reader)
 
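Putting it together, a hypothetical minimal invocation mirroring the updated viewer script; the dataset path is a placeholder, the import path is assumed from the package layout, and the `fill_nan` semantics are inferred from the `fill_*` naming:

    from dronescapes_reader import MultiTaskDataset, dronescapes_task_types

    reader = MultiTaskDataset("/path/to/dronescapes",
                              task_names=["rgb", "hsv", "depth_dpt"],
                              task_types=dronescapes_task_types,
                              handle_missing_data="fill_nan",  # presumably fills frames missing a task with NaNs
                              normalization="min_max", cache_task_stats=True)
    print(reader)  # hsv rows resolve to rgb's .npz paths via dependencies=["rgb"]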