More updates to dronescapes: added TaskMapper and more control over mappings. Representations can now declare generic dependencies, not just semantic ones.
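As a rough usage sketch of the new mechanism (the task name `depth_binarized`, the `BinarizedDepth` class and the import path are assumptions for illustration, not part of this commit), a derived task now just declares which existing tasks it depends on and a merge_fn that turns the loaded dependency data into its own tensor:

# Hypothetical derived task; names and import path are assumptions, not part of this commit.
import torch as tr
from dronescapes_reader.dronescapes_representations import TaskMapper, DepthRepresentation

class BinarizedDepth(TaskMapper):
    def __init__(self, dep: DepthRepresentation):
        super().__init__("depth_binarized", dependencies=[dep], merge_fn=self._merge_fn, n_channels=1)

    def _merge_fn(self, dep_data: list[tr.Tensor]) -> tr.Tensor:
        return (dep_data[0] > 0.5).float()      # single dependency: threshold the loaded depth map

    def plot_fn(self, x: tr.Tensor):
        return (x.squeeze().numpy() * 255).astype("uint8")

dpt = DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999)
binarized = BinarizedDepth(dpt)                 # binarized.dep_names == ["depth_dpt"]
# MultiTaskDataset only needs depth_dpt files on disk; load_from_disk loads them and calls merge_fn.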
dronescapes_reader/dronescapes_representations.py
CHANGED
@@ -93,57 +93,32 @@ class SemanticRepresentation(NpzRepresentation):
             new_images[x_argmax == i] = self.color_map[i]
         return new_images
 
-    ... [removed lines not recoverable from the rendered diff]
-    @staticmethod
-    def _default_merge_fn(dep_data: list[np.ndarray]) -> np.ndarray:
-        if len(dep_data) > 1:
-            raise ValueError(f"default_merge_fn doesnt' work with >1 dependencies: {len(dep_data)}")
-        return dep_data[0]
-
-    def _make_one(self, path: Path, mapping: dict[str, list[str]],
-                  original_classes: list[str]) -> np.ndarray:
-        semantic_dep_data: np.ndarray = NpzRepresentation.load_from_disk(self, path).numpy()
-        semantic_dep_data = semantic_dep_data.argmax(-1) if len(semantic_dep_data.shape) == 3 else semantic_dep_data
-        assert len(semantic_dep_data.shape) == 2, f"Only argmaxed data supported, got: {semantic_dep_data.shape}"
-        assert semantic_dep_data.dtype in (np.uint8, np.uint16), semantic_dep_data.dtype
-        mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v)
-                                                      for _v in v] for k, v in mapping.items()}
-        flat_mapping = {}
-        for k, v in mapping_ix.items():
-            for _v in v:
-                flat_mapping[_v] = k
-        mapped_data = np.vectorize(flat_mapping.get)(semantic_dep_data).astype(np.uint8)
-        return mapped_data
-
-    def load_from_disk(self, path: Path | list[Path]):
-        # note: assuming SemanticRepresentation for all deps. TODO: generic deps.
+def semantic_mapper(semantic_original: np.ndarray, mapping: dict[str, list[str]],
+                    original_classes: list[str]) -> np.ndarray:
+    """maps a bigger semantic segmentation to a smaller one"""
+    assert len(semantic_original.shape) == 2, f"Only argmaxed data supported, got: {semantic_original.shape}"
+    assert np.issubdtype(semantic_original.dtype, np.integer), semantic_original.dtype
+    mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v) for _v in v] for k, v in mapping.items()}
+    flat_mapping = {}
+    for k, v in mapping_ix.items():
+        for _v in v:
+            flat_mapping[_v] = k
+    mapped_data = np.vectorize(flat_mapping.get)(semantic_original).astype(np.uint8)
+    return mapped_data
+
+class TaskMapper(NpzRepresentation):
+    def __init__(self, *args, merge_fn: Callable[[list[np.ndarray]], tr.Tensor], **kwargs):
+        super().__init__(*args, **kwargs)
+        assert len(self.dependencies) > 0 and self.dep_names[0] != self.name, "Need at least one dependency"
+        self.merge_fn = merge_fn
+
+    def load_from_disk(self, path: Path | list[Path]) -> tr.Tensor:
         paths = [path] if isinstance(path, Path) else path
-        ... [removed lines not recoverable from the rendered diff]
-        res_torch = F.one_hot(tr.from_numpy(res).long(), num_classes=self.n_classes).float()
-        return res_torch
+        dep_data = [dep.load_from_disk(path) for dep, path in zip(self.dependencies, paths)]
+        return self.merge_fn(dep_data)
+
+    def plot_fn(self, x):
+        raise NotImplementedError("Must be overriden by the user")
 
 color_map_8classes = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
                       [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]

@@ -207,27 +182,48 @@ mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 1
                        [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                        [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]]
 
-... [removed lines (old mapping and task-list definitions) not recoverable from the rendered diff]
+class SemanticMask2FormerMapillaryConvertedPaper(TaskMapper):
+    def __init__(self, dep: NpzRepresentation):
+        super().__init__("semantic_mask2former_swin_mapillary_converted",
+                         dependencies=[dep], merge_fn=self._merge_fn, n_channels=8)
+        self.mapping = {
+            "land": ["Terrain", "Sand", "Snow"],
+            "forest": ["Vegetation"],
+            "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb",
+                            "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner",
+                            "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera",
+                            "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame",
+                            "Bike Rack"],
+            "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole",
+                     "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk",
+                     "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
+            "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle",
+                               "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
+            "water": ["Water"],
+            "sky": ["Sky"],
+            "hill": ["Mountain"]
+        }
+        self.color_map = color_map_8classes
+        self.original_classes = mapillary_classes
+        self.classes = list(self.mapping.keys())
+        self.n_classes = len(self.classes)
+
+    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
+        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
+        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
+        for i in range(self.n_classes):
+            new_images[x_argmax == i] = self.color_map[i]
+        return new_images
+
+    def _merge_fn(self, dep_data: list[np.ndarray]) -> tr.Tensor:
+        m2f_mapillary = dep_data[0].argmax(-1).numpy()
+        m2f_mapillary_converted = semantic_mapper(m2f_mapillary, self.mapping, self.original_classes)
+        converted_oh = F.one_hot(tr.from_numpy(m2f_mapillary_converted).long(), num_classes=self.n_classes).float()
+        return converted_oh
+
+_tasks: list[NpzRepresentation] = [ # some pre-baked representations
+    rgb := RGBRepresentation("rgb"),
+    HSVRepresentation("hsv", dependencies=[rgb]),
     EdgesRepresentation("edges_dexined"),
     EdgesRepresentation("edges_gb"),
     DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),

@@ -237,12 +233,10 @@ tasks = [ # some pre-baked representations
     NormalsRepresentation("normals_sfm_manual202204"),
     OpticalFlowRepresentation("opticalflow_rife"),
     SemanticRepresentation("semantic_segprop8", classes=8, color_map=color_map_8classes),
-    SemanticMapper("semantic_mask2former_swin_mapillary_converted", original_classes=[mapillary_classes],
-                   mapping=[m2f_mapillary_to_8_classes], color_map=color_map_8classes,
-                   dependencies=["semantic_mask2former_mapillary_49189528_0"]),
     SemanticRepresentation("semantic_mask2former_coco_47429163_0", classes=coco_classes, color_map=coco_color_map),
-    SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes,
-                           ... [continuation line not recoverable from the rendered diff]
+    m2f_mapillary := SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes,
+                                            color_map=mapillary_color_map),
+    SemanticMask2FormerMapillaryConvertedPaper(m2f_mapillary),
     NpzRepresentation("softseg_gb", 3),
 ]
-dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in
+dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in _tasks}
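A toy illustration of what the new semantic_mapper helper does (the class names below are made up; only the function itself comes from the diff): every original class id is relabelled with the index of the target class whose list contains it.

# Made-up classes, real function: collapse 4 original classes into 3 target classes.
import numpy as np

original_classes = ["grass", "tree", "building", "river"]
mapping = {"vegetation": ["grass", "tree"], "residential": ["building"], "water": ["river"]}
labels = np.array([[0, 1], [2, 3]], dtype=np.uint8)      # argmaxed original segmentation
semantic_mapper(labels, mapping, original_classes)       # -> [[0, 0], [1, 2]]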
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -229,8 +229,9 @@ class MultiTaskDataset(Dataset):
 
         relevant_tasks_for_files = set()  # hsv requires only rgb, so we look at dependencies later on
         for task_name in task_names:
-            relevant_tasks_for_files.update(task_types[task_name].
-            ... [removed line not recoverable from the rendered diff]
+            relevant_tasks_for_files.update(task_types[task_name].dep_names)
+        if (diff := relevant_tasks_for_files.difference(all_files)) != set():
+            raise FileNotFoundError(f"Missing files for {diff}.\nFound on disk: {[*all_files]}")
         names_to_tasks: dict[str, list[str]] = {}  # {name: [task]}
         for task_name in relevant_tasks_for_files:  # just the relevant tasks
             for path_name in all_files[task_name].keys():

@@ -247,7 +248,7 @@ class MultiTaskDataset(Dataset):
         files_per_task: dict[str, list[str | None] | list[list[str] | None]] = {task: [] for task in task_names}
         for name in all_names:
             for task in task_names:
-                all_deps_exist = set(deps := task_types[task].
+                all_deps_exist = set(deps := task_types[task].dep_names).issubset(names_to_tasks[name])
                 if not all_deps_exist:
                     files_per_task[task].append(None)  # if any of the deps don't exist for this task, skip it.
                 else:
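Sketch of how the dependency names now drive file selection (the scene name and on-disk layout are made up; the hsv-from-rgb relation comes from the task definitions in dronescapes_representations.py):

# Illustrative only: a sample is kept for a task iff files for all of the task's dep_names
# were found for that scene name; derived tasks such as hsv therefore only need rgb on disk.
names_to_tasks = {"scene_0001": ["rgb", "edges_gb"]}          # tasks found on disk for this name (made up)
deps = dronescapes_task_types["hsv"].dep_names                # ["rgb"], per the task definitions
all_deps_exist = set(deps).issubset(names_to_tasks["scene_0001"])   # True -> hsv is usable for scene_0001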
dronescapes_reader/npz_representation.py
CHANGED
@@ -6,10 +6,12 @@ import torch as tr
 
 class NpzRepresentation:
     """Generic Task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well"""
-    def __init__(self, name: str, n_channels: int, dependencies: list[
+    def __init__(self, name: str, n_channels: int, dependencies: list[NpzRepresentation] | None = None):
         self.name = name
         self.n_channels = n_channels
-        ... [removed line not recoverable from the rendered diff]
+        dependencies = deps = [self] if dependencies is None else dependencies
+        self.dependencies: list[NpzRepresentation] = dependencies
+        assert all(isinstance(dep, NpzRepresentation) for dep in deps), f"{self}: {dict(zip(deps, map(type, deps)))}"
         self.classes: list[str] | None = None
         self.normalization: str | None = None
         self.min: tr.Tensor | None = None

@@ -22,6 +24,11 @@ class NpzRepresentation:
         """if we have self.classes"""
         return self.classes is not None
 
+    @property
+    def dep_names(self) -> list[str]:
+        """The names of the dependencies of this representation"""
+        return [dep.name for dep in self.dependencies]
+
     def set_normalization(self, normalization: str, stats: tuple[tr.Tensor, tr.Tensor, tr.Tensor, tr.Tensor]):
         """sets the normalization"""
         assert normalization in ("min_max", "standardization"), normalization
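Quick illustration of the new default (a sketch, not part of the diff): with no explicit dependencies a representation depends on itself, so its dep_names is just its own name, which is what MultiTaskDataset then looks for on disk.

rep = NpzRepresentation("softseg_gb", 3)   # no dependencies passed -> defaults to [self]
assert rep.dependencies == [rep]           # the representation is its own (only) dependency
assert rep.dep_names == ["softseg_gb"]     # so the dataset just needs softseg_gb files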
scripts/semantic_mapper.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.