almost done
dronescapes_reader/dronescapes_representations.py
CHANGED
@@ -93,37 +93,56 @@ class SemanticRepresentation(NpzRepresentation):
             new_images[x_argmax == i] = self.color_map[i]
         return new_images

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+class SemanticMapper(SemanticRepresentation):
+    """Maps one or more semantic segmentations to a final one + a merge fn. Copy-pasta from VRE"""
+    def __init__(self, *args, original_classes: list[list[str]], mapping: list[dict[str, list[str]]],
+                 color_map: list[tuple[int, int, int]],
+                 merge_fn: Callable[[list[np.ndarray]], np.ndarray] | None = None, **kwargs):
+        super().__init__(*args, classes=list(mapping[0].keys()), color_map=color_map, **kwargs)
+        assert len(self.dependencies) >= 1, "No dependencies provided. Need at least one semantic segmentation to map."
+        assert isinstance(mapping, list), type(mapping)
+        assert len(mapping) == (B := len(self.dependencies)), (len(mapping), B)
+        assert (A := len(original_classes)) == len(self.dependencies), (A, B)
+        assert all(m.keys() == mapping[0].keys() for m in mapping), [list(m.keys()) for m in mapping]
+        assert len(color_map) == len(mapping[0].keys()), (len(color_map), len(mapping[0].keys()))
+        self.original_classes = original_classes
+        self.mapping = mapping
+        self.merge_fn = merge_fn if merge_fn is not None else SemanticMapper._default_merge_fn
+
+    @staticmethod
+    def _default_merge_fn(dep_data: list[np.ndarray]) -> np.ndarray:
+        if len(dep_data) > 1:
+            raise ValueError(f"default_merge_fn doesnt' work with >1 dependencies: {len(dep_data)}")
+        return dep_data[0]
+
+    def _make_one(self, path: Path, mapping: dict[str, list[str]],
+                  original_classes: list[str]) -> np.ndarray:
+        semantic_dep_data: np.ndarray = NpzRepresentation.load_from_disk(self, path).numpy()
+        semantic_dep_data = semantic_dep_data.argmax(-1) if len(semantic_dep_data.shape) == 3 else semantic_dep_data
+        assert len(semantic_dep_data.shape) == 2, f"Only argmaxed data supported, got: {semantic_dep_data.shape}"
+        assert semantic_dep_data.dtype in (np.uint8, np.uint16), semantic_dep_data.dtype
+        mapping_ix = {list(mapping.keys()).index(k): [original_classes.index(_v)
+                                                      for _v in v] for k, v in mapping.items()}
+        flat_mapping = {}
+        for k, v in mapping_ix.items():
+            for _v in v:
+                flat_mapping[_v] = k
+        mapped_data = np.vectorize(flat_mapping.get)(semantic_dep_data).astype(np.uint8)
+        return mapped_data

-
-
-
+    def load_from_disk(self, path: Path | list[Path]):
+        # note: assuming SemanticRepresentation for all deps. TODO: generic deps.
+        paths = [path] if isinstance(path, Path) else path
+        assert len(paths) == len(self.dependencies), (len(path), len(self.dependencies))
+        individual_semantics = []
+        for path, mapping, original_classes in zip(paths, self.mapping, self.original_classes):
+            individual_semantics.append(self._make_one(path, mapping, original_classes))
+        res = self.merge_fn(individual_semantics)
+        res_torch = F.one_hot(tr.from_numpy(res).long(), num_classes=self.n_classes).float()
+        return res_torch
+
+color_map_8classes = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
+                      [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
 coco_classes = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                 "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                 "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
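For reference, the remapping at the heart of `SemanticMapper._make_one` above is purely index-level: every class index of the source segmentation is sent to the index of the merged class it belongs to via a flat lookup, which is then applied element-wise with `np.vectorize`. A self-contained toy sketch of that idea (the class names and label map below are illustrative, not the repository's actual taxonomy):

import numpy as np

original_classes = ["Sky", "Vegetation", "Terrain", "Road", "Sidewalk", "Car"]
mapping = {"sky": ["Sky"], "land": ["Vegetation", "Terrain"],
           "road": ["Road", "Sidewalk"], "little-objects": ["Car"]}

# original class index -> merged class index, built the same way as flat_mapping in _make_one
flat_mapping = {}
for new_ix, old_names in enumerate(mapping.values()):
    for old_name in old_names:
        flat_mapping[original_classes.index(old_name)] = new_ix

labels = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.uint8)         # argmaxed per-pixel class ids
mapped = np.vectorize(flat_mapping.get)(labels).astype(np.uint8)  # same shape, merged class ids
print(mapped)  # [[0 1 1] [2 2 3]] -- "Vegetation" and "Terrain" both collapse into "land", etc.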
@@ -184,24 +203,41 @@ mapillary_color_map = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 1
                        [0, 60, 100], [0, 0, 142], [0, 0, 90], [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                        [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10]]

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+m2f_mapillary_to_8_classes = {
+    "land": ["Terrain", "Sand", "Snow"],
+    "forest": ["Vegetation"],
+    "residential": ["Building", "Utility Pole", "Pole", "Fence", "Wall", "Manhole", "Street Light", "Curb",
+                    "Guard Rail", "Caravan", "Junction Box", "Traffic Sign (Front)", "Billboard", "Banner",
+                    "Mailbox", "Traffic Sign (Back)", "Bench", "Fire Hydrant", "Trash Can", "CCTV Camera",
+                    "Traffic Light", "Barrier", "Rail Track", "Phone Booth", "Curb Cut", "Traffic Sign Frame",
+                    "Bike Rack"],
+    "road": ["Road", "Lane Marking - General", "Sidewalk", "Bridge", "Other Vehicle", "Motorcyclist", "Pothole",
+             "Catch Basin", "Car Mount", "Tunnel", "Parking", "Service Lane", "Lane Marking - Crosswalk",
+             "Pedestrian Area", "On Rails", "Bike Lane", "Crosswalk - Plain"],
+    "little-objects": ["Car", "Person", "Truck", "Boat", "Wheeled Slow", "Trailer", "Ground Animal", "Bicycle",
+                       "Motorcycle", "Bird", "Bus", "Ego Vehicle", "Bicyclist", "Other Rider"],
+    "water": ["Water"],
+    "sky": ["Sky"],
+    "hill": ["Mountain"]
+}
+
+tasks = [ # some pre-baked representations
+    RGBRepresentation("rgb"),
+    HSVRepresentation("hsv", dependencies=["rgb"]),
+    EdgesRepresentation("edges_dexined"),
+    EdgesRepresentation("edges_gb"),
+    DepthRepresentation("depth_dpt", min_depth=0, max_depth=0.999),
+    DepthRepresentation("depth_sfm_manual202204", min_depth=0, max_depth=300),
+    DepthRepresentation("depth_ufo", min_depth=0, max_depth=1),
+    DepthRepresentation("depth_marigold", min_depth=0, max_depth=1),
+    NormalsRepresentation("normals_sfm_manual202204"),
+    OpticalFlowRepresentation("opticalflow_rife"),
+    SemanticRepresentation("semantic_segprop8", classes=8, color_map=color_map_8classes),
+    SemanticMapper("semantic_mask2former_swin_mapillary_converted", original_classes=[mapillary_classes],
+                   mapping=[m2f_mapillary_to_8_classes], color_map=color_map_8classes),
+    SemanticRepresentation("semantic_mask2former_coco_47429163_0", classes=coco_classes, color_map=coco_color_map),
+    SemanticRepresentation("semantic_mask2former_mapillary_49189528_0", classes=mapillary_classes,
+                           color_map=mapillary_color_map),
+    NpzRepresentation("softseg_gb", 3),
+]
+dronescapes_task_types: dict[str, NpzRepresentation] = {task.name: task for task in tasks}
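Note that in the `tasks` list above `SemanticMapper` is configured with a single source (one entry each in `mapping` and `original_classes`), so `_default_merge_fn` is enough. With several mapped sources a custom `merge_fn` would be needed, since the default raises for more than one dependency. A hypothetical sketch of such a function (the majority-vote policy is an illustration, not something the repository ships):

import numpy as np

def majority_vote_merge(dep_data: list[np.ndarray]) -> np.ndarray:
    """Per-pixel majority vote across several already-mapped (H, W) label maps."""
    stacked = np.stack(dep_data, axis=0)                        # (n_deps, H, W)
    n_classes = int(stacked.max()) + 1
    votes = np.stack([(stacked == c).sum(axis=0) for c in range(n_classes)], axis=0)
    return votes.argmax(axis=0).astype(np.uint8)                # (H, W)

# e.g. SemanticMapper(..., original_classes=[c1, c2], mapping=[m1, m2], merge_fn=majority_vote_merge)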
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -41,7 +41,6 @@ class MultiTaskDataset(Dataset):
         - 'drop': Drop the data point if any of the representations is missing.
         - 'fill_{none,zero,nan}': Fill the missing data with Nones, zeros or NaNs.
     - files_suffix: What suffix to look for when creating the dataset. Valid values: 'npy' or 'npz'.
-    - files_per_repr_overwrites: A dictionay {src: target} that maps one task to another's data (i.e. {'hsv': 'rgb'})
     - cache_task_stats: If set to True, the statistics will be cached at '{path}/.task_statistics.npz'. Can be enabled
       using the environmental variable STATS_CACHE=1. Defaults to False.
     - batch_size_stats: Controls the batch size during statistics computation. Can be enabled by environmental variable
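The docstring above documents the missing-data policy and the statistics cache. A minimal constructor sketch exercising those options (the import path, dataset path and task subset are assumptions for illustration; the argument names come from this diff and the viewer script below):

from dronescapes_reader import MultiTaskDataset, dronescapes_task_types  # import path assumed

reader = MultiTaskDataset("/path/to/dataset",
                          task_names=["rgb", "depth_dpt", "semantic_segprop8"],  # subset of the registry
                          task_types=dronescapes_task_types,
                          handle_missing_data="drop",      # or "fill_none" / "fill_zero" / "fill_nan"
                          normalization="min_max",
                          cache_task_stats=True)           # same effect as STATS_CACHE=1 per the docstring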
@@ -211,7 +210,8 @@ class MultiTaskDataset(Dataset):
         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
         return in_files

-    def _build_dataset(self, task_types: dict[str, NpzRepresentation],
+    def _build_dataset(self, task_types: dict[str, NpzRepresentation],
+                       task_names: list[str] | None) -> BuildDatasetTuple:
         logger.debug(f"Building dataset from: '{self.path}'")
         all_npz_files = self._get_all_npz_files()
         all_files: dict[str, dict[str, str]] = {k: {_v.name: _v for _v in v} for k, v in all_npz_files.items()}
scripts/dronescapes_viewer.py
CHANGED
@@ -11,9 +11,8 @@ import random
 def main():
     assert len(sys.argv) == 2, f"Usage ./dronescapes_viewer.py /path/to/dataset"
     reader = MultiTaskDataset(sys.argv[1], task_names=list(dronescapes_task_types.keys()),
-
-
-                              normalization="min_max", cache_task_stats=True)
+                              task_types=dronescapes_task_types, handle_missing_data="fill_nan",
+                              normalization="min_max", cache_task_stats=True)
     print(reader)

     print("== Shapes ==")