refactored all code and will soon include global stats as well
dronescapes_reader/dronescapes_representations.py
CHANGED
@@ -4,7 +4,7 @@ from pathlib import Path
 import numpy as np
 import torch as tr
 import flow_vis
-from skimage.color import rgb2hsv, hsv2rgb
+from skimage.color import rgb2hsv
 from overrides import overrides
 from matplotlib.cm import hot  # pylint: disable=no-name-in-module
 from torch.nn import functional as F
@@ -14,105 +14,54 @@ try:
 except ImportError:
     from .npz_representation import NpzRepresentation
 
-class ColorRepresentation(NpzRepresentation):
-    def __init__(self, name: str, n_channels: int):
-        super().__init__(name)
-        self._n_channels = n_channels
+class RGBRepresentation(NpzRepresentation):
+    def __init__(self, name: str):
+        super().__init__(name, n_channels=3)
 
+class HSVRepresentation(RGBRepresentation):
     @overrides
     def load_from_disk(self, path: Path) -> tr.Tensor:
-        res = super().load_from_disk(path)
-        return res.float() / 255
-
-    @overrides
-    def save_to_disk(self, data: tr.Tensor, path: Path):
-        return super().save_to_disk((data * 255).byte(), path)
-
-    @property
-    @overrides
-    def n_channels(self) -> int:
-        return self._n_channels
-
-class HSVRepresentation(ColorRepresentation):
-    @overrides
-    def load_from_disk(self, path: Path) -> tr.Tensor:
-        rgb = NpzRepresentation.load_from_disk(self, path)
+        rgb = super().load_from_disk(path)
         return tr.from_numpy(rgb2hsv(rgb)).float()
 
-    @overrides
-    def save_to_disk(self, data: tr.Tensor, path: Path):
-        rgb = tr.from_numpy(hsv2rgb(data) * 255).byte()
-        NpzRepresentation.save_to_disk(rgb, path)
-
 class EdgesRepresentation(NpzRepresentation):
-    @overrides
-    def load_from_disk(self, path: Path) -> tr.Tensor:
-        res = super().load_from_disk(path).float()
-        assert len(res.shape) == 3 and res.shape[-1] == 1
-        return res
-
-    @overrides
-    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
-        return (x.detach().repeat(1, 1, 3) * 255).cpu().numpy().astype(np.uint8)
-
-    @property
-    @overrides
-    def n_channels(self) -> int:
-        return 1
+    def __init__(self, name: str):
+        super().__init__(name, n_channels=1)
 
 class DepthRepresentation(NpzRepresentation):
     """DepthRepresentation. Implements depth task-specific stuff, like hotmap."""
-    def __init__(self, name: str, min_depth: float, max_depth: float, ...):
-        super().__init__(...)
-        assert 0 <= min_depth < max_depth, (min_depth, max_depth)
-        self.min_depth = min_depth
-        self.max_depth = max_depth
+    def __init__(self, name: str):
+        super().__init__(name, n_channels=1)
 
     @overrides
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
         x = x.detach().clip(0, 1).squeeze().cpu().numpy()
+        _min, _max = np.percentile(x, [5, 95])
+        x = np.nan_to_num((x - _min) / (_max - _min), False, 0, 0, 0).clip(0, 1)
         y: np.ndarray = hot(x)[..., 0:3] * 255
         return y.astype(np.uint8)
 
-    @overrides
-    def load_from_disk(self, path: Path) -> tr.Tensor:
-        res = super().load_from_disk(path).squeeze().unsqueeze(-1)
-        res = res.float().clip(self.min_depth, self.max_depth)
-        res = (res - self.min_depth) / (self.max_depth - self.min_depth)
-        return res
-
-    @property
-    @overrides
-    def n_channels(self) -> int:
-        return 1
-
 class NormalsRepresentation(NpzRepresentation):
-    @property
-    @overrides
-    def n_channels(self) -> int:
-        return 3
+    def __init__(self, name: str):
+        super().__init__(name, n_channels=3)
 
 class OpticalFlowRepresentation(NpzRepresentation):
     """OpticalFlowRepresentation. Implements depth task-specific stuff, like using flow_vis."""
-    @overrides
-    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
-        return flow_vis.flow_to_color(x.squeeze().nan_to_num(0).detach().cpu().numpy())
+    def __init__(self, name: str):
+        super().__init__(name, n_channels=2)
 
     @overrides
-    def load_from_disk(self, path: Path) -> tr.Tensor:
-        ...
-
-    @overrides
-    def n_channels(self) -> int:
-        return 2
+    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
+        _min, _max = x.min(0)[0].min(0)[0], x.max(0)[0].max(0)[0]
+        x = ((x - _min) / (_max - _min)).nan_to_num(0, 0, 0).detach().cpu().numpy()
+        return flow_vis.flow_to_color(x)
 
 class SemanticRepresentation(NpzRepresentation):
     """SemanticRepresentation. Implements depth task-specific stuff, like using flow_vis."""
     def __init__(self, *args, classes: int | list[str], color_map: list[tuple[int, int, int]], **kwargs):
-        super().__init__(*args, **kwargs)
+        self.n_classes = len(list(range(classes)) if isinstance(classes, int) else classes)
+        super().__init__(*args, **kwargs, n_channels=self.n_classes)
         self.classes = list(range(classes)) if isinstance(classes, int) else classes
-        self.n_classes = len(self.classes)
         self.color_map = color_map
         assert len(color_map) == self.n_classes and self.n_classes > 1, (color_map, self.n_classes)
 
@@ -128,31 +77,26 @@ class SemanticRepresentation(NpzRepresentation):
 
     @overrides
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
-        x_argmax = ...
-        new_images = np.zeros((* ...
+        x_argmax = x.squeeze().nan_to_num(0).detach().argmax(-1).cpu().numpy()
+        new_images = np.zeros((*x_argmax.shape, 3), dtype=np.uint8)
         for i in range(self.n_classes):
-            new_images[ ...
+            new_images[x_argmax == i] = self.color_map[i]
         return new_images
 
-    @property
-    @overrides
-    def n_channels(self) -> int:
-        return self.n_classes
-
 _color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],
               [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]
 _m2f_name = "semantic_mask2former_swin_mapillary_converted"
 dronescapes_task_types = {  # some pre-baked representations
-    "rgb": ...
-    "hsv": HSVRepresentation("hsv" ...
-    "edges_dexined": ...
-    "depth_dpt": ...
-    "depth_sfm_manual202204": ...
+    "rgb": RGBRepresentation("rgb"),
+    "hsv": HSVRepresentation("hsv"),
+    "edges_dexined": EdgesRepresentation("edges_dexined"),
+    "edges_gb": EdgesRepresentation("edges_gb"),
+    "depth_dpt": DepthRepresentation("depth_dpt"),
+    "depth_sfm_manual202204": DepthRepresentation("depth_sfm_manual202204"),
+    "depth_ufo": DepthRepresentation("depth_ufo"),
     "normals_sfm_manual202204": NormalsRepresentation("normals_sfm_manual202204"),
-    "depth_ufo": ...
-    "opticalflow_rife": OpticalFlowRepresentation,
+    "opticalflow_rife": OpticalFlowRepresentation("opticalflow_rife"),
     "semantic_segprop8": SemanticRepresentation("semantic_segprop8", classes=8, color_map=_color_map),
     _m2f_name: SemanticRepresentation(_m2f_name, classes=8, color_map=_color_map),
-    "softseg_gb": ...
-    "edges_gb": EdgesRepresentation("edges_gb"),
+    "softseg_gb": NpzRepresentation("softseg_gb", 3),
 }
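A note on the new DepthRepresentation.plot_fn: it now stretches values between the 5th and 95th percentiles before applying the hot colormap, so a few outlier pixels no longer wash out the rendering. A minimal sketch of the same stretch on synthetic data (shapes and values are made up for illustration, not from the repo):

import numpy as np

depth = np.random.rand(240, 320).astype(np.float32)  # hypothetical (H, W) depth map in [0, 1]
lo, hi = np.percentile(depth, [5, 95])               # robust bounds instead of raw min/max
stretched = np.nan_to_num((depth - lo) / (hi - lo), nan=0.0, posinf=0.0, neginf=0.0).clip(0, 1)
print(stretched.min(), stretched.max())              # ~0.0 and ~1.0 after the stretch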
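Since n_channels is now a constructor argument of NpzRepresentation (instead of an abstract property), adding a new pre-baked task reduces to declaring its channel count. A sketch, assuming the module paths below import as written (the task name is hypothetical):

from dronescapes_reader.npz_representation import NpzRepresentation
from dronescapes_reader.dronescapes_representations import dronescapes_task_types

class BinaryMaskRepresentation(NpzRepresentation):  # hypothetical single-channel task
    def __init__(self, name: str):
        super().__init__(name, n_channels=1)

dronescapes_task_types["binary_mask"] = BinaryMaskRepresentation("binary_mask")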
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -12,6 +12,7 @@ import torch as tr
 import numpy as np
 from torch.utils.data import Dataset, DataLoader
 from lovely_tensors import monkey_patch
+from tqdm import trange
 
 try:
     from npz_representation import NpzRepresentation
@@ -26,13 +27,25 @@ TaskStatistics = Tuple[tr.Tensor, tr.Tensor, tr.Tensor, tr.Tensor] # (min, max,
 class MultiTaskDataset(Dataset):
     """
     MultiTaskDataset implementation. Reads data from npz files and returns them as a dict.
-
     Parameters:
     - path: Path to the directory containing the npz files.
    - task_names: List of tasks that are present in the dataset. If set to None, will infer from the files on disk.
+    - task_types: A dictionary of form {task_name: task_type} for the reader to call to read from disk, plot etc.
+    - normalization: The normalization type used in __getitem__. Valid options are:
+      - None: Reads the data as-is using task.read_from_disk(path)
+      - 'min_max': Calls task.normalize(task.read_from_disk(path), mins[task], maxs[task])
+      - 'standardization': Calls task.standardize(task.read_from_disk(path), means[task], stds[task])
+      If normalization is not None, then task-level statistics will be computed. The environment variable
+      STATS_PBAR=0/1 enables tqdm during statistics computation.
     - handle_missing_data: Modes to handle missing data. Valid options are:
-      - drop: Drop the data point if any of the representations is missing.
-      - ...
+      - 'drop': Drop the data point if any of the representations is missing.
+      - 'fill_{none,zero,nan}': Fill the missing data with Nones, zeros or NaNs.
+    - files_suffix: What suffix to look for when creating the dataset. Valid values: 'npy' or 'npz'.
+    - files_per_repr_overwrites: A dictionary {src: target} that maps one task to another's data (i.e. {'hsv': 'rgb'})
+    - cache_task_stats: If set to True, the statistics will be cached at '{path}/.task_statistics.npz'. Can be enabled
+      using the environment variable STATS_CACHE=1. Defaults to False.
+    - batch_size_stats: Controls the batch size during statistics computation. Can be set by the environment variable
+      STATS_BATCH_SIZE. Defaults to 1.
 
     Expected directory structure:
     path/
@@ -43,40 +56,60 @@ class MultiTaskDataset(Dataset):
     Names can be in a different format (i.e. 2022-01-01.npz), but must be consistent and equal across all tasks.
     """
 
-    def __init__(self, path: Path, ...
-                 ...
+    def __init__(self, path: Path,
+                 task_names: list[str] | None,
+                 task_types: dict[str, type],
+                 normalization: str | None,
+                 handle_missing_data: str = "fill_none",
+                 files_suffix: str = "npz",
                  files_per_repr_overwrites: dict[str, str] | None = None,
-                 ...):
+                 cache_task_stats: bool = (os.getenv("STATS_CACHE", "0") == "1"),
+                 batch_size_stats: int = int(os.getenv("STATS_BATCH_SIZE", "1")),
+                 statistics: dict[str, TaskStatistics] | None = None,
+                 ):
         assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
         assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
             f"Invalid handle_missing_data mode: {handle_missing_data}"
         assert files_suffix == "npz", "Only npz supported right now (though trivial to update)"
+        assert normalization is None or normalization in ("min_max", "standardization"), normalization
         self.path = Path(path).absolute()
+        self.normalization = normalization
         self.handle_missing_data = handle_missing_data
         self.suffix = files_suffix
         self.files_per_repr_overwrites = files_per_repr_overwrites
-        self.all_files_per_repr = self._get_all_npz_files()
-        self.files_per_repr, self.file_names = self._build_dataset()
-        if task_types is None:
-            logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.")
-            task_types = {}
+        self.files_per_repr, self.file_names = self._build_dataset() # filtered by 'drop' or 'fill_*' logic
+        self.cache_task_stats = cache_task_stats
+        self.batch_size_stats = batch_size_stats
 
         if task_names is None:
             task_names = list(self.files_per_repr.keys())
             logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).")
-        assert all(task in self.files_per_repr for task in task_names), (task_names, self.files_per_repr.keys())
-        self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names}
         assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
+        assert all(task in self.files_per_repr for task in task_names), (task_names, self.files_per_repr.keys())
+        self.task_types = {k: v for k, v in task_types.items() if k in task_names} # all task_types must be provided!
         self.task_names = sorted(task_names)
+        logger.info(f"Tasks used in this dataset: {self.task_names}")
+
         self._data_shape: tuple[int, ...] | None = None
         self._tasks: list[NpzRepresentation] | None = None
-        self.name_to_task = {task.name: task for task in self.tasks}
-        logger.info(f"Tasks used in this dataset: {self.task_names}")
         self._default_vals: dict[str, tr.Tensor] | None = None
-        self. ...
+        self._name_to_task: dict[str, NpzRepresentation] | None = None
+        if statistics is not None:
+            self._statistics = self._load_external_statistics(statistics)
+        else:
+            self._statistics = None if normalization is None else self._compute_statistics()
+        if self._statistics is not None:
+            for task_name, task in self.name_to_task.items():
+                task.statistics = self._statistics[task_name]
 
     # Public methods and properties
 
+    @property
+    def name_to_task(self) -> dict[str, NpzRepresentation]:
+        if self._name_to_task is None:
+            self._name_to_task = {task.name: task for task in self.tasks}
+        return self._name_to_task
+
     @property
     def default_vals(self) -> dict[str, tr.Tensor]:
         """default values for __getitem__ if item is not on disk but we retrieve a full batch anyway"""
@@ -91,7 +124,31 @@ class MultiTaskDataset(Dataset):
         """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
         first_npz = {task: [_v for _v in files if _v is not None][0] for task, files in self.files_per_repr.items()}
         data_shape = {task: self.name_to_task[task].load_from_disk(first_npz[task]).shape for task in self.task_names}
-        return data_shape
+        return {task: tuple(shape) for task, shape in data_shape.items()}
+
+    @property
+    def mins(self) -> dict[str, tr.Tensor]:
+        """returns a dict {task: mins[task]} for all the tasks if self.statistics exists"""
+        assert self.normalization is not None, "No statistics for normalization is None"
+        return {k: v[0] for k, v in self._statistics.items() if k in self.task_names}
+
+    @property
+    def maxs(self) -> dict[str, tr.Tensor]:
+        """returns a dict {task: maxs[task]} for all the tasks if self.statistics exists"""
+        assert self.normalization is not None, "No statistics for normalization is None"
+        return {k: v[1] for k, v in self._statistics.items() if k in self.task_names}
+
+    @property
+    def means(self) -> dict[str, tr.Tensor]:
+        """returns a dict {task: means[task]} for all the tasks if self.statistics exists"""
+        assert self.normalization is not None, "No statistics for normalization is None"
+        return {k: v[2] for k, v in self._statistics.items() if k in self.task_names}
+
+    @property
+    def stds(self) -> dict[str, tr.Tensor]:
+        """returns a dict {task: stds[task]} for all the tasks if self.statistics exists"""
+        assert self.normalization is not None, "No statistics for normalization is None"
+        return {k: v[3] for k, v in self._statistics.items() if k in self.task_names}
 
     @property
     def tasks(self) -> list[NpzRepresentation]:
@@ -99,17 +156,16 @@ class MultiTaskDataset(Dataset):
         Returns a list of instantiated tasks in the same order as self.task_names. Overwrite this to add
         new tasks and semantics (i.e. plot_fn or doing some preprocessing after loading from disk in some tasks).
         """
-        if self._tasks is None:
-            ...
-        self._tasks. ...
-        assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks)
+        if self._tasks is None:
+            self._tasks = []
+            for task_name in self.task_names:
+                t = self.task_types[task_name]
+                try:
+                    t = t(task_name) # hack for not isinstance(self.task_types, NpzRepresentation) but callable
+                except Exception:
+                    pass
+                self._tasks.append(t)
+            assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks)
         return self._tasks
 
     def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
@@ -122,7 +178,11 @@ class MultiTaskDataset(Dataset):
         res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names} # float32 always
         for i in range(len(items)):
             for k in self.task_names:
-                ...
+                try:
+                    res[k][i][:] = items[i][0][k] if items[i][0][k] is not None else float("nan")
+                except Exception as e:
+                    print(k, items)
+                    raise e
         return res, items_name, self.task_names
 
     # Private methods
@@ -145,10 +205,11 @@ class MultiTaskDataset(Dataset):
 
     def _build_dataset(self) -> BuildDatasetTuple:
         logger.debug(f"Building dataset from: '{self.path}'")
+        all_files_per_repr = self._get_all_npz_files()
         if self.handle_missing_data == "drop":
-            files_per_repr, common = self._build_dataset_drop_missing()
+            files_per_repr, common = self._build_dataset_drop_missing(all_files_per_repr)
         else:
-            files_per_repr, common = self._build_dataset_fill_missing()
+            files_per_repr, common = self._build_dataset_fill_missing(all_files_per_repr)
         if self.files_per_repr_overwrites is not None: # here we match for example 'hsv' to read also from 'rgb' dir
             for left, right in self.files_per_repr_overwrites.items():
                 if right not in (fpr := files_per_repr):
@@ -158,8 +219,7 @@ class MultiTaskDataset(Dataset):
                 files_per_repr[left] = files_per_repr[right]
         return files_per_repr, common
 
-    def _build_dataset_drop_missing(self) -> BuildDatasetTuple:
-        in_files = self.all_files_per_repr
+    def _build_dataset_drop_missing(self, in_files: dict[str, list[Path]]) -> BuildDatasetTuple:
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} # {node: {name: path}}
         common = set(x.name for x in next(iter(in_files.values())))
         for node in (nodes := in_files.keys()):
@@ -171,8 +231,7 @@ class MultiTaskDataset(Dataset):
         assert len(files_per_repr) > 0
         return files_per_repr, common
 
-    def _build_dataset_fill_missing(self) -> BuildDatasetTuple:
-        in_files = self.all_files_per_repr
+    def _build_dataset_fill_missing(self, in_files: dict[str, list[Path]]) -> BuildDatasetTuple:
        name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
        all_files = set(x.name for x in next(iter(in_files.values())))
        nodes = in_files.keys()
@@ -189,41 +248,78 @@ class MultiTaskDataset(Dataset):
         assert len(files_per_repr) > 0
         return files_per_repr, all_files
 
-    def _compute_statistics(self) -> dict[str, ...
+    def _compute_statistics(self) -> dict[str, TaskStatistics]:
         cache_path = self.path / f".task_statistics.npz"
         res: dict[str, TaskStatistics] = {}
-        if ...
+        if self.cache_task_stats and cache_path.exists():
             res = np.load(cache_path, allow_pickle=True)["arr_0"].item()
-            logger.info(f"Loaded task statistics: { {k: v.shape for k, v in res.items()} }")
+            logger.info(f"Loaded task statistics: { {k: tuple(v[0].shape) for k, v in res.items()} } from {cache_path}")
         missing_tasks = list(set(self.task_names).difference(res.keys()))
         if len(missing_tasks) == 0:
             return res
         logger.info(f"Computing global task statistics (dataset len {len(self)}) for {missing_tasks}")
-        old_tasks = self.tasks
-        self._tasks = [t for t in self.tasks if t.name in missing_tasks]
         res = {**res, **self._compute_channel_level_stats(missing_tasks)}
-        ...
-        ...
-        if os.getenv("CACHE_IMG_STATS", "0") == "1":
-            np.savez(cache_path, res)
+        logger.info(f"Computed task statistics: { {k: tuple(v[0].shape) for k, v in res.items()} }")
+        np.savez(cache_path, res)
         return res
 
     def _compute_channel_level_stats(self, missing_tasks: list[str]) -> dict[str, TaskStatistics]:
+        # kinda based on: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
+        def update(counts: tr.Tensor, counts_delta: float, mean: tr.Tensor, M2: tr.Tensor,
+                   new_value: tr.Tensor) -> tuple[tr.Tensor, tr.Tensor, tr.Tensor]:
+            new_count = counts + counts_delta
+            batch_mean = new_value.nanmean(0)
+            batch_var = ((new_value - batch_mean) ** 2).nansum(0)
+            delta = batch_mean - mean
+            new_count_no_zero = new_count + (new_count == 0) # add 1 (True) in case new_count is 0 to not divide by 0
+            new_mean = mean + delta * counts_delta / new_count_no_zero
+            new_M2 = M2 + batch_var + delta**2 * counts * counts_delta / new_count_no_zero
+            assert not new_mean.isnan().any() and not new_M2.isnan().any(), (mean, new_mean, counts, counts_delta)
+            return new_count, new_mean, new_M2
+
         ch = {k: v[-1] if len(v) == 3 else 1 for k, v in self.data_shape.items()}
-        sums = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
         counts = {task_name: tr.zeros(ch[task_name]).long() for task_name in missing_tasks}
-        mins = {task_name: tr.zeros(ch[task_name]).type(tr.float64) ...
-        maxs = {task_name: tr.zeros(ch[task_name]).type(tr.float64) ...
-        ...
-        res = {k: v.reshape(-1, 1, 1).repeat(1, self.h, self.w) for k, v in res_ch.items()}
+        mins = {task_name: tr.zeros(ch[task_name]).type(tr.float64) + 10**10 for task_name in missing_tasks}
+        maxs = {task_name: tr.zeros(ch[task_name]).type(tr.float64) - 10**10 for task_name in missing_tasks}
+        means_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
+        M2s_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
+
+        old_names, old_normalization = self.task_names, self.normalization
+        self.task_names, self.normalization = missing_tasks, None # for self[ix]
+
+        BS = min(len(self), self.batch_size_stats)
+        n = (len(self) // BS) + (len(self) % BS != 0)
+        logger.debug(f"Global task statistics. Batch size: {BS}. N iterations: {n}.")
+        for ix in trange(n, disable=os.getenv("STATS_PBAR", "0") == "0", desc="Computing stats"):
+            item = self[ix * BS: min(len(self), (ix + 1) * BS)][0]
+            for task in missing_tasks:
+                if self.name_to_task[task].is_classification:
+                    continue
+                item_flat_ch = item[task].reshape(-1, ch[task])
+                item_no_nan = item_flat_ch.nan_to_num(0).type(tr.float64)
+                mins[task] = tr.minimum(mins[task], item_no_nan.min(0)[0])
+                maxs[task] = tr.maximum(maxs[task], item_no_nan.max(0)[0])
+                counts_delta = (item_flat_ch == item_flat_ch).long().sum(0)
+                counts[task], means_vec[task], M2s_vec[task] = \
+                    update(counts[task], counts_delta, means_vec[task], M2s_vec[task], item_no_nan)
+
+        res = {}
+        for task in missing_tasks:
+            if self.name_to_task[task].is_classification:
+                res[task] = (mins[task] * 0, mins[task] * 0 + 1, mins[task] * 0, mins[task] * 0 + 1)
+            else:
+                res[task] = (mins[task], maxs[task], means_vec[task], (M2s_vec[task] / counts[task]).sqrt())
+            assert not any(x[0].isnan().any() for x in res[task]), (task, res[task])
+        self.task_names, self.normalization = old_names, old_normalization
+        return res
+
+    def _load_external_statistics(self, statistics: dict[str, TaskStatistics | list]) -> dict[str, TaskStatistics]:
+        assert all(k in statistics.keys() for k in self.task_names), (list(statistics.keys()), self.task_names)
+        res: dict[str, TaskStatistics] = {}
+        for k, v in statistics.items():
+            res[k] = tuple(tr.Tensor(x) for x in v)
+            assert all(_stat.shape == (nd := (self.name_to_task[k].n_channels, )) for _stat in res[k]), (res[k], nd)
+        logger.info(f"External statistics provided: { {k: tuple(v[0].shape) for k, v in res.items()} }")
         return res
 
     # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
@@ -241,10 +337,15 @@ class MultiTaskDataset(Dataset):
         res = {}
         item_name = self.file_names[index]
 
-        for ...
-            ...
+        for task_name in self.task_names:
+            task = [t for t in self.tasks if t.name == task_name][0]
+            file_path = self.files_per_repr[task_name][index]
             file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr
-            res[ ...
+            res[task_name] = task.load_from_disk(file_path) if file_path is not None else self.default_vals[task_name]
+            if self.normalization == "min_max" and not task.is_classification:
+                res[task_name] = task.normalize(res[task_name])
+            if self.normalization == "standardization" and not task.is_classification:
+                res[task_name] = task.standardize(res[task_name])
         return (res, item_name, self.task_names)
 
     def __len__(self) -> int:
@@ -256,6 +357,7 @@ class MultiTaskDataset(Dataset):
         f_str += f"\n - Tasks ({len(self.tasks)}): {self.tasks}"
         f_str += f"\n - Length: {len(self)}"
         f_str += f"\n - Handle missing data mode: '{self.handle_missing_data}'"
+        f_str += f"\n - Normalization: '{self.normalization}'"
         return f_str
 
     def __repr__(self):
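The statistics pass in _compute_channel_level_stats streams batches through a merged-batch variant of Welford's online algorithm (the Chan et al. merge formula), so means and stds never require holding the dataset in memory. A standalone sanity check, not from the repo, that the update rule matches a direct computation (synthetic NaN-free data, so nanmean/nansum reduce to mean/sum):

import torch as tr

def update(counts, counts_delta, mean, M2, new_value):
    # same merge rule as the diff above: fold one batch into the running (counts, mean, M2)
    new_count = counts + counts_delta
    batch_mean = new_value.nanmean(0)
    batch_var = ((new_value - batch_mean) ** 2).nansum(0)
    delta = batch_mean - mean
    new_count_no_zero = new_count + (new_count == 0)  # avoid dividing by 0 on the first batch
    new_mean = mean + delta * counts_delta / new_count_no_zero
    new_M2 = M2 + batch_var + delta ** 2 * counts * counts_delta / new_count_no_zero
    return new_count, new_mean, new_M2

data = tr.randn(1000, 3).double()  # 1000 samples, 3 channels
counts, mean, M2 = tr.zeros(3).long(), tr.zeros(3).double(), tr.zeros(3).double()
for batch in data.split(64):       # stream in batches, as the reader does
    counts, mean, M2 = update(counts, batch.shape[0], mean, M2, batch)
print(tr.allclose(mean, data.mean(0)))                                  # True
print(tr.allclose((M2 / counts).sqrt(), data.std(0, unbiased=False)))   # True (population std)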
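A hedged usage sketch of the new constructor (the dataset path is a placeholder, and the imports assume the package exports these names the same way scripts/dronescapes_viewer.py consumes them):

from dronescapes_reader import MultiTaskDataset, dronescapes_task_types

reader = MultiTaskDataset("/path/to/dataset", task_names=["rgb", "depth_dpt"],
                          task_types=dronescapes_task_types, normalization="standardization",
                          handle_missing_data="fill_nan", cache_task_stats=True, batch_size_stats=16)
print(reader.means["depth_dpt"], reader.stds["depth_dpt"])  # per-channel statistics, computed once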
dronescapes_reader/npz_representation.py
CHANGED
@@ -6,14 +6,39 @@ import torch as tr
 
 class NpzRepresentation:
     """Generic Task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well"""
-    def __init__(self, name: str):
+    def __init__(self, name: str, n_channels: int):
         self.name = name
+        self.n_channels = n_channels
+        self.classes: list[str] | None = None
+        self._min: tr.Tensor | None = None
+        self._max: tr.Tensor | None = None
+        self._mean: tr.Tensor | None = None
+        self._std: tr.Tensor | None = None
+
+    @property
+    def is_classification(self) -> bool:
+        """if we have self.classes"""
+        return self.classes is not None
+
+    @property
+    def statistics(self) -> tuple[tr.Tensor, tr.Tensor, tr.Tensor, tr.Tensor] | None:
+        return (self._min, self._max, self._mean, self._std) if self._min is not None else None
+
+    @statistics.setter
+    def statistics(self, stats: tuple[tr.Tensor, tr.Tensor, tr.Tensor, tr.Tensor]):
+        assert isinstance(stats, tuple) and len(stats) == 4, stats
+        self._min, self._max, self._mean, self._std = stats
 
     def load_from_disk(self, path: Path) -> tr.Tensor:
         """Reads the npz data from the disk and transforms it properly"""
         data = np.load(path, allow_pickle=False)
         data = data if isinstance(data, np.ndarray) else data["arr_0"] # in case of npz, we need this as well
-        ...
+        data = data.astype(np.float32) if np.issubdtype(data.dtype, np.floating) else data # float16 is dangerous
+        res = tr.from_numpy(data)
+        res = res.unsqueeze(-1) if len(res.shape) == 2 and self.n_channels == 1 else res # (H, W) in some dph/edges
+        assert ((res.shape[-1] == self.n_channels and len(res.shape) == 3) or
+                (len(res.shape) == 2 and self.is_classification)), f"{self.name}: {res.shape} vs {self.n_channels}"
+        return res
 
     def save_to_disk(self, data: tr.Tensor, path: Path):
         """stores this item to the disk which can then be loaded via `load_from_disk`"""
@@ -22,34 +47,32 @@ class NpzRepresentation:
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
         """very basic implementation of converting this representation to a viewable image. You should overwrite this"""
         assert isinstance(x, tr.Tensor), type(x)
-        if len(x.shape) == 2:
-            x = x.unsqueeze(-1)
         assert len(x.shape) == 3, x.shape # guaranteed to be (H, W, C) at this point
+        x = x.nan_to_num(0).cpu().detach()
         if x.shape[-1] != 3:
             x = x[..., 0:1]
-        if x.shape[-1] == 1:
+        if x.shape[-1] == 1: # guaranteed to be (H, W, 3) after this if statement hopefully
             x = x.repeat(1, 1, 3)
-        ...
-        ...
-        if ...
-            x = ...
+        if x.dtype == tr.uint8 or self.is_classification:
+            return x.numpy()
+        if self.statistics is not None:
+            x = (x * self._std + self._mean) if (x.min() < 0 or x.max() > 1) else x * (self._max - (m := self._min)) + m
+            x = (x * 255) if (self._max <= 1).any() else x
+        x = x.numpy().astype(np.uint8)
         return x
 
-    def normalize(self, x: tr.Tensor, ...):
+    def normalize(self, x: tr.Tensor) -> tr.Tensor:
         """normalizes a data point read with self.load_from_disk(path) using external min/max information"""
-        ...
+        assert self.statistics is not None, "self.statistics must be set from reader before task.normalize(x)"
+        return ((x.float() - self._min) / (self._max - self._min)).nan_to_num(0, 0, 0).float()
 
-    def standardize(self, x: tr.Tensor, ...):
+    def standardize(self, x: tr.Tensor) -> tr.Tensor:
         """standardizes a data point read with self.load_from_disk(path) using external mean/std information"""
-        ...
-
-    @property
-    def n_channels(self) -> int:
-        """return the number of channels for this representation. Must be updated by each downstream representation"""
-        raise NotImplementedError(f"n_channels is not implemented for {self}")
+        assert self.statistics is not None, "self.statistics must be set from reader before task.standardize(x)"
+        return ((x.float() - self._mean) / self._std).nan_to_num(0, 0, 0).float()
 
     def __repr__(self):
         return str(self)
 
     def __str__(self):
-        return f"{str(type(self)).split('.')[-1][0:-2]}({self.name})"
+        return f"{str(type(self)).split('.')[-1][0:-2]}({self.name}[{self.n_channels}])"
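The stricter load_from_disk contract (float16 upcast to float32, (H, W) arrays for single-channel tasks reshaped to (H, W, 1), channel count asserted) can be exercised in isolation. A sketch with a synthetic file, assuming the module path below imports as written:

import tempfile
from pathlib import Path
import numpy as np
from dronescapes_reader.npz_representation import NpzRepresentation

task = NpzRepresentation("edges_demo", n_channels=1)  # hypothetical 1-channel task
with tempfile.TemporaryDirectory() as d:
    path = Path(d) / "demo.npz"
    np.savez(path, np.random.rand(240, 320).astype(np.float16))  # stored as (H, W) float16
    x = task.load_from_disk(path)
print(x.dtype, x.shape)  # torch.float32 torch.Size([240, 320, 1])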
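And the statistics plumbing end to end: the reader assigns task.statistics after computing (or loading) them, after which normalize/standardize operate per channel. A sketch with made-up statistics:

import torch as tr
from dronescapes_reader.npz_representation import NpzRepresentation

task = NpzRepresentation("rgb_demo", n_channels=3)     # hypothetical 3-channel task
task.statistics = (tr.zeros(3), tr.ones(3) * 255,      # min, max
                   tr.ones(3) * 127.5, tr.ones(3) * 40)  # mean, std
x = tr.rand(8, 8, 3) * 255                             # fake data point in [0, 255]
print(task.normalize(x).min().item(), task.normalize(x).max().item())  # within [0, 1]
print(task.standardize(x).mean().item())                               # roughly 0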
scripts/dronescapes_viewer.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
scripts/dronescapes_viewer.py
CHANGED
@@ -9,8 +9,10 @@ from torch.utils.data import DataLoader
 import random
 
 def main():
-    reader = MultiTaskDataset(sys.argv[1], ...
-                              ...
+    reader = MultiTaskDataset(sys.argv[1], task_names=list(dronescapes_task_types.keys()),
+                              task_types=dronescapes_task_types,
+                              files_per_repr_overwrites={"hsv": "rgb"}, handle_missing_data="fill_nan",
+                              normalization="min_max", cache_task_stats=True)
     print(reader)
 
     print("== Shapes ==")