little update to multitask_dataset

Files changed:
- neo_reader/multitask_dataset.py (+21 -19)
- neo_reader/neo_node.py (+1 -1)
- neo_viewer.ipynb (+2 -2)

neo_reader/multitask_dataset.py
@@ -71,7 +71,8 @@ class MultiTaskDataset(Dataset):
     def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
                  files_suffix: str = "npz", task_types: dict[str, type] = None):
         assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
-        assert handle_missing_data in ("drop", "fill_none")
+        assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
+            f"Invalid handle_missing_data mode: {handle_missing_data}"
         assert files_suffix == "npz", "Only npz supported right now (though trivial to update)"
         self.path = Path(path).absolute()
         self.handle_missing_data = handle_missing_data
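
The two new modes are easiest to see end to end. A minimal usage sketch — the dataset path and the import path are assumptions for illustration, not taken from this commit:

```python
from pathlib import Path
from neo_reader.multitask_dataset import MultiTaskDataset  # assumed import path

# "drop" keeps only data points present for every task; the three fill modes
# keep the union and substitute None / zeros / NaNs for the missing ones.
for mode in ("drop", "fill_none", "fill_zero", "fill_nan"):
    reader = MultiTaskDataset(Path("data/neo"), handle_missing_data=mode)  # hypothetical path
    print(mode, len(reader))
```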
@@ -93,6 +94,10 @@ class MultiTaskDataset(Dataset):
         self.name_to_task = {task.name: task for task in self.tasks}
         logger.info(f"Tasks used in this dataset: {self.task_names}")
 
+        _default_val = float("nan") if handle_missing_data == "fill_nan" else 0
+        self._defaults = {task: None if handle_missing_data == "fill_none" else
+                          tr.full(self.data_shape[task], _default_val) for task in self.task_names}
+
     # Public methods and properties
 
     @property
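
The new `_defaults` dict precomputes one fill value per task. A standalone sketch of the same pattern, with made-up task shapes:

```python
import torch as tr  # the module aliases torch as `tr`

data_shape = {"rgb": (3, 64, 64), "depth": (1, 64, 64)}  # hypothetical shapes
handle_missing_data = "fill_nan"

_default_val = float("nan") if handle_missing_data == "fill_nan" else 0
_defaults = {task: None if handle_missing_data == "fill_none" else
             tr.full(data_shape[task], _default_val) for task in data_shape}
assert tr.isnan(_defaults["rgb"]).all()  # every element is NaN under "fill_nan"
```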
@@ -124,14 +129,14 @@ class MultiTaskDataset(Dataset):
     def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
         """
         given a list of items (i.e. from a reader[n:n+k] call), return the item batched on 1st dimension.
-        Nones (missing data points) are turned into
+        Nones (missing data points) are turned into nans as per the data shape of that dim.
         """
-        assert all(item[2] == self.task_names for item in items), (
+        assert all(item[2] == self.task_names for item in items), ([item[2] for item in items], self.task_names)
         items_name = [item[1] for item in items]
         res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names}  # float32 always
         for i in range(len(items)):
             for k in self.task_names:
-                res[k][i] = items[i][0][k] if items[i][0][k] is not None else float("nan")
+                res[k][i][:] = items[i][0][k] if items[i][0][k] is not None else float("nan")
         return res, items_name, self.task_names
 
     # Private methods
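
Since `collate_fn` is an instance method, it can plug straight into a `torch.utils.data.DataLoader`. A minimal sketch, assuming `reader` was built as in the constructor example above:

```python
from torch.utils.data import DataLoader

loader = DataLoader(reader, batch_size=4, collate_fn=reader.collate_fn)
batch, names, task_names = next(iter(loader))
# one float32 tensor of shape (4, *data_shape[k]) per task; missing data is NaN
print({k: v.shape for k, v in batch.items()})
```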
@@ -152,12 +157,11 @@ class MultiTaskDataset(Dataset):
         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
         return in_files
 
-    def
+    def _build_dataset_drop_missing(self) -> BuildDatasetTuple:
         in_files = self.all_files_per_repr
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}  # {node: {name: path}}
         common = set(x.name for x in next(iter(in_files.values())))
-        nodes = in_files.keys()
-        for node in nodes:
+        for node in (nodes := in_files.keys()):
             common = common.intersection([f.name for f in in_files[node]])
             assert len(common) > 0, f"Node '{node}' made the intersection null"
         common = natsorted(list(common))
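
The walrus in the `for` header just folds the old `nodes = in_files.keys()` assignment into the loop while keeping `nodes` available afterwards. A self-contained illustration:

```python
in_files = {"rgb": ["a", "b"], "depth": ["b", "c"]}  # made-up node -> files map
common = {"a", "b"}
for node in (nodes := in_files.keys()):
    common &= set(in_files[node])
print(len(nodes), common)  # prints: 2 {'b'}
```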
@@ -166,12 +170,12 @@ class MultiTaskDataset(Dataset):
         assert len(files_per_repr) > 0
         return files_per_repr, common
 
-    def
+    def _build_dataset_fill_missing(self) -> BuildDatasetTuple:
         in_files = self.all_files_per_repr
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
         all_files = set(x.name for x in next(iter(in_files.values())))
         nodes = in_files.keys()
-        for node in nodes:
+        for node in (nodes := in_files.keys()):
             all_files = all_files.union([f.name for f in in_files[node]])
         all_files = natsorted(list(all_files))
         logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
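
The two builders differ only in their set semantics: "drop" keeps the intersection of file names across nodes, while the fill modes keep the union. On made-up file sets:

```python
files = {"rgb": {"0.npz", "1.npz"}, "depth": {"1.npz", "2.npz"}}
print(set.intersection(*files.values()))  # {'1.npz'} -- what "drop" keeps
print(set.union(*files.values()))         # all three names -- what "fill_*" keeps
```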
@@ -187,9 +191,9 @@ class MultiTaskDataset(Dataset):
     def _build_dataset(self) -> BuildDatasetTuple:
         logger.debug(f"Building dataset from: '{self.path}'")
         if self.handle_missing_data == "drop":
-            return self.
+            return self._build_dataset_drop_missing()
         else:
-            return self.
+            return self._build_dataset_fill_missing()
 
     # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
 
@@ -204,12 +208,10 @@ class MultiTaskDataset(Dataset):
         res = {}
         item_name = self.file_names[index]
 
-        for
-            file_path = self.files_per_repr[
-            file_path =
-
-            item = _repr.load_from_disk(file_path) if file_path is not None and file_path.exists() else None
-            res[_repr.name] = item
+        for task in self.tasks:
+            file_path = self.files_per_repr[task.name][index]
+            file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr
+            res[task.name] = task.load_from_disk(file_path) if file_path is not None else self._defaults[task.name]
         return (res, item_name, self.task_names)
 
     def __len__(self) -> int:
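
With the `_defaults` lookup in place, indexing behaves as sketched below (a hypothetical session, `reader` as above):

```python
data, name, task_names = reader[0]
for task in task_names:
    value = data[task]
    # None under "fill_none"; a zero/NaN tensor under "fill_zero"/"fill_nan"
    print(task, None if value is None else tuple(value.shape))
```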
@@ -218,9 +220,9 @@ class MultiTaskDataset(Dataset):
     def __str__(self):
         f_str = f"[{str(type(self)).rsplit('.', maxsplit=1)[-1][0:-2]}]"
         f_str += f"\n - Path: '{self.path}'"
-        f_str += f"\n -
-        f_str += f"\n - Representations ({len(self.tasks)}): {self.tasks}"
+        f_str += f"\n - Tasks ({len(self.tasks)}): {self.tasks}"
         f_str += f"\n - Length: {len(self)}"
+        f_str += f"\n - Handle missing data mode: '{self.handle_missing_data}'"
         return f_str
 
     def __repr__(self):

neo_reader/neo_node.py
@@ -57,7 +57,7 @@ class NEONode(NpzRepresentation):
         return super().save_to_disk(data.clip(0, 1), path)
 
     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
-        y = np.clip(x.numpy(), 0, 1)
+        y = np.clip(x.cpu().numpy(), 0, 1)
         y = y * 255
         y[y == 0] = 255
         y = y.astype(np.uint).squeeze()
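
`Tensor.numpy()` raises a `TypeError` on CUDA tensors, so the added `.cpu()` makes `plot_fn` device-agnostic (and is essentially free when the tensor already lives on the CPU). A quick check:

```python
import torch as tr

x = tr.rand(3, 64, 64)
if tr.cuda.is_available():
    x = x.cuda()
y = x.cpu().numpy()  # works on any device; x.numpy() would raise on a CUDA tensor
```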

neo_viewer.ipynb
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f22dabb6d4ab93547a7227e302109e3b4be9a9f627dc41adf03ebcce30b8b33c
+size 12256316
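
The notebook is tracked with Git LFS, so the commit only rewrites the three-line pointer file (spec version, `oid sha256:` content hash, and `size` in bytes); the actual ~12 MB notebook is fetched with `git lfs pull` after checkout.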