Meehai committed on
Commit 538d2c5 · 1 Parent(s): fb0cb78

implemented the reader based on MultiTaskReader
.gitignore CHANGED
@@ -8,4 +8,5 @@ neo_1month/
  __pycache__
  *.pyc
  dataset/
+ *.ttf
 
neo_data_analysis.ipynb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf0b3146625724c73369a3e064e940f9a5e4fe349d653b322de1fd4e3bf396f
+ size 50024
neo_reader/__init__.py CHANGED
@@ -1,3 +1,14 @@
  """init file"""
- from .neo_reader import NEOReader
+ from .multitask_dataset import MultiTaskDataset
  from .neo_node import NEONode
+
+ # For both 1month and 1week datasets
+ _name_to_type = {"AOD": "AerosolOpticalDepth", "BS_ALBEDO": "Albedo", "CHLORA": "Chlorophyll",
+                  "CLD_FR": "CloudFraction", "CLD_RD": "CloudParticleRadius", "CLD_WP": "CloudWaterContent",
+                  "COT": "CloudOpticalThickness", "CO_M": "CarbonMonoxide", "FIRE": "Fire",
+                  "INSOL": "SolarInsolation", "LAI": "LeafAreaIndex", "LSTD": "Temperature",
+                  "LSTD_AN": "TemperatureAnomaly", "LSTN": "Temperature", "LSTN_AN": "TemperatureAnomaly",
+                  "LWFLUX": "OutgoingLongwaveRadiation", "NDVI": "Vegetation", "NETFLUX": "NetRadiation",
+                  "NO2": "NitrogenDioxide", "OZONE": "Ozone", "SNOWC": "SnowCover", "SST": "SeaSurfaceTemperature",
+                  "SWFLUX": "ReflectedShortwaveRadiation", "WV": "WaterVapor"}
+ neo_task_types = {k: NEONode(v, k) for k, v in _name_to_type.items()}
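
For reference, a minimal sketch of how these new exports are meant to be combined; the dataset path and task selection below are hypothetical and not part of this commit:

    from neo_reader import MultiTaskDataset, neo_task_types

    # hypothetical NEO dump with one sub-directory per task (e.g. dataset/neo_1month/NDVI/*.npz)
    reader = MultiTaskDataset("dataset/neo_1month", task_names=["AOD", "NDVI"],
                              handle_missing_data="fill_none", task_types=neo_task_types)
    data, name, task_names = reader[0]  # ({task: tensor or None}, file stem, sorted task names)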
neo_reader/multitask_dataset.py ADDED
@@ -0,0 +1,253 @@
+ #!/usr/bin/env python3
+ """MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader."""
+ from __future__ import annotations
+ from pathlib import Path
+ from argparse import ArgumentParser
+ from pprint import pprint
+ from natsort import natsorted
+ from loguru import logger
+ import torch as tr
+ import numpy as np
+ from torch.utils.data import Dataset, DataLoader
+ from lovely_tensors import monkey_patch
+
+ monkey_patch()
+ BuildDatasetTuple = tuple[dict[str, list[Path]], list[str]]
+ MultiTaskItem = tuple[dict[str, tr.Tensor], str, list[str]]  # [{task: data}, stem(name) | list[stem(name)], [tasks]]
+
+ class NpzRepresentation:
+     """Generic task with data read from/saved to npz files. Tries to read and store the data as-is from disk."""
+     def __init__(self, name: str):
+         self.name = name
+
+     def load_from_disk(self, path: Path) -> tr.Tensor:
+         """Reads the npz data from the disk and transforms it properly"""
+         data = np.load(path, allow_pickle=False)
+         data = data if isinstance(data, np.ndarray) else data["arr_0"]  # in case of npz, we need this as well
+         return tr.from_numpy(data)  # can be uint8, float16, float32 etc.
+
+     def save_to_disk(self, data: tr.Tensor, path: Path):
+         """Stores this item to the disk, which can then be loaded via `load_from_disk`"""
+         np.save(path, data.cpu().numpy(), allow_pickle=False)
+
+     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
+         """Very basic conversion of this representation to a viewable image. You should overwrite this."""
+         assert isinstance(x, tr.Tensor), type(x)
+         if len(x.shape) == 2: x = x.unsqueeze(-1)
+         assert len(x.shape) == 3, x.shape  # guaranteed to be (H, W, C) at this point
+         if x.shape[-1] != 3: x = x[..., 0:1]
+         if x.shape[-1] == 1: x = x.repeat(1, 1, 3)
+         x = x.nan_to_num(0).cpu().numpy()  # guaranteed to be (H, W, 3) at this point
+         _min, _max = x.min((0, 1), keepdims=True), x.max((0, 1), keepdims=True)
+         if x.dtype != np.uint8: x = np.nan_to_num((x - _min) / (_max - _min) * 255, 0).astype(np.uint8)
+         return x
+
+     def __repr__(self):
+         return str(self)
+
+     def __str__(self):
+         return f"{str(type(self)).split('.')[-1][0:-2]}({self.name})"
+
+ class MultiTaskDataset(Dataset):
+     """
+     MultiTaskDataset implementation. Reads data from npz files and returns them as a dict.
+
+     Parameters:
+     - path: Path to the directory containing the npz files.
+     - task_names: List of tasks that are present in the dataset. If set to None, will infer from the files on disk.
+     - handle_missing_data: Modes to handle missing data. Valid options are:
+       - drop: Drop the data point if any of the representations is missing.
+       - fill_none: Fill the missing data with Nones.
+
+     Expected directory structure:
+     path/
+     - task_1/0.npz, ..., N.npz
+     - ...
+     - task_n/0.npz, ..., N.npz
+
+     Names can be in a different format (e.g. 2022-01-01.npz), but must be consistent and equal across all tasks.
+     """
+
+     def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
+                  files_suffix: str = "npz", task_types: dict[str, type] = None):
+         assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
+         assert handle_missing_data in ("drop", "fill_none"), f"Invalid handle_missing_data mode: {handle_missing_data}"
+         assert files_suffix == "npz", "Only npz supported right now (though trivial to update)"
+         self.path = Path(path).absolute()
+         self.handle_missing_data = handle_missing_data
+         self.suffix = files_suffix
+         self.all_files_per_repr = self._get_all_npz_files()
+         self.files_per_repr, self.file_names = self._build_dataset()  # these are filtered by 'drop' or 'fill_none' logic
+         if task_types is None:
+             logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.")
+             task_types = {}
+
+         if task_names is None:
+             task_names = list(self.files_per_repr.keys())
+             logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).")
+         self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names}
+         assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
+         self.task_names = sorted(task_names)
+         self._data_shape: tuple[int, ...] | None = None
+         self._tasks: list[NpzRepresentation] | None = None
+         self.name_to_task = {task.name: task for task in self.tasks}
+         logger.info(f"Tasks used in this dataset: {self.task_names}")
+
+     # Public methods and properties
+
+     @property
+     def data_shape(self) -> dict[str, tuple[int, ...]]:
+         """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
+         first_npz = {task: [_v for _v in files if _v is not None][0] for task, files in self.files_per_repr.items()}
+         data_shape = {task: self.name_to_task[task].load_from_disk(first_npz[task]).shape for task in self.task_names}
+         return data_shape
+
+     @property
+     def tasks(self) -> list[NpzRepresentation]:
+         """
+         Returns a list of instantiated tasks in the same order as self.task_names. Overwrite this to add
+         new tasks and semantics (i.e. plot_fn or doing some preprocessing after loading from disk in some tasks).
+         """
+         if self._tasks is not None:
+             return self._tasks
+         self._tasks = []
+         for task_name in self.task_names:
+             t = self.task_types[task_name]
+             if not isinstance(t, NpzRepresentation):
+                 t = t(task_name)
+             self._tasks.append(t)
+         assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks)
+         return self._tasks
+
+     def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
+         """
+         Given a list of items (i.e. from a reader[n:n+k] call), return the item batched on the 1st dimension.
+         Nones (missing data points) are turned into NaNs as per the data shape of that task.
+         """
+         assert all(item[2] == self.task_names for item in items), ((item[2] for item in items), self.task_names)
+         items_name = [item[1] for item in items]
+         res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names}  # float32 always
+         for i in range(len(items)):
+             for k in self.task_names:
+                 res[k][i] = items[i][0][k] if items[i][0][k] is not None else float("nan")
+         return res, items_name, self.task_names
+
+     # Private methods
+
+     def _get_all_npz_files(self) -> dict[str, list[Path]]:
+         """Returns a dict of form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}"""
+         in_files = {}
+         all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()]
+         for repr_dir_name in all_repr_dirs:
+             dir_name = self.path / repr_dir_name
+             if all(f.is_dir() for f in dir_name.iterdir()):  # dataset is stored as repr/part_x/0.npz, ..., part_k/n.npz
+                 all_files = []
+                 for part in dir_name.iterdir():
+                     all_files.extend(part.glob(f"*.{self.suffix}"))
+             else:  # dataset is stored as repr/0.npz, ..., repr/n.npz
+                 all_files = dir_name.glob(f"*.{self.suffix}")
+             in_files[repr_dir_name] = natsorted(all_files, key=lambda x: x.name)  # important: use natsorted() here
+         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
+         return in_files
+
+     def _build_dataset_drop(self) -> BuildDatasetTuple:
+         in_files = self.all_files_per_repr
+         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}  # {node: {name: path}}
+         common = set(x.name for x in next(iter(in_files.values())))
+         nodes = in_files.keys()
+         for node in nodes:
+             common = common.intersection([f.name for f in in_files[node]])
+             assert len(common) > 0, f"Node '{node}' made the intersection null"
+         common = natsorted(list(common))
+         logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
+         files_per_repr = {node: [name_to_node_path[node][x] for x in common] for node in nodes}
+         assert len(files_per_repr) > 0
+         return files_per_repr, common
+
+     def _build_dataset_fill_none(self) -> BuildDatasetTuple:
+         in_files = self.all_files_per_repr
+         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
+         all_files = set(x.name for x in next(iter(in_files.values())))
+         nodes = in_files.keys()
+         for node in nodes:
+             all_files = all_files.union([f.name for f in in_files[node]])
+         all_files = natsorted(list(all_files))
+         logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
+
+         files_per_repr = {node: [] for node in nodes}
+         for node in nodes:
+             for file_name in all_files:
+                 file_path = name_to_node_path[node].get(file_name, None)
+                 files_per_repr[node].append(file_path)
+         assert len(files_per_repr) > 0
+         return files_per_repr, all_files
+
+     def _build_dataset(self) -> BuildDatasetTuple:
+         logger.debug(f"Building dataset from: '{self.path}'")
+         if self.handle_missing_data == "drop":
+             return self._build_dataset_drop()
+         else:
+             return self._build_dataset_fill_none()
+
+     # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
+
+     def __getitem__(self, index: int | slice | list[int] | tuple) -> MultiTaskItem:
+         """Read the data for all the desired nodes"""
+         assert isinstance(index, (int, slice, list, tuple)), type(index)
+         if isinstance(index, slice):
+             assert index.start is not None and index.stop is not None and index.step is None, "Only reader[l:r] allowed"
+             index = list(range(index.stop)[index])
+         if isinstance(index, (list, tuple)):
+             return self.collate_fn([self.__getitem__(ix) for ix in index])
+         res = {}
+         item_name = self.file_names[index]
+
+         for _repr in self.tasks:
+             file_path = self.files_per_repr[_repr.name][index]
+             file_path = file_path.resolve() if file_path is not None else None
+             assert self.handle_missing_data == "fill_none" or (file_path is not None and file_path.exists()), item_name
+             item = _repr.load_from_disk(file_path) if file_path is not None and file_path.exists() else None
+             res[_repr.name] = item
+         return (res, item_name, self.task_names)
+
+     def __len__(self) -> int:
+         return len(self.files_per_repr[self.task_names[0]])  # all of them have the same number (filled with None or not)
+
+     def __str__(self):
+         f_str = f"[{str(type(self)).rsplit('.', maxsplit=1)[-1][0:-2]}]"
+         f_str += f"\n - Path: '{self.path}'"
+         f_str += f"\n - Only full data: {self.handle_missing_data == 'drop'}"
+         f_str += f"\n - Representations ({len(self.tasks)}): {self.tasks}"
+         f_str += f"\n - Length: {len(self)}"
+         return f_str
+
+     def __repr__(self):
+         return str(self)
+
+ def main():
+     """main fn"""
+     parser = ArgumentParser()
+     parser.add_argument("dataset_path", type=Path)
+     parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none")
+     args = parser.parse_args()
+
+     reader = MultiTaskDataset(args.dataset_path, task_names=None, handle_missing_data=args.handle_missing_data)
+     print(reader)
+     print(f"Shape: {reader.data_shape}")
+
+     rand_ix = np.random.randint(len(reader))
+     data, name, repr_names = reader[rand_ix]  # get a random single data point
+     print(f"Name: {name}. Nodes: {repr_names}")
+     pprint({k: v for k, v in data.items()})
+
+     data, name, repr_names = reader[rand_ix: min(len(reader), rand_ix + 5)]  # get a random batch
+     print(f"Name: {name}. Nodes: {repr_names}")
+     pprint({k: v for k, v in data.items()})  # missing entries become NaNs automagically
+
+     loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True)
+     data, name, repr_names = next(iter(loader))  # get a random batch using torch DataLoader
+     print(f"Name: {name}. Nodes: {repr_names}")
+     pprint({k: v for k, v in data.items()})  # missing entries become NaNs automagically
+
+ if __name__ == "__main__":
+     main()
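
As the `tasks` docstring above suggests, per-task semantics are added by subclassing NpzRepresentation. A short sketch under that assumption; the "rgb" task name, file layout and [0, 1] scaling are illustrative, not part of this commit:

    from pathlib import Path
    import numpy as np
    import torch as tr
    from neo_reader.multitask_dataset import MultiTaskDataset, NpzRepresentation

    class RGBRepresentation(NpzRepresentation):
        """Hypothetical task: uint8 RGB images stored as npz, loaded as float tensors in [0, 1]."""
        def load_from_disk(self, path: Path) -> tr.Tensor:
            data = np.load(path, allow_pickle=False)
            data = data if isinstance(data, np.ndarray) else data["arr_0"]
            return tr.from_numpy(data).float() / 255  # (H, W, 3) in [0, 1]

        def plot_fn(self, x: tr.Tensor) -> np.ndarray:
            return (x.clip(0, 1) * 255).byte().cpu().numpy()  # back to a viewable uint8 image

    # the class (not an instance) is passed; the dataset instantiates it with the task name
    reader = MultiTaskDataset("dataset/example", task_names=["rgb"],
                              task_types={"rgb": RGBRepresentation})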
neo_reader/neo_node.py CHANGED
@@ -2,6 +2,10 @@
  from pathlib import Path
  import numpy as np
  from codecs import encode
+ from overrides import overrides
+ import torch as tr
+
+ from .multitask_dataset import NpzRepresentation
 
  def _cmap_hex_to_rgb(hex_list):
      res = []
@@ -28,7 +32,7 @@ def _act_to_cmap(act_file: Path) -> np.ndarray:
      rgb_colors = _cmap_hex_to_rgb(hex_colors)
      return rgb_colors
 
- class NEONode:
+ class NEONode(NpzRepresentation):
      """NEO nodes implementation in ngclib repository"""
      def __init__(self, node_type: str, name: str):
          # all neo nodes have 1 dimension.
@@ -42,30 +46,25 @@ class NEONode:
          self.name = name
          self.cmap = _act_to_cmap(Path(__file__).absolute().parent / "cmaps" / f"{self.node_type}.act")
 
-     def load_from_disk(self, x: np.ndarray) -> np.ndarray:
-         y: np.ndarray = np.float32(x)
+     @overrides
+     def load_from_disk(self, path: Path) -> tr.Tensor:
+         data = np.load(path, allow_pickle=False)
+         y = data if isinstance(data, np.ndarray) else data["arr_0"]  # in case of npz, we need this as well
          if y.shape[0] == 1:  # pylint: disable=unsubscriptable-object
              y = y[0]  # pylint: disable=unsubscriptable-object
          if len(y.shape) == 2:
             y = np.expand_dims(y, axis=-1)
          y[np.isnan(y)] = 0
-         return y.astype(np.float32)
+         return tr.from_numpy(y).float()
 
-     def save_to_disk(self, x: np.ndarray) -> np.ndarray:
-         return x.clip(0, 1)
+     @overrides
+     def save_to_disk(self, data: tr.Tensor, path: Path):
+         return super().save_to_disk(data.clip(0, 1), path)
 
-     def plot_fn(self, x: np.ndarray | None) -> np.ndarray | None:
-         if x is None:
-             return x
-         y = np.clip(x, 0, 1)
+     def plot_fn(self, x: tr.Tensor) -> np.ndarray:
+         y = np.clip(x.numpy(), 0, 1)
          y = y * 255
          y[y == 0] = 255
          y = y.astype(np.uint).squeeze()
         y_rgb = self.cmap[y].astype(np.uint8)
          return y_rgb
-
-     def __repr__(self):
-         return self.name
-
-     def __str__(self):
-         return f"NEONode({self.name})"
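
For illustration, a minimal sketch of rendering one stored map through the cmap-based plot_fn above; the npz path is hypothetical:

    from pathlib import Path
    from neo_reader import neo_task_types

    node = neo_task_types["NDVI"]  # NEONode("Vegetation", "NDVI"), as built in __init__.py
    y = node.load_from_disk(Path("dataset/NDVI/2022-01-01.npz"))  # (H, W, 1) float32 tensor, NaNs set to 0
    rgb = node.plot_fn(y)  # (H, W, 3) uint8 image via the node's .act colormap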
neo_reader/neo_reader.py DELETED
@@ -1,183 +0,0 @@
- #!/usr/bin/env python3
- """NEO Reader module"""
- from __future__ import annotations
- from pathlib import Path
- from argparse import Namespace, ArgumentParser
- from pprint import pprint
- from natsort import natsorted
- from loguru import logger
- import numpy as np
- from torch.utils.data import Dataset
-
- try:
-     from .neo_node import NEONode
- except ImportError:
-     from neo_node import NEONode
-
- class NEOReader(Dataset):
-     """
-     NEO Reader implementation. Reads data from npz files and returns them as a dict.
-
-     Parameters:
-     - path: Path to the directory containing the npz files.
-     - nodes: List of nodes that are present in the dataset.
-     - handle_missing_data: Modes to handle missing data. Valid options are:
-       - drop: Drop the data point if any of the nodes is missing.
-       - fill_none: Fill the missing data with Nones.
-
-     Expected directory structure:
-     path/
-     - node_1/0.npz, ..., N.npz
-     - ...
-     - node_n/0.npz, ..., N.npz
-
-     Names can be in a different format (i.e. 2022-01-01.npz), but must be consistent and equal across all nodes.
-     """
-     # The default names and their corresponding types for NEO datasets (1week or 1month)
-     name_to_type = {"AOD": "AerosolOpticalDepth", "BS_ALBEDO": "Albedo", "CHLORA": "Chlorophyll",
-                     "CLD_FR": "CloudFraction", "CLD_RD": "CloudParticleRadius", "CLD_WP": "CloudWaterContent",
-                     "COT": "CloudOpticalThickness", "CO_M": "CarbonMonoxide", "FIRE": "Fire",
-                     "INSOL": "SolarInsolation", "LAI": "LeafAreaIndex", "LSTD": "Temperature",
-                     "LSTD_AN": "TemperatureAnomaly", "LSTN": "Temperature", "LSTN_AN": "TemperatureAnomaly",
-                     "LWFLUX": "OutgoingLongwaveRadiation", "NDVI": "Vegetation", "NETFLUX": "NetRadiation",
-                     "NO2": "NitrogenDioxide", "OZONE": "Ozone", "SNOWC": "SnowCover", "SST": "SeaSurfaceTemperature",
-                     "SWFLUX": "ReflectedShortwaveRadiation", "WV": "WaterVapor"}
-
-     def __init__(self, path: Path, nodes: list[str] | None = None, handle_missing_data: str = "fill_none"):
-         assert path.exists(), f"Provided path '{path}' doesn't exist!"
-         assert handle_missing_data in ("drop", "fill_none"), f"Invalid handle_missing_data mode: {handle_missing_data}"
-         self.path = Path(path).absolute()
-         self.files_per_node, self.file_names = self._build_dataset(handle_missing_data)
-         if nodes is None:
-             nodes = list(self.files_per_node.keys())
-             logger.debug("No nodes provided. Using all of them as read from the paths.")
-         assert all(isinstance(x, str) for x in nodes), tuple(zip(nodes, (type(x) for x in nodes)))
-
-         self.node_names = sorted(nodes)
-         logger.info(f"Nodes used in this reader: {self.node_names}")
-         self.nodes = [NEONode(NEOReader.name_to_type[x], x) for x in self.node_names]
-         self.handle_missing_data = handle_missing_data
-
-         self._images_shape: tuple[int, int, int] | None = None
-
-     # Public methods and properties
-
-     @property
-     def images_shape(self) -> tuple[int, int, int]:
-         """Returns a triple of (H, W, C) for all images shape, which are assumed to be consistent for all data points"""
-         if self._images_shape is None:
-             i = 0
-             while True:
-                 for img in self[i][0].values():
-                     if img is not None:
-                         self._images_shape = img.shape
-                         assert len(self._images_shape) == 3 and self._images_shape[-1] == 1, self._images_shape
-                         return self._images_shape
-                 i += 1
-         return self._images_shape
-
-     # Private methods
-
-     def _get_all_npz_files(self) -> dict[str, list[Path]]:
-         in_files = {}
-         nodes = [x for x in self.path.iterdir() if x.is_dir()]
-         for node in nodes:
-             dir_name = self.path / node.name
-             items = dir_name.glob("*.npz")
-             items = set(natsorted(items, key=lambda x: x.name))
-             in_files[node.name] = items
-         assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
-         return in_files
-
-     def _build_dataset_drop(self) -> tuple[dict[str, list[Path]], list[str]]:
-         in_files = self._get_all_npz_files()
-         common = set(x.name for x in next(iter(in_files.values())))
-         nodes = in_files.keys()
-         for node in nodes:
-             common = common.intersection([f.name for f in in_files[node]])
-             assert len(common) > 0, f"Node '{node}' made the intersection null"
-         common = natsorted(list(common))
-         logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
-         files_per_node = {node: [self.path / node / x for x in common] for node in nodes}
-         return files_per_node, common
-
-     def _build_dataset_fill_none(self) -> tuple[dict[str, list[Path]], list[str]]:
-         in_files = self._get_all_npz_files()
-         all_files = set(x.name for x in next(iter(in_files.values())))
-         nodes = in_files.keys()
-         for node in nodes:
-             all_files = all_files.union([f.name for f in in_files[node]])
-         all_files = natsorted(list(all_files))
-         logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
-
-         files_per_node = {node: [] for node in nodes}
-         in_file_names = {node: [f.name for f in in_files[node]] for node in nodes}
-         for node in nodes:
-             for file_name in all_files:
-                 file_path = self.path / node / file_name if file_name in in_file_names[node] else None
-                 files_per_node[node].append(file_path)
-         return files_per_node, all_files
-
-     def _build_dataset(self, handle_missing_data: str) -> tuple[dict[str, list[Path]], list[str]]:
-         logger.debug(f"Building dataset from: '{self.path}'")
-         if handle_missing_data == "drop":
-             return self._build_dataset_drop()
-         else:
-             return self._build_dataset_fill_none()
-
-     def _read_node_data(self, node: NEONode, index: int) -> np.ndarray | None:
-         """Reads the npz data from the disk and transforms it properly"""
-         file_path = self.files_per_node[node.name][index]
-         if file_path is None:
-             return None
-         item = np.load(file_path, allow_pickle=True)["arr_0"]
-         transformed_item = node.load_from_disk(item)
-         return transformed_item
-
-     # Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
-
-     def __getitem__(self, index: int) -> tuple[dict[str, np.ndarray], str, list[str]]:
-         """Read the data all the desired nodes"""
-         assert isinstance(index, int), type(index)
-         res = {}
-         item_name = self.file_names[index]
-
-         for node in self.nodes:
-             item = self._read_node_data(node, index)
-             assert self.handle_missing_data == "fill_none" or item is not None, item_name
-             res[node.name] = item
-         return (res, item_name, self.node_names)
-
-     def __len__(self) -> int:
-         return len(self.files_per_node[self.node_names[0]])
-
-     def __str__(self):
-         f_str = "[NGC Npz Reader]"
-         f_str += f"\n - Path: '{self.path}'"
-         f_str += f"\n - Only full data: {self.handle_missing_data == 'drop'}"
-         f_str += f"\n - Nodes ({len(self.nodes)}): {self.nodes}"
-         f_str += f"\n - Length: {len(self)}"
-         return f_str
-
-     def __repr__(self):
-         return str(self)
-
- def get_args() -> Namespace:
-     """cli args"""
-     parser = ArgumentParser()
-     parser.add_argument("dataset_path", type=Path)
-     parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none")
-     args = parser.parse_args()
-     return args
-
- def main(args: Namespace):
-     """main fn"""
-     reader = NEOReader(args.dataset_path, nodes=None, handle_missing_data=args.handle_missing_data)
-     print(reader)
-     print(f"Shape: {reader.images_shape}")
-     data, name, node_names = reader[np.random.randint(len(reader))]
-     print(f"Name: {name}. Nodes: {node_names}")
-     pprint({k: (v.shape if v is not None else None) for k, v in data.items()})
-
- if __name__ == "__main__":
-     main(get_args())
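
To connect the deleted NEOReader with its replacement, a rough sketch of the equivalent call after this commit (the path is hypothetical); `nodes` becomes `task_names` and the NEO name-to-type mapping now lives in `neo_task_types`:

    # before (removed in this commit):
    # reader = NEOReader(Path("dataset/neo_1month"), nodes=["NDVI", "FIRE"])

    # after:
    from neo_reader import MultiTaskDataset, neo_task_types
    reader = MultiTaskDataset("dataset/neo_1month", task_names=["NDVI", "FIRE"],
                              handle_missing_data="fill_none", task_types=neo_task_types)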
neo_viewer.ipynb CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23c8ec700dc0df69d0ee88c6c0998e287fc76ef21b0ce2fc31d5697fb283e8b5
- size 11757180
+ oid sha256:694e5b784a08a3aaa7f30d68026a80ba9dc7dd247912f038339bee9a24ba7673
+ size 12434984