Meehai committed
Commit 6d95ae3 · 1 Parent(s): 868825d

deleted semantics statistics (useless) and updated the reader with various fixes

data/train_set/.task_statistics.npz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f06911591fb45c4584e1fa8fcc099637d76cf6fac663a8400f6d76a2ac6ff97
-size 29921
+oid sha256:ab8250be8db0b0cbadb587271ea704c8e9e27bab25954d02d2fe4bd0a3510870
+size 16354
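
The statistics file is a Git LFS pointer, so only its oid and size change here; the shrink from 29921 to 16354 bytes is consistent with the commit message's removal of the semantics (classification) statistics. A minimal sketch for inspecting what remains in such a cache — the path comes from this diff, but the key layout inside the archive is an assumption:

```python
import numpy as np

# Assumed: a standard numpy .npz archive; the per-task key names inside
# it are not documented in this diff, so we just enumerate them.
stats = np.load("data/train_set/.task_statistics.npz", allow_pickle=True)
for key in stats.files:
    arr = stats[key]
    print(key, getattr(arr, "shape", None), getattr(arr, "dtype", None))
```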
dronescapes_reader/multitask_dataset.py CHANGED
@@ -106,10 +106,8 @@ class MultiTaskDataset(Dataset):
         self._statistics = None if normalization is None else self._compute_statistics()
         if self._statistics is not None:
             for task_name, task in self.name_to_task.items():
-                try:
-                    task.set_normalization(self.normalization[task_name], self._statistics[task_name])
-                except:
-                    breakpoint()
+                if not task.is_classification:
+                    task.set_normalization(self.normalization[task_name], self._statistics[task_name])

     # Public methods and properties

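This first hunk replaces a leftover debugging construct (a bare `except:` that dropped into `breakpoint()`) with an explicit guard: since the commit deletes the semantics statistics, classification tasks no longer have statistics to set, so they are skipped instead of crashing. A sketch of that intent, with hypothetical stand-in objects (the `Task` class below is illustrative, not the repository's):

```python
from dataclasses import dataclass

@dataclass
class Task:                 # hypothetical stand-in for the reader's task objects
    name: str
    is_classification: bool
    def set_normalization(self, mode: str, stats: tuple):
        print(f"{self.name}: normalizing with {mode}, stats={stats}")

name_to_task = {"rgb": Task("rgb", False), "semantic": Task("semantic", True)}
statistics = {"rgb": (0.0, 1.0, 0.5, 0.2)}   # no entry for classification tasks anymore
for task_name, task in name_to_task.items():
    if not task.is_classification:           # the new explicit guard
        task.set_normalization("standardization", statistics[task_name])
```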
@@ -120,11 +118,9 @@ class MultiTaskDataset(Dataset):
     @property
     def default_vals(self) -> dict[str, tr.Tensor]:
         """default values for __getitem__ if item is not on disk but we retrieve a full batch anyway"""
-        if self._default_vals is None:
-            _default_val = float("nan") if self.handle_missing_data == "fill_nan" else 0
-            self._default_vals = {task: None if self.handle_missing_data == "fill_none" else
-                                  tr.full(self.data_shape[task], _default_val) for task in self.task_names}
-        return self._default_vals
+        _default_val = float("nan") if self.handle_missing_data == "fill_nan" else 0
+        return {task: None if self.handle_missing_data == "fill_none" else tr.full(self.data_shape[task], _default_val)
+                for task in self.task_names}

     @property
     def data_shape(self) -> dict[str, tuple[int, ...]]:
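
The second hunk drops the `self._default_vals` cache and rebuilds the dict on every access — plausibly because `default_vals` depends on `self.task_names`, which `_compute_statistics` swaps out temporarily below, so a cached dict could go stale. A standalone sketch of the resulting behavior, written as a free function (the mode names `fill_nan` and `fill_none` come from the diff; `fill_zero` here is an assumed third mode for illustration):

```python
import torch as tr

def default_vals(task_names, data_shape, handle_missing_data):
    # Mirrors the refactored property: NaN-filled tensors, None, or zero-filled
    # tensors, depending on the handle_missing_data mode.
    _default_val = float("nan") if handle_missing_data == "fill_nan" else 0
    return {task: None if handle_missing_data == "fill_none" else tr.full(data_shape[task], _default_val)
            for task in task_names}

shapes = {"rgb": (64, 64, 3), "depth": (64, 64, 1)}
print(default_vals(shapes, shapes, "fill_nan")["rgb"].isnan().all())   # tensor(True)
print(default_vals(shapes, shapes, "fill_none"))                       # both tasks map to None
```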
@@ -291,30 +287,27 @@ class MultiTaskDataset(Dataset):
             assert not new_mean.isnan().any() and not new_M2.isnan().any(), (mean, new_mean, counts, counts_delta)
             return new_count, new_mean, new_M2

+        missing_tasks_no_classif = [t for t in missing_tasks if not self.name_to_task[t].is_classification]
         ch = {k: v[-1] if len(v) == 3 else 1 for k, v in self.data_shape.items()}
-        counts = {task_name: tr.zeros(ch[task_name]).long() for task_name in missing_tasks}
-        mins = {task_name: tr.zeros(ch[task_name]).type(tr.float64) + 10**10 for task_name in missing_tasks}
-        maxs = {task_name: tr.zeros(ch[task_name]).type(tr.float64) - 10**10 for task_name in missing_tasks}
-        means_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
-        M2s_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks}
+        counts = {task_name: tr.zeros(ch[task_name]).long() for task_name in missing_tasks_no_classif}
+        mins = {task_name: tr.zeros(ch[task_name]).type(tr.float64) + 10**10 for task_name in missing_tasks_no_classif}
+        maxs = {task_name: tr.zeros(ch[task_name]).type(tr.float64) - 10**10 for task_name in missing_tasks_no_classif}
+        means_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks_no_classif}
+        M2s_vec = {task_name: tr.zeros(ch[task_name]).type(tr.float64) for task_name in missing_tasks_no_classif}

         old_names, old_normalization = self.task_names, self.normalization
-        missing_tasks_no_classification = [t for t in missing_tasks if not self.name_to_task[t].is_classification]
-        self.task_names, self.normalization = missing_tasks_no_classification, None # for self[ix]
+        self.task_names, self.normalization = missing_tasks_no_classif, None # for self[ix]

-        res = {}
-        for task in [t for t in missing_tasks if self.name_to_task[t].is_classification]:
-            res[task] = (mins[task] * 0, mins[task] * 0 + 1, mins[task] * 0, mins[task] * 0 + 1)
-
-        if len(missing_tasks_no_classification) == 0:
-            return res
+        if len(missing_tasks_no_classif) == 0:
+            return {}

+        res = {}
         BS = min(len(self), self.batch_size_stats)
         n = (len(self) // BS) + (len(self) % BS != 0)
         logger.debug(f"Global task statistics. Batch size: {BS}. N iterations: {n}.")
         for ix in trange(n, disable=os.getenv("STATS_PBAR", "0") == "0", desc="Computing stats"):
             item = self[ix * BS: min(len(self), (ix + 1) * BS)][0]
-            for task in missing_tasks_no_classification:
+            for task in missing_tasks_no_classif:
                 item_flat_ch = item[task].reshape(-1, ch[task])
                 item_no_nan = item_flat_ch.nan_to_num(0).type(tr.float64)
                 mins[task] = tr.minimum(mins[task], item_no_nan.min(0)[0])
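
Throughout the statistics loop, each batch is flattened to `(num_values, C)` so every per-channel reduction becomes a single op over dim 0, and the ±10**10 initializers act as sentinels for the running min/max. A small sketch of that flattening pattern, with illustrative shapes:

```python
import torch as tr

item = tr.rand(4, 8, 8, 3)                      # assumed NHWC batch
C = item.shape[-1]
item_flat_ch = item.reshape(-1, C)              # (4*8*8, C)
mins = tr.full((C,), 10.0**10, dtype=tr.float64)
maxs = tr.full((C,), -10.0**10, dtype=tr.float64)
item_no_nan = item_flat_ch.nan_to_num(0).type(tr.float64)
mins = tr.minimum(mins, item_no_nan.min(0)[0])  # running per-channel min
maxs = tr.maximum(maxs, item_no_nan.max(0)[0])  # running per-channel max
```

Note that `nan_to_num(0)` maps missing values to 0 before the reductions, so a task with missing frames can have 0 pulled into its observed range; whether that matters depends on the task's value range.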
@@ -323,7 +316,7 @@ class MultiTaskDataset(Dataset):
                 counts[task], means_vec[task], M2s_vec[task] = \
                     update(counts[task], counts_delta, means_vec[task], M2s_vec[task], item_no_nan)

-        for task in missing_tasks_no_classification:
+        for task in missing_tasks_no_classif:
             res[task] = (mins[task], maxs[task], means_vec[task], (M2s_vec[task] / counts[task]).sqrt())
             assert not any(x[0].isnan().any() for x in res[task]), (task, res[task])
         self.task_names, self.normalization = old_names, old_normalization
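
The `update` helper whose tail is visible above (returning `new_count, new_mean, new_M2`, with std recovered later as `sqrt(M2 / count)`) follows the batched form of Welford's online algorithm (the Chan et al. parallel merge). A self-contained sketch of that merge step, assuming fully valid batches — the real reader additionally passes a per-channel `counts_delta` to discount NaN entries:

```python
import torch as tr

def welford_merge(count, mean, M2, batch):
    """Merge a flattened (N, C) batch into running per-channel statistics."""
    n_b = batch.shape[0]
    mean_b = batch.mean(0)
    M2_b = ((batch - mean_b) ** 2).sum(0)
    delta = mean_b - mean
    new_count = count + n_b
    new_mean = mean + delta * n_b / new_count
    new_M2 = M2 + M2_b + delta ** 2 * count * n_b / new_count
    return new_count, new_mean, new_M2

C = 3
count, mean, M2 = tr.zeros(C).long(), tr.zeros(C, dtype=tr.float64), tr.zeros(C, dtype=tr.float64)
data = tr.rand(400, C).type(tr.float64)
for chunk in data.split(100):                   # stream the data in 4 batches
    count, mean, M2 = welford_merge(count, mean, M2, chunk)
assert tr.allclose(mean, data.mean(0))
assert tr.allclose((M2 / count).sqrt(), data.std(0, unbiased=False))
```

This streaming form is what lets the dataset compute global statistics batch by batch instead of materializing every frame at once.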
@@ -356,11 +349,7 @@ class MultiTaskDataset(Dataset):
         for task_name in self.task_names:
             task = [t for t in self.tasks if t.name == task_name][0]
             file_path = self.files_per_repr[task_name][index]
-            if file_path is None:
-                # TODO: I had a .resolve() and .exists() here? WTF? To fix in _build_dataset() maybe.
-                res[task_name] = self.default_vals[task_name]
-            else:
-                res[task_name] = task.load_from_disk(file_path)
+            res[task_name] = self.default_vals[task_name] if file_path is None else task.load_from_disk(file_path)
             if not task.is_classification:
                 if self.normalization is not None and self.normalization[task_name] == "min_max":
                     res[task_name] = task.normalize(res[task_name])
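
The last hunk removes a stale TODO and collapses the if/else into a conditional expression: items missing on disk fall back to `default_vals`, everything else is loaded normally. The equivalent behavior, shown with hypothetical stubs:

```python
def fetch(file_path, default, load_from_disk):
    # A None path means the frame is absent for this task; use the fill value.
    return default if file_path is None else load_from_disk(file_path)

assert fetch(None, "default", lambda p: "loaded") == "default"
assert fetch("frame_0001.npz", "default", lambda p: "loaded") == "loaded"
```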
 