Meehai committed
Commit fff26c3 · Parent: 2db37a2

small update to the scripts

dronescapes_reader/multitask_dataset.py CHANGED
@@ -76,7 +76,8 @@ class MultiTaskDataset(Dataset):
         self.path = Path(path).absolute()
         self.handle_missing_data = handle_missing_data
         self.suffix = files_suffix
-        self.files_per_repr, self.file_names = self._build_dataset()
+        self.all_files_per_repr = self._get_all_npz_files()
+        self.files_per_repr, self.file_names = self._build_dataset() # these are filtered by 'drop' or 'fill_none' logic
         if task_types is None:
             logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.")
             task_types = {}
@@ -150,7 +151,7 @@ class MultiTaskDataset(Dataset):
         return in_files

     def _build_dataset_drop(self) -> BuildDatasetTuple:
-        in_files = self._get_all_npz_files()
+        in_files = self.all_files_per_repr
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} # {node: {name: path}}
         common = set(x.name for x in next(iter(in_files.values())))
         nodes = in_files.keys()
@@ -164,7 +165,7 @@ class MultiTaskDataset(Dataset):
         return files_per_repr, common

     def _build_dataset_fill_none(self) -> BuildDatasetTuple:
-        in_files = self._get_all_npz_files()
+        in_files = self.all_files_per_repr
         name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
         all_files = set(x.name for x in next(iter(in_files.values())))
         nodes = in_files.keys()
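The point of the new `all_files_per_repr` attribute is that the directory tree is scanned once in the constructor, and both filtering strategies (plus outside callers, such as the new assert in the evaluation script below) reuse the same unfiltered map. A minimal sketch of the pattern, assuming `_get_all_npz_files` returns a `{task: [npz paths]}` dict; its body is not part of this commit:

from pathlib import Path

class MultiTaskDataset:
    def __init__(self, path: str):
        self.path = Path(path).absolute()
        # scan the directory tree once and keep the unfiltered {task: [paths]} map
        self.all_files_per_repr = self._get_all_npz_files()
        # the filtered view is then derived from the cached map, not from a re-scan
        self.files_per_repr, self.file_names = self._build_dataset()

    def _get_all_npz_files(self) -> dict[str, list[Path]]:
        # assumed layout: one sub-directory per task/representation, one .npz per frame
        return {d.name: sorted(d.glob("*.npz")) for d in self.path.iterdir() if d.is_dir()}

    def _build_dataset(self) -> tuple[dict[str, list[Path]], list[str]]:
        in_files = self.all_files_per_repr  # reuse the cached scan (the 'drop' variant)
        common = set.intersection(*(set(f.name for f in v) for v in in_files.values()))
        files_per_repr = {k: sorted(f for f in v if f.name in common) for k, v in in_files.items()}
        return files_per_repr, sorted(common)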
 
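For context, the 'drop' and 'fill_none' strategies that consume the cached map, illustrated on toy data. The semantics here are inferred from the method names and the `common` set above, not spelled out in this commit:

files = {"rgb": ["1.npz", "2.npz", "3.npz"], "depth": ["1.npz", "3.npz"]}

# handle_missing_data="drop": keep only frames present in every representation
common = set.intersection(*map(set, files.values()))              # {'1.npz', '3.npz'}
drop = {k: sorted(f for f in v if f in common) for k, v in files.items()}

# handle_missing_data="fill_none": keep the union of frames, padding gaps with None
every = sorted(set.union(*map(set, files.values())))              # ['1.npz', '2.npz', '3.npz']
fill = {k: [f if f in v else None for f in every] for k, v in files.items()}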
scripts/evaluate_semantic_segmentation.py CHANGED
@@ -25,7 +25,7 @@ def compute_metrics(tp: np.ndarray, fp: np.ndarray, tn: np.ndarray, fn: np.ndarr
     iou = tp / (tp + fp + fn)
     return pd.DataFrame([precision, recall, f1, iou], index=["precision", "recall", "f1", "iou"]).T

-def do_one_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
+def compute_metrics_by_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
     df = df.query("class_name == @class_name").drop(columns="class_name")
     df.loc["all"] = df.sum()
     df[["precision", "recall", "f1", "iou"]] = compute_metrics(df["tp"], df["fp"], df["tn"], df["fn"])
@@ -33,7 +33,7 @@ def do_one_class(df: pd.DataFrame, class_name: str) -> pd.DataFrame:
     df = df.fillna(0).round(3)
     return df

-def compute_raw_stats_per_class(reader: MultiTaskDataset, classes: list[str]) -> pd.DataFrame:
+def compute_raw_stats_per_frame(reader: MultiTaskDataset, classes: list[str]) -> pd.DataFrame:
     res = tr.zeros((len(reader), 8, 4)).long() # (N, 8, 4)
     index = []
     for i in trange(len(reader)):
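`compute_raw_stats_per_frame` is the more accurate name: the result is indexed per frame, the `(N, 8, 4)` tensor being N frames x 8 classes x 4 confusion counts (the 8 presumably matches this dataset's class count). A sketch of what one loop iteration plausibly accumulates; the loop body is outside this hunk:

import torch as tr

def frame_confusion(pred: tr.Tensor, gt: tr.Tensor, n_classes: int) -> tr.Tensor:
    """Per-class TP/FP/TN/FN for one (pred, gt) pair of label maps."""
    out = tr.zeros(n_classes, 4, dtype=tr.long)  # columns: tp, fp, tn, fn
    for c in range(n_classes):
        p, g = pred == c, gt == c
        out[c, 0] = (p & g).sum()    # tp: predicted c, is c
        out[c, 1] = (p & ~g).sum()   # fp: predicted c, is not c
        out[c, 2] = (~p & ~g).sum()  # tn: not predicted c, is not c
        out[c, 3] = (~p & g).sum()   # fn: not predicted c, is c
    return out

pred, gt = tr.randint(0, 8, (10, 10)), tr.randint(0, 8, (10, 10))
print(frame_confusion(pred, gt, n_classes=8))  # (8, 4); each row sums to 100 pixels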
@@ -65,38 +65,45 @@ def get_args() -> Namespace:
     parser.add_argument("--classes", required=True, nargs="+")
     parser.add_argument("--class_weights", nargs="+", type=float)
     parser.add_argument("--scenes", nargs="+", default=["all"], help="each scene will get separate metrics if provided")
+    parser.add_argument("--overwrite", action="store_true")
     args = parser.parse_args()
     if args.class_weights is None:
+        logger.info("No class weights provided, defaulting to equal weights.")
         args.class_weights = [1 / len(args.classes)] * len(args.classes)
     assert (a := len(args.class_weights)) == (b := len(args.classes)), (a, b)
     assert np.fabs(sum(args.class_weights) - 1) < 1e-3, (args.class_weights, sum(args.class_weights))
     assert args.output_path.suffix == ".csv", f"Prediction file must end in .csv, got: '{args.output_path.suffix}'"
     if len(args.scenes) > 0:
         logger.info(f"Scenes: {args.scenes}")
+    if args.output_path.exists() and args.overwrite:
+        os.remove(args.output_path)
     return args

 def main(args: Namespace):
-    temp_dir = Path(TemporaryDirectory().name)
-    temp_dir.mkdir(exist_ok=False)
+    # setup to put both directories in the same parent directory for the reader to work.
+    (temp_dir := Path(TemporaryDirectory().name)).mkdir(exist_ok=False)
     os.symlink(args.y_dir, temp_dir / "pred")
     os.symlink(args.gt_dir, temp_dir / "gt")
+    sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
+    reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
+    assert (a := len(reader.all_files_per_repr["gt"])) == (b := len(reader.all_files_per_repr["pred"])), f"{a} vs {b}"

+    # Compute TP, FP, TN, FN for each frame
     if not args.output_path.exists():
-        sema_repr = partial(SemanticRepresentation, classes=args.classes, color_map=[[0, 0, 0]] * len(args.classes))
-        reader = MultiTaskDataset(temp_dir, handle_missing_data="drop", task_types={"pred": sema_repr, "gt": sema_repr})
-        raw_stats = compute_raw_stats_per_class(reader, args.classes)
+        raw_stats = compute_raw_stats_per_frame(reader, args.classes)
         logger.info(f"Stored raw metrics file to: '{args.output_path}'")
         raw_stats.to_csv(args.output_path)
     else:
         logger.info(f"Loading raw metrics from: '{args.output_path}'. Delete this file if you want to recompute.")
         raw_stats = pd.read_csv(args.output_path, index_col=0)

-    metrics_per_class = pd.concat([do_one_class(raw_stats, class_name) for class_name in args.classes])
+    # Compute Precision, Recall, F1, IoU for each class and put them together in the same df.
+    metrics_per_class = pd.concat([compute_metrics_by_class(raw_stats, class_name) for class_name in args.classes])

+    # Aggregate the class-level metrics to the final metrics based on the class weights (compute globally by stats)
     final_agg = []
-    for scene in args.scenes:
-        final_agg.append(compute_final_per_scene(metrics_per_class, scene, classes=args.classes,
-                                                 class_weights=args.class_weights))
+    for scene in args.scenes: # if we have >1 scene in the test set, aggregate the results for each of them separately
+        final_agg.append(compute_final_per_scene(metrics_per_class, scene, args.classes, args.class_weights))
     final_agg = pd.DataFrame(final_agg, columns=["scene", "iou", "f1"]).set_index("scene")
     if len(args.scenes) > 1:
         final_agg.loc["mean"] = final_agg.mean()
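On the symlink setup: `MultiTaskDataset` expects a single root directory with one sub-directory per task, so the script fakes that layout without copying any data. A standalone sketch with hypothetical `preds/` and `gt/` inputs; note that the `TemporaryDirectory` object is discarded immediately (in CPython its cleanup runs at once), so only its unique path survives and `mkdir` recreates the directory:

import os
from pathlib import Path
from tempfile import TemporaryDirectory

y_dir, gt_dir = Path("preds").absolute(), Path("gt").absolute()  # hypothetical inputs
(temp_dir := Path(TemporaryDirectory().name)).mkdir(exist_ok=False)  # fresh unique dir
os.symlink(y_dir, temp_dir / "pred")  # temp_dir/pred -> predictions
os.symlink(gt_dir, temp_dir / "gt")   # temp_dir/gt   -> ground truth
# temp_dir now looks like a two-task dataset root: pred/*.npz and gt/*.npz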
 
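`compute_final_per_scene` is not part of this commit; judging from its call site and the sum-to-1 assert on `--class_weights` in `get_args`, the final per-scene score is plausibly a weighted average of the class-level metrics. A sketch under that assumption:

import numpy as np

def weighted_aggregate(iou_per_class: list[float], f1_per_class: list[float],
                       weights: list[float]) -> tuple[float, float]:
    # assumption: score = sum_i w_i * metric_i with sum(w_i) == 1, matching the
    # equal-weight default [1 / n_classes] * n_classes when --class_weights is absent
    return float(np.dot(weights, iou_per_class)), float(np.dot(weights, f1_per_class))

iou, f1 = weighted_aggregate([0.7, 0.5], [0.8, 0.6], [0.5, 0.5])
print(round(iou, 3), round(f1, 3))  # 0.6 0.7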