Meehai committed
Commit f8d91d8 · 1 Parent(s): 868a71f

added old evaluation script

Files changed (2):
  1. .gitignore +1 -0
  2. scripts/eval_script_old.py +189 -0
.gitignore CHANGED
@@ -15,4 +15,5 @@ commands.txt
 raw_data/npz_540p_2/
 here.csv
 *.ttf
+scripts/results/

scripts/eval_script_old.py ADDED
@@ -0,0 +1,189 @@
+"""
+The old evaluation script.
+To run, you first need to split the test scenes data into 3 different directories:
+
+cd /dronescapes/data
+scenes=(comana barsana norway);
+for scene in ${scenes[@]} ; do
+  ls test_set_annotated_only | while read task; do
+    mkdir -p test_set_annotated_only_per_scene/$scene/$task;
+    ls test_set_annotated_only/$task | grep "$scene" | while read line; do
+      cp test_set_annotated_only/$task/$line test_set_annotated_only_per_scene/$scene/$task/$line;
+    done;
+  done
+done
+
+Then run this:
+cd /dronescapes/scripts
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/comana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/comana/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/comana --overwrite
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/barsana/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/barsana/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/barsana --overwrite
+python eval_script_old.py --gt_path ../data/test_set_annotated_only_per_scene/norway/semantic_segprop8/ --pred_path ../data/test_set_annotated_only_per_scene/norway/semantic_mask2former_swin_mapillary_converted/ --num_classes 8 -o results/norway --overwrite
+"""
+
+from __future__ import annotations
+import os
+import cv2
+import numpy as np
+import multiprocessing as mp
+from natsort import natsorted
+from pathlib import Path
+import shutil
+import tempfile
+from tqdm import tqdm
+
+import argparse
+import warnings
+warnings.filterwarnings("ignore")
+
+def convert_label2multi(label, class_id):
+    out = np.zeros((label.shape[0], label.shape[1]), dtype=np.uint8)
+    data_indices = np.where(np.equal(label, class_id))
+    out[data_indices[0], data_indices[1]] = 1
+    return np.array(out, dtype=bool)
+
+def process_all_video_frames(gt_files: list[Path], pred_files: list[Path], class_id: int):
+    global_true_positives = 0
+    global_true_negatives = 0
+    global_false_positives = 0
+    global_false_negatives = 0
+
+    for gt_file, pred_file in tqdm(zip(gt_files, pred_files), total=len(gt_files), desc=f"{class_id=}"):
+        gt_label = np.load(gt_file, allow_pickle=True)["arr_0"]
+        net_label = np.load(pred_file, allow_pickle=True)["arr_0"]
+
+        if gt_label.shape == ():
+            gt_label = gt_label.item()['data']
+        gt_label = convert_label2multi(gt_label, class_id)
+        net_label = convert_label2multi(net_label, class_id)
+
+        true_positives = np.count_nonzero(gt_label * net_label)
+        true_negatives = np.count_nonzero((gt_label + net_label) == 0)
+        false_positives = np.count_nonzero((np.array(net_label, dtype=int) - np.array(gt_label, dtype=int)) > 0)
+        false_negatives = np.count_nonzero((np.array(gt_label, dtype=int) - np.array(net_label, dtype=int)) > 0)
+
+        global_true_positives += true_positives
+        global_true_negatives += true_negatives
+        global_false_positives += false_positives
+        global_false_negatives += false_negatives
+
+    global_precision = global_true_positives / (global_true_positives + global_false_positives + np.spacing(1))
+    global_recall = global_true_positives / (global_true_positives + global_false_negatives + np.spacing(1))
+    global_f1_score = (2 * global_precision * global_recall) / (global_precision + global_recall + np.spacing(1))
+    global_iou = global_true_positives / (global_true_positives + global_false_positives + global_false_negatives + np.spacing(1))
+
+    return (global_precision, global_recall, global_f1_score, global_iou)
+
+def join_results(args: argparse.Namespace):
+    assert args.num_classes in (7, 8, 10), args.num_classes
+    if args.num_classes == 7:
+        CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky']
+        CLASS_WEIGHTS = [0.28172092, 0.37426183, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721]
+        # [0.37426183 0.28172092 0.13341699 0.08660721 0.05987466 0.05937348 0.00474491]
+    elif args.num_classes == 8:
+        CLASS_NAMES = ['land', 'forest', 'residential', 'road', 'little-objects', 'water', 'sky', 'hill']
+        CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.13341699, 0.05937348, 0.00474491, 0.05987466, 0.08660721, 0.06836531]
+        # [0.30589653 0.28172092 0.13341699 0.08660721 0.06836531 0.05987466 0.05937348 0.00474491]
+    elif args.num_classes == 10:
+        CLASS_NAMES = ['land', 'forest', 'low-level', 'road', 'high-level', 'cars', 'water', 'sky', 'hill', 'person']
+        CLASS_WEIGHTS = [0.28172092, 0.30589653, 0.09954808, 0.05937348, 0.03386891, 0.00445865, 0.05987466, 0.08660721, 0.06836531, 0.00028626]
+        # [0.30589653 0.28172092 0.09954808 0.08660721 0.06836531 0.05987466 0.05937348 0.03386891 0.00445865 0.00028626]
+
+    out_path = os.path.join(args.out_dir, 'joined_results_' + str(args.num_classes) + 'classes.txt')
+    out_file = open(out_path, 'w')
+
+    joined_f1_scores_mean = []
+    joined_iou_scores_mean = []
+
+    for CLASS_ID in range(0, len(CLASS_NAMES)):
+        RESULT_FILE = os.path.join(args.out_dir, 'evaluation_dronescapes_CLASS_' + str(CLASS_ID) + '.txt')
+        result_file_lines = open(RESULT_FILE, 'r').read().splitlines()
+
+        for idx, line in enumerate(result_file_lines):
+            if idx != 0:
+                splits = line.split(',')
+                f1_score = float(splits[2])
+                iou_score = float(splits[3])
+
+                out_file.write('------------------------- ' + ' CLASS ' + str(CLASS_ID) + ' - ' + CLASS_NAMES[CLASS_ID] + ' --------------------------------------------\n')
+                # F1Score
+                out_file.write('F1-Score: ' + str(round(f1_score, 4)) + '\n')
+                # Mean IOU
+                out_file.write('IOU: ' + str(round(iou_score, 4)) + '\n')
+                out_file.write('\n\n')
+                joined_f1_scores_mean.append(f1_score)
+                joined_iou_scores_mean.append(iou_score)
+
+    out_file.write('\n\n')
+    out_file.write('Mean F1-Score all classes: ' + str(round(np.mean(joined_f1_scores_mean), 4)) + '\n')
+    out_file.write('Mean IOU all classes: ' + str(round(np.mean(joined_iou_scores_mean), 4)) + '\n')
+    out_file.write('\n\n')
+
+    out_file.write('\n\n')
+    out_file.write('Weighted Mean F1-Score all classes: ' + str(round(np.sum(np.dot(joined_f1_scores_mean, CLASS_WEIGHTS)), 4)) + '\n')
+    out_file.write('Weighted Mean IOU all classes: ' + str(round(np.sum(np.dot(joined_iou_scores_mean, CLASS_WEIGHTS)), 4)) + '\n')
+    out_file.write('\n\n')
+
+    out_file.close()
+    print(f"Written to '{out_path}'")
+
+def main(args: argparse.Namespace):
+    gt_files = natsorted([x for x in args.gt_path.iterdir()], key=lambda x: Path(x).name)
+    pred_files = natsorted([x for x in args.pred_path.iterdir()], key=lambda x: Path(x).name)
+    assert all(Path(x).exists() for x in [*gt_files, *pred_files])
+    global_precision, global_recall, global_f1, global_iou = process_all_video_frames(gt_files, pred_files, args.class_id)
+
+    out_path = os.path.join(args.out_dir, 'evaluation_dronescapes_CLASS_' + str(args.class_id) + '.txt')
+    out_file = open(out_path, 'w')
+    out_file.write('precision,recall,f1,iou\n')
+    out_file.write('{0:.6f},{1:.6f},{2:.6f},{3:.6f}\n'.format(global_precision, global_recall, global_f1, global_iou))
+    out_file.close()
+    print(f"Written to '{out_path}'")
+
+if __name__ == "__main__":
+    """
+    Barsana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220517_train_on_even_semisup_on_odd_validate_on_last_odd_triplet_journal_split/only_manually_annotated_test_files_36.txt
+    Norce: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20220810_new_norce_clip/only_manually_annotated_test_files_50.txt
+    Comana: /Date3/hpc/datasets/dronescapes/all_scenes/dataset_splits/20221208_new_comana_clip/only_manually_annotated_test_files_30.txt
+    gt_path: /Date3/hpc/datasets/dronescapes/all_scenes
+    pred_path: /Date3/hpc/code/Mask2Former/demo_dronescapes/outputs_dronescapes_compatible/mapillary_sseg
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--gt_path", type=Path, required=True)
+    parser.add_argument("--pred_path", type=Path, required=True)
+    parser.add_argument("--out_dir", "-o", required=True, type=Path, default=Path(__file__).parent / "out_dir")
+    parser.add_argument("--num_classes", type=int, default=8)
+    parser.add_argument("--txt_path")
+    parser.add_argument("--overwrite", action="store_true")
+    args = parser.parse_args()
+    assert not args.out_dir.exists() or args.overwrite, f"'{args.out_dir}' exists. Use --overwrite"
+    shutil.rmtree(args.out_dir, ignore_errors=True)
+    os.makedirs(args.out_dir, exist_ok=True)
+
+    if args.txt_path is not None:
+        (tempdir := Path(tempfile.TemporaryDirectory().name)).mkdir()
+        (tempdir / "gt").mkdir()
+        (tempdir / "pred").mkdir()
+        print(f"old pattern detected. Copying files to a temp dir: {tempdir}")
+
+        test_files = natsorted(open(args.txt_path, "r").read().splitlines())
+        scenes = natsorted(set(([os.path.dirname(x) for x in test_files])))
+        assert len(scenes) == 1, scenes
+        files = natsorted([x for x in test_files if scenes[0] in x])
+        gt_files = [f"{args.gt_path}/{f.split('/')[0]}/segprop{args.num_classes}/{f.split('/')[1]}.npz" for f in files]
+        pred_files = [f"{args.pred_path}/{f.split('/')[0]}/{int(f.split('/')[1]):06}.npz" for f in files]
+        assert all(Path(x).exists() for x in [*gt_files, *pred_files])
+        for _file in gt_files:
+            os.symlink(_file, tempdir / "gt" / Path(_file).name)
+        for _file in pred_files:
+            os.symlink(_file, tempdir / "pred" / Path(_file).name)
+        args.gt_path = tempdir / "gt"
+        args.pred_path = tempdir / "pred"
+        args.txt_path = None
+
+    for class_id in range(args.num_classes):
+        args.class_id = class_id
+        main(args)
+    join_results(args)
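
Note (not part of the commit): the per-class true/false positive/negative counts that process_all_video_frames accumulates one class at a time can also be read off a single confusion matrix per frame. A minimal NumPy sketch, assuming gt and pred are hypothetical integer label maps of equal shape with values in [0, num_classes):

import numpy as np

def per_class_counts(gt: np.ndarray, pred: np.ndarray, num_classes: int):
    # confusion[i, j] = number of pixels labeled i in the ground truth and predicted as j
    flat = gt.ravel().astype(np.int64) * num_classes + pred.ravel().astype(np.int64)
    confusion = np.bincount(flat, minlength=num_classes * num_classes).reshape(num_classes, num_classes)
    tp = np.diag(confusion)            # correctly predicted pixels per class
    fp = confusion.sum(axis=0) - tp    # predicted as the class but labeled otherwise
    fn = confusion.sum(axis=1) - tp    # labeled as the class but predicted otherwise
    return tp, fp, fn

# Per class: precision = tp / (tp + fp), recall = tp / (tp + fn),
# IoU = tp / (tp + fp + fn), F1 = 2 * precision * recall / (precision + recall).

Summed over all frames, these counts yield the same global per-class precision, recall, F1 and IoU that the script writes to its per-class result files.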