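"""Evaluate a detector's per-class detection rates on FiftyOne datasets.

Pulls ground truth and model predictions sequence by sequence via
``fo_to_payload``, converts normalized [x, y, w, h] boxes to absolute
[x1, y1, x2, y2] pixel coordinates, accumulates everything in a confusion
matrix, and prints TP/FP/FN counts plus per-class detection rates.
"""
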
import torch
import fiftyone as fo
from tqdm import tqdm

from seametrics.fo_utils.utils import fo_to_payload
from utils import BoxMetrics
from const import INDEX_MAPPING, CLASS_MAPPING, INDEX_MAPPING_INV

# Sequences to evaluate are selected by dataset tag and camera slice.
tags = ["WHALES"]
cameras = ["thermal_narrow"]  # default; overridden per dataset below

dataset_name = "SENTRY_VIDEOS_DATASET_QA"
# dataset_name = "SAILING_DATASET_QA"
model = "cerulean-level-17_11_2023_RL_SPLIT_ep147_CNN"
det_gt_field = "ground_truth_det"

# conf=0 and iou_thres=0 keep every prediction and match candidate.
cm = BoxMetrics(nc=10, conf=0, iou_thres=0)
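# Assumed layout of cm.matrix (the slicing at the bottom of this script
# relies on it): an (nc + 1) x (nc + 1) array indexed
# [predicted_class, gt_class], where the last row/column holds background,
# i.e. unmatched predictions (last column) and unmatched ground truth (last row).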

if dataset_name == "SAILING_DATASET_QA":
    cameras = ["thermal_left"]
    label_field = model  # image dataset: labels live on the sample
elif dataset_name == "SENTRY_VIDEOS_DATASET_QA":
    cameras = ["thermal_wide"]
    label_field = f"frames.{model}"  # video dataset: labels live on the frames
else:
    raise ValueError(f"Unknown dataset: {dataset_name}")

dataset_view = (
    fo.load_dataset(dataset_name)
    .match_tags(tags)
    .select_group_slices(cameras)
    .filter_labels(label_field, True, only_matches=False)
)
sequences = dataset_view.distinct("sequence")

for sequence in tqdm(sequences):
    # Pull ground truth and predictions for one sequence at a time.
    payload = fo_to_payload(
        dataset=dataset_name,
        gt_field=det_gt_field,
        models=[model],
        tracking_mode=True,
        sequence_list=[sequence],
        excluded_classes=["BIRD"],
    )

    target = payload["sequences"][sequence][det_gt_field]
    preds = payload["sequences"][sequence][model]
    resolution = payload["sequences"][sequence]["resolution"]  # (height, width)
    target_tm = []
    preds_tm = []
    # Ground truth: one (M, 5) tensor of [class, x1, y1, x2, y2] per frame.
    for frame in target:
        target_tm_batch = []
        for det in frame:
            mapped = CLASS_MAPPING[det["label"]]
            if mapped is None:
                continue  # class is deliberately excluded from evaluation
            label = INDEX_MAPPING[mapped] - 1  # zero-based class index
            # Normalized [x, y, w, h] -> absolute [x1, y1, x2, y2] pixels.
            box = det["bounding_box"]
            x1, y1, x2, y2 = box[0], box[1], box[0] + box[2], box[1] + box[3]
            x1, y1, x2, y2 = x1 * resolution[1], y1 * resolution[0], x2 * resolution[1], y2 * resolution[0]
            target_tm_batch.append([label, x1, y1, x2, y2])
        target_tm.append(torch.tensor(target_tm_batch) if target_tm_batch else torch.empty((0, 5)))
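
    # Worked example for the conversion above, assuming resolution == (512, 640)
    # as (height, width): a normalized box [0.5, 0.5, 0.1, 0.2] becomes
    # x1 = 0.5 * 640 = 320, y1 = 0.5 * 512 = 256,
    # x2 = 0.6 * 640 = 384, y2 = 0.7 * 512 = 358.4.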

    # Predictions: one (N, 6) tensor of [x1, y1, x2, y2, conf, class] per frame.
    for frame in preds:
        pred_tm_batch = []
        for det in frame:
            label = INDEX_MAPPING[det["label"]] - 1  # zero-based class index
            box = det["bounding_box"]
            x1, y1, x2, y2 = box[0], box[1], box[0] + box[2], box[1] + box[3]
            x1, y1, x2, y2 = x1 * resolution[1], y1 * resolution[0], x2 * resolution[1], y2 * resolution[0]
            conf = 1  # scores are not used here; treat every prediction as certain
            pred_tm_batch.append([x1, y1, x2, y2, conf, label])
        preds_tm.append(torch.tensor(pred_tm_batch) if pred_tm_batch else torch.empty((0, 6)))
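
    # Cheap sanity check (an addition, not part of the original pipeline):
    # every frame tensor should match the (M, 5) / (N, 6) layouts built above.
    assert all(t.shape[-1] == 5 for t in target_tm)
    assert all(p.shape[-1] == 6 for p in preds_tm)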

    # Accumulate the confusion matrix frame by frame.
    for pred_batch, target_batch in zip(preds_tm, target_tm):
        cm.process_batch(pred_batch, target_batch)

print("SUMMARY: ")
print("\nmodel: ", model)
print("\nconfusion matrix: ")
print(cm.matrix.astype(int))

tp = cm.matrix[:-1, :-1].sum()
fp = cm.matrix[:-1, -1].sum()
fn = cm.matrix[-1, :-1].sum()
print("\nTP: ", tp, "FP: ", fp, "FN: ", fn, "support: ", tp + fn)
# Per-class detection rate (recall): matched GT boxes of class i over all GT
# boxes of class i, regardless of which class the detector predicted.
print("\nDetection Rates:")
for i in range(10):
    tp = cm.matrix[:-1, i].sum()
    fn = cm.matrix[-1, i].sum()
    if tp + fn == 0:
        print(f"{INDEX_MAPPING_INV[i + 1]}: NaN")
    else:
        print(f"{INDEX_MAPPING_INV[i + 1]}: {tp / (tp + fn):.3f}")