# tappyness1 — initial app (commit 26364eb)
from peekingduck.pipeline.nodes.model import yolo as pkd_yolo
from peekingduck.pipeline.nodes.model import yolact_edge as pkd_yolact
from src.data_ingestion.data_ingestion import AnnotsGTGetter
from src.inference import Inference
from src.confusion_matrix import ConfusionMatrix
import yaml
from itertools import product
import pandas as pd
import numpy as np
def transform_gt_bbox_format(ground_truth, img_size, format = "coco"):
    """Convert ground-truth bboxes to normalised pascal-voc format for the confusion matrix.

    Output rows are [class, x1, y1, x2, y2] with coordinates normalised to [0, 1]
    by the image width (x) and height (y).

    Args:
        ground_truth: n x 5 array-like; if coco - n x [class, x, y, w, h],
            if yolo - n x [class, x-mid, y-mid, w, h]. Not modified; a float
            copy is transformed and returned (the previous in-place version
            clobbered the caller's array and truncated integer inputs to 0).
        img_size: (Height, Width, Depth) of the image, used to normalise.
        format (str, optional): annotation format of `ground_truth`.
            Only "coco" is transformed; anything else is returned as the
            untouched copy. Defaults to "coco".

    Returns:
        np.ndarray: n x 5 float array in normalised pascal-voc format.
    """
    # work on a float copy: avoids mutating the caller's data and avoids
    # silent integer truncation when normalised values are written back
    ground_truth = np.array(ground_truth, dtype=float)
    if format == "coco":
        height, width = img_size[0], img_size[1]
        # x2 = (x + w) / W must be computed before x1 overwrites column 1
        ground_truth[:, 3] = (ground_truth[:, 1] + ground_truth[:, 3]) / width
        ground_truth[:, 1] = ground_truth[:, 1] / width
        # y2 = (y + h) / H must be computed before y1 overwrites column 2
        ground_truth[:, 4] = (ground_truth[:, 2] + ground_truth[:, 4]) / height
        ground_truth[:, 2] = ground_truth[:, 2] / height
    return ground_truth
def load_model(cfg_obj, iou_threshold, score_threshold):
    """Instantiate the model node described by the config.

    Args:
        cfg_obj (dict): parsed config. Reads cfg_obj['error_analysis']['peekingduck']
            and, when that is True, cfg_obj['pkd'] for the model family/version and
            cfg_obj['error_analysis']['inference_labels_dict'] for the detect list.
        iou_threshold (float): IoU threshold passed to the model node.
        score_threshold (float): confidence threshold passed to the model node.

    Returns:
        The instantiated PeekingDuck model node.

    Raises:
        ValueError: if cfg_obj['pkd']['model'] names an unsupported model.
        NotImplementedError: if 'peekingduck' is False and no custom model has
            been wired in below. (Previously both paths crashed with an opaque
            NameError on the unbound local `model`.)
    """
    if cfg_obj['error_analysis']['peekingduck']:
        pkd_model = cfg_obj['pkd']['model']
        # only instantiates the configured pkd model; feel free to extend
        # with other PeekingDuck model nodes
        if pkd_model == "yolo":
            yolo_ver = cfg_obj['pkd']['yolo_ver']
            model = pkd_yolo.Node(model_type = yolo_ver,
                    # yolo's detect list uses the label-dict keys
                    detect= list(cfg_obj['error_analysis']['inference_labels_dict'].keys()),
                    iou_threshold = iou_threshold,
                    score_threshold = score_threshold)
        elif pkd_model == "yolact_edge":
            yolact_ver = cfg_obj['pkd']['yolact_ver']
            model = pkd_yolact.Node(model_type = yolact_ver,
                    # yolact's detect list uses the label-dict values
                    detect= list(cfg_obj['error_analysis']['inference_labels_dict'].values()),
                    iou_threshold = iou_threshold,
                    score_threshold = score_threshold)
        else:
            raise ValueError(
                f"Unsupported pkd model '{pkd_model}': expected 'yolo' or 'yolact_edge'")
        return model
    # call in your own model here:
    # model = <your model import here>
    # make sure that your model has iou_threshold and score_threshold attributes,
    # which you can easily set right here before returning it
    raise NotImplementedError(
        "No custom model configured: set error_analysis.peekingduck to True "
        "or instantiate your own model in load_model()")
class ErrorAnalysis:
    """Runs inference over a dataset at every (IoU, confidence) threshold
    combination from the config and accumulates confusion matrices plus
    precision/recall against ground-truth annotations.

    Args:
        cfg_path (str, optional): path to the YAML config file.
            Defaults to 'cfg/cfg.yml'.
    """
    def __init__(self, cfg_path = 'cfg/cfg.yml'):
        # context manager so the config file handle is always closed
        # (previously the open() leaked the descriptor)
        with open(cfg_path) as cfg_file:
            self.cfg_obj = yaml.load(cfg_file, Loader=yaml.FullLoader)
        # threshold grids to sweep over
        self.iou_thresh = self.cfg_obj['error_analysis']['iou_thresholds']
        self.conf_thresh = self.cfg_obj['error_analysis']['conf_thresholds']
        self.inference_folder = self.cfg_obj['dataset']['img_folder_path']
        # "det" (detection) or "seg" (segmentation)
        self.task = self.cfg_obj['error_analysis']['task']
        base_iou_threshold = self.cfg_obj['visual_tool']['iou_threshold']
        base_score_threshold = self.cfg_obj['visual_tool']['conf_threshold']
        # accumulates [iou, conf, precision, recall] rows across sweeps
        self.cm_results = []
        # instantiate a "base" model with configs already
        self.model = load_model(self.cfg_obj, base_iou_threshold, base_score_threshold)
    def generate_inference(self, img_fname = "000000576052.jpg"):
        """Run inference on img based on the image file name. Path to the folder is determined by cfg
        Args:
            img_fname (str, optional): image file name inside the configured
                inference folder. Defaults to "000000576052.jpg".
        Returns:
            ndarray, tuple: if task is 'det': ndarray - n x [x1, y1, x2, y2, score, class], (H, W, D)
            ndarray, tuple: if task is 'seg': list - n x [[array of binary mask], score, class], (H, W, D)
        """
        inference_obj = Inference(self.model, self.cfg_obj)
        img_path = f"{self.inference_folder}{img_fname}"
        return inference_obj.run_inference_path(img_path)
    def get_annots(self):
        """Fetch GT annotations for the dataset and cache them on self.gt_dict
        (maps image file name -> ground-truth annotations).
        """
        annots_obj = AnnotsGTGetter(cfg_obj = self.cfg_obj)
        self.gt_dict = annots_obj.get_gt_annots()
    def generate_conf_matrix(self, iou_threshold = 0.5, conf_threshold = 0.2):
        """Generate one confusion matrix by running inference on each image.
        Args:
            iou_threshold (float, optional): IoU threshold for TP matching. Defaults to 0.5.
            conf_threshold (float, optional): score cut-off for predictions. Defaults to 0.2.
        Returns:
            tuple: (confusion matrix ndarray, precision, recall)
        """
        num_classes = len(self.cfg_obj['error_analysis']['labels_dict'])
        ground_truth_format = self.cfg_obj["error_analysis"]["ground_truth_format"]
        idx_base = self.cfg_obj["error_analysis"]["idx_base"]
        # TODO - currently, Conf Matrix is 0 indexed but all my classes are one-based index.
        # need to find a better way to resolve this
        cm = ConfusionMatrix(num_classes=num_classes, CONF_THRESHOLD = conf_threshold, IOU_THRESHOLD=iou_threshold)
        for fname in self.gt_dict:
            inference_output, img_size = self.generate_inference(fname)
            # copy so the cached GT is not clobbered by index shifts / bbox transforms
            ground_truth = self.gt_dict[fname].copy()
            if self.task == "det":
                # shift inference class indices and GT class indices to 0-base
                inference_output[:, -1] -= idx_base
                ground_truth[:, 0] -= idx_base
                # inference is in x1, y1, x2, y2, scores, class, so OK;
                # coco GT is x, y, width, height - normalise to pascal voc
                # using img shape (H, W, D) to suit the confusion matrix
                ground_truth = transform_gt_bbox_format(ground_truth=ground_truth, img_size=img_size, format = ground_truth_format)
            else:
                # seg GT rows are [class, mask]; only the class index shifts
                ground_truth = [[gt[0] - idx_base, gt[1]] for gt in ground_truth]
            cm.process_batch(inference_output, ground_truth, task = self.task)
        cm.get_PR()
        return cm.matrix, cm.precision, cm.recall
    def generate_conf_matrices(self, print_matrix = True):
        """Generate confusion matrices for every (IoU, conf) threshold pair,
        appending [iou, conf, precision, recall] to self.cm_results.
        Args:
            print_matrix (bool, optional): print each matrix when done. Defaults to True.
        """
        # all combinations of the threshold grids
        combinations = list(product(self.iou_thresh, self.conf_thresh))
        comb_cms = {}
        for iou, conf in combinations:
            # rebuild the model so its thresholds match this combination
            self.model = load_model(self.cfg_obj, iou_threshold=iou, score_threshold=conf)
            returned_matrix, precision, recall = self.generate_conf_matrix(iou_threshold = iou, conf_threshold = conf)
            comb_cms[f"IOU: {iou}, Conf: {conf}"] = returned_matrix
            self.cm_results.append([iou, conf, precision, recall])
        if print_matrix:
            for k, v in comb_cms.items():
                print (k)
                print (v)
    def proc_pr_table(self):
        """Tabulate accumulated precision/recall results and print them."""
        self.cm_table = pd.DataFrame(self.cm_results, columns = ['IOU_Threshold', 'Score Threshold', 'Precision', 'Recall'])
        print (self.cm_table)
if __name__ == "__main__":
    # build the analyser, load ground truth, then sweep the threshold grid
    analysis = ErrorAnalysis()
    analysis.get_annots()
    analysis.generate_conf_matrices()