error_analysis:
  # detection_classes: ["person", "bicycle"]
  labels_dict: {"person": 1, "bicycle": 2}
  conf_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8] # confidence thresholds to evaluate at; also referred to as score thresholds
  iou_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8] # IoU thresholds for matching predictions to ground truth; historically also called the NMS threshold
  # nms_thresholds: [0.2, 0.5, 0.8]
  bbox_format: "pascal_voc" # yolo / coco / pascal_voc (WIP feature)
  peekingduck: True # set to False when running inference with your own model rather than through the PeekingDuck wrapper
  ground_truth_format: "coco" # yolo / coco / pascal_voc (WIP feature)
  idx_base: 1 # whether class indices are zero- or one-based; applies to both ground truth and prediction classes
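  # Note (assumption): conf_thresholds and iou_thresholds are presumably swept
  # together, so the analysis is repeated for each (confidence, IoU) pair.
  # Box format reference for bbox_format / ground_truth_format:
  #   pascal_voc: [x_min, y_min, x_max, y_max] in absolute pixels
  #   coco:       [x_min, y_min, width, height] in absolute pixels
  #   yolo:       [x_center, y_center, width, height] normalised to [0, 1]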
pkd:
  model: "yolo"
  yolo_ver: "v4tiny"
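  # Note (assumption): these values are presumably passed through to PeekingDuck's
  # model.yolo node, i.e. model_type "v4tiny" detecting the classes listed under
  # dataset.classes below.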
dataset:
  classes: ["person", "bicycle"] # should match the detection classes configured in the error_analysis section above
  img_folder_path: 'data/annotations_trainval2017/coco_small/' # path, relative to the repo root, to the folder holding the COCO dataset images
  annotations_folder_path: 'data/annotations_trainval2017/annotations/' # path, relative to the repo root, to the folder holding the annotations file
  annotations_fname: "instances_val2017.json" # filename of the COCO annotations JSON
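  # Expected layout (assumption), relative to the repo root:
  #   data/annotations_trainval2017/coco_small/                           <- COCO images
  #   data/annotations_trainval2017/annotations/instances_val2017.json    <- annotations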
visual_tool:
  bbox_thickness: 2 # bounding box line thickness, in pixels
  font_scale: 1 # label font scale
  font_thickness: 2 # label font thickness
  pred_colour: [255, 0, 0] # prediction colour, in [B, G, R]
  gt_colour: [0, 255, 0] # ground truth colour, in [B, G, R]
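# Minimal usage sketch (assumption), kept as comments so this file remains valid
# YAML. It shows how the values above could be consumed with PyYAML and OpenCV;
# the config path "cfg/cfg.yml" and the sample image name are hypothetical and
# not part of this repo's documented interface.
#
#   import cv2
#   import yaml
#
#   with open("cfg/cfg.yml") as f:          # hypothetical path to this file
#       cfg = yaml.safe_load(f)
#
#   vt = cfg["visual_tool"]
#   img = cv2.imread("sample.jpg")          # any image from dataset.img_folder_path
#
#   # draw one pascal_voc box [x_min, y_min, x_max, y_max] with the configured style
#   x_min, y_min, x_max, y_max = 50, 60, 200, 220
#   cv2.rectangle(img, (x_min, y_min), (x_max, y_max),
#                 tuple(vt["pred_colour"]), vt["bbox_thickness"])
#   cv2.putText(img, "person 0.87", (x_min, y_min - 5),
#               cv2.FONT_HERSHEY_SIMPLEX, vt["font_scale"],
#               tuple(vt["pred_colour"]), vt["font_thickness"])
#   cv2.imwrite("sample_annotated.jpg", img)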