Commit b78b0dc (parent: 71f8e0c) by tappyness1
initial commit
Files changed:
- .gitignore +3 -0
- app.py +65 -0
- cfg/cfg.yml +27 -0
- data/annotations_trainval2017/annotations/instances_val2017.json +1 -0
- data/annotations_trainval2017/coco_small/000000011149.jpg +0 -0
- data/annotations_trainval2017/coco_small/000000441586.jpg +0 -0
- data/annotations_trainval2017/coco_small/000000576031.jpg +0 -0
- data/annotations_trainval2017/coco_small/000000576052.jpg +0 -0
- environment.yml +16 -0
- requirements.txt +14 -0
- src/confusion_matrix.py +153 -0
- src/data_ingestion/data_ingestion.py +78 -0
- src/error_analysis.py +156 -0
- src/get_data_coco/get_img.py +33 -0
- src/inference.py +100 -0
- src/st_image_tools.py +329 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
*.ipynb_checkpoints
__pycache__
*.ipynb
app.py
ADDED
@@ -0,0 +1,65 @@
import streamlit as st
from src.st_image_tools import ImageTool


def call_in_image_tool(cfg_path):
    image_tool = ImageTool(cfg_path)
    return image_tool


def main(cfg_path="cfg/cfg.yml"):
    """_summary_

    Args:
        cfg_path (str, optional): _description_. Defaults to "cfg/cfg.yml".

    Returns:
        _type_: _description_
    """
    st.set_page_config(layout="wide")

    st.markdown(
        """ <style>
        #MainMenu {visibility: hidden;}
        footer {visibility: hidden;}
        </style> """,
        unsafe_allow_html=True,
    )

    image_tool = call_in_image_tool(cfg_path)

    # Select Plot Option
    # st.sidebar.markdown("Checkboxes")
    # checkbox_one = st.sidebar.checkbox("Show Image", value=True)  # rename as necessary
    checkbox_two = st.sidebar.checkbox("Show Inference", value=True)
    checkbox_three = st.sidebar.checkbox("Show Ground Truth", value=True)
    checkbox_four = st.sidebar.checkbox("Show Side by Side (GT and Pred)", value=True)

    option = st.sidebar.selectbox("Select Image", image_tool.all_img)

    if checkbox_two:

        if checkbox_three:
            if checkbox_four:
                image_tool.plot_with_preds_gt(option=option, side_by_side=True)
            else:
                image_tool.plot_with_preds_gt(option=option, plot_type="all")

        else:
            image_tool.plot_with_preds_gt(option=option, plot_type="pred")

    elif checkbox_three:

        if checkbox_two:
            if checkbox_four:
                image_tool.plot_with_preds_gt(option=option, side_by_side=True)
            else:
                image_tool.plot_with_preds_gt(option=option, plot_type="all")

        else:
            image_tool.plot_with_preds_gt(option=option, plot_type="gt")

    else:
        image_tool.plot_with_preds_gt(option=option)


if __name__ == "__main__":
    main()
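app.py is the Streamlit entry point: it loads cfg/cfg.yml, builds an ImageTool, and uses the sidebar checkboxes to decide whether predictions, ground truth, or a side-by-side comparison is plotted for the selected image. Once the dependencies listed further down (environment.yml / requirements.txt) are installed, it would normally be started from the repository root with "streamlit run app.py".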
cfg/cfg.yml
ADDED
@@ -0,0 +1,27 @@
error_analysis:
  # detection_classes: ["person", "bicycle"]
  labels_dict: {"person": 1, "bicycle": 2}
  conf_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8] # some call it the score threshold
  iou_thresholds: [0.2, 0.35, 0.5, 0.65, 0.8] # sometimes called the NMS threshold
  # nms_thresholds: [0.2, 0.5, 0.8]
  bbox_format: "pascal_voc" # yolo / coco / pascal_voc (WIP feature)
  peekingduck: True # False if using your own model for inference without the peekingduck wrapper, else True
  ground_truth_format: "coco" # yolo / coco / pascal_voc (WIP feature)
  idx_base: 1 # whether the class index is zero- or one-based; applies to both GT and pred classes

pkd:
  model: "yolo"
  yolo_ver: "v4tiny"

dataset:
  classes: ["person", "bicycle"] # same classes as the labels_dict keys under error_analysis above
  img_folder_path: 'data/annotations_trainval2017/coco_small/' # relative path from root for saving the coco dataset images
  annotations_folder_path: 'data/annotations_trainval2017/annotations/' # relative path from root to the annotations file
  annotations_fname: "instances_val2017.json" # name of the annotations json file

visual_tool:
  bbox_thickness: 2 # bbox line thickness
  font_scale: 1 # label font size
  font_thickness: 2 # label font thickness
  pred_colour: [255, 0, 0] # prediction colour, [B, G, R]
  gt_colour: [0, 255, 0] # ground truth colour, [B, G, R]
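The rest of the repository reads this file with PyYAML and indexes the resulting nested dict by section; a minimal sketch of that pattern (the printed values are just the defaults above):

import yaml

with open("cfg/cfg.yml") as cfg_file:
    cfg = yaml.load(cfg_file, Loader=yaml.FullLoader)

# each top-level key becomes a nested dict
print(cfg["error_analysis"]["labels_dict"])     # {'person': 1, 'bicycle': 2}
print(cfg["error_analysis"]["iou_thresholds"])  # [0.2, 0.35, 0.5, 0.65, 0.8]
print(cfg["dataset"]["img_folder_path"])        # data/annotations_trainval2017/coco_small/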
data/annotations_trainval2017/annotations/instances_val2017.json
ADDED
@@ -0,0 +1 @@
{"annotations": [{"segmentation": [[406.42,181.47,410.12,183.82,423.56,175.42,423.56,179.12,419.53,183.82,425.92,193.91,426.92,209.03,424.91,214.41,418.86,223.49,411.8,224.5,406.76,222.14,404.07,206.01,400.03,207.69,395.66,189.2,403.39,181.81],[400.86,150.77,418.55,149.9,417.68,146.88,400.43,146.02]],"area": 1052.5614000000007,"iscrowd": 0,"image_id": 441586,"bbox": [395.66,146.02,31.26,78.48],"category_id": 2,"id": 128928},{"segmentation": [[550.06,162.05,552.14,159.37,558.69,158.48,562.25,158.48,561.36,162.94,565.82,166.51,579.8,171.57,583.07,167.4,590.8,165.02,592.58,169.78,597.34,174.54,603.29,175.43,605.37,182.87,604.77,196.84,591.1,202.79,578.01,200.41,574.15,194.46,569.98,196.25,571.17,191.79,563.44,179.0,560.17,175.43,557.5,174.54,558.09,176.62,558.98,186.73,545.9,195.35,536.98,191.19,536.38,181.97,541.44,176.03,547.09,171.86,551.55,170.67,554.52,168.3,555.41,165.02,553.63,161.75,552.44,161.75]],"area": 1679.4667500000005,"iscrowd": 0,"image_id": 441586,"bbox": [536.38,158.48,68.99,44.31],"category_id": 2,"id": 131214},{"segmentation": [[350.34,272.33,348.42,263.71,347.47,257.01,346.51,239.78,342.68,218.72,336.94,193.83,334.07,175.65,335.98,151.72,336.94,136.4,336.94,122.04,336.94,104.81,336.94,103.86,334.07,96.2,334.07,96.2,325.45,113.43,323.54,124.92,319.71,137.36,313.01,141.19,309.18,140.23,306.31,135.44,309.18,108.64,311.09,98.11,312.05,88.54,316.84,79.93,326.41,70.35,330.24,66.53,339.81,56.95,337.89,54.08,336.94,43.55,335.98,34.94,338.85,24.41,355.12,17.71,371.4,19.62,376.18,27.28,382.88,38.77,387.67,52.17,397.24,69.4,403.94,81.84,407.77,96.2,407.77,103.86,405.86,123.96,397.24,141.19,388.63,161.29,387.67,198.62,386.71,225.42,383.84,236.91,380.97,244.57,380.01,250.31,378.1,257.01,377.14,262.75,371.4,265.63,366.61,263.71,366.61,246.48,366.61,233.08,367.57,223.51,367.57,200.53,367.57,192.88,365.65,175.65,358.95,194.79,358.95,210.11,361.82,223.51,358.95,235.95,355.12,253.18,356.08,264.67,356.08,268.5]],"area": 13597.403899999998,"iscrowd": 0,"image_id": 441586,"bbox": [306.31,17.71,101.46,254.62],"category_id": 1,"id": 224536},{"segmentation": [[252.78,131.62,255.52,126.13,256.35,124.21,257.44,118.45,258.27,115.71,263.75,119.28,265.67,123.94,269.51,130.25,269.51,134.09,268.14,138.75,266.22,151.91,265.95,157.95,265.4,163.16,259.36,167.0,258.54,165.9,257.17,158.22,257.17,151.09,256.35,148.07,255.25,145.61,255.52,140.12,252.78,135.73,249.77,132.44]],"area": 529.5225499999995,"iscrowd": 0,"image_id": 441586,"bbox": [249.77,115.71,19.74,51.29],"category_id": 1,"id": 236500},{"segmentation": [[405.9,403.99,459.38,394.44,459.38,394.44,482.3,372.47,488.99,331.4,470.84,282.7,455.56,263.6,452.7,262.64,427.87,250.22,387.75,248.31,387.75,248.31,365.79,265.51,357.19,276.97,341.91,276.97,347.64,260.73,340.96,209.16,340.96,209.16,326.63,203.43,333.31,166.18,333.31,161.4,319.94,135.62,317.08,145.17,318.03,154.72,322.81,169.04,322.81,169.04,325.67,173.82,316.12,177.64,290.34,185.28,286.52,197.7,286.52,198.65,276.97,215.84,276.01,258.82,286.52,285.56,326.63,289.38,345.73,324.72,349.55,332.36,351.46,335.22,364.83,375.34,364.83,375.34,387.75,401.12]],"area": 24496.91995,"iscrowd": 0,"image_id": 441586,"bbox": [276.01,135.62,212.98,268.37],"category_id": 4,"id": 245321},{"segmentation": 
[[100.49,132.54,99.78,129.85,99.78,127.97,100.13,125.56,101.12,124.58,102.73,123.15,104.34,123.15,104.43,122.34,104.52,121.45,103.89,120.91,103.35,120.82,103.44,118.94,104.07,118.14,104.7,117.78,106.22,117.15,108.81,117.24,109.52,118.14,109.52,121.18,109.88,122.16,113.55,124.04,114.71,125.29,115.43,127.26,111.13,126.99,106.57,126.63,105.5,126.9,104.07,127.08,103.98,128.24,103.71,130.84,103.71,131.28,103.8,133.52,103.71,133.79,103.71,134.06,101.39,134.15],[105.59,134.06,105.23,136.74,112.21,136.74,113.91,135.67,115.78,134.95,116.14,133.61,116.14,133.61,116.14,133.61,105.32,133.34]],"area": 140.68389999999982,"iscrowd": 0,"image_id": 441586,"bbox": [99.78,117.15,16.36,19.59],"category_id": 1,"id": 1237637},{"segmentation": [[115.82,126.99,128.23,127.43,129.88,132.7,130.98,132.04,130.98,128.52,127.24,123.47,124.6,121.82,123.07,116.11,118.23,115.45,117.57,118.64,118.45,122.26,114.72,122.81],[116.58,133.8,118.23,136.76,127.13,136.21,127.35,133.91]],"area": 136.59869999999984,"iscrowd": 0,"image_id": 441586,"bbox": [114.72,115.45,16.26,21.31],"category_id": 1,"id": 1254020},{"segmentation": [[525.33,82.16,533.15,74.5,543.26,74.19,549.23,80.63,548.31,88.9,540.65,96.72,529.93,96.56,525.18,91.66]],"area": 432.5689499999999,"iscrowd": 0,"image_id": 441586,"bbox": [525.18,74.19,24.05,22.53],"category_id": 13,"id": 1389230},{"segmentation": [[399.9,147.78,400.2,156.12,401.09,157.91,410.03,154.04,413.01,152.25,416.59,152.25,418.68,150.76,415.7,147.48,417.49,140.63,421.06,137.05,426.13,136.75,430.0,140.03,429.71,145.1,427.32,149.86,427.02,154.93,431.2,158.81,427.92,164.77,422.25,175.8,416.29,179.67,411.82,180.56,407.35,182.95,402.28,183.25,406.76,176.99,413.91,164.77,410.63,162.68,407.05,161.79,397.22,163.58,396.03,152.85]],"area": 743.4173000000001,"iscrowd": 0,"image_id": 441586,"bbox": [396.03,136.75,35.17,46.5],"category_id": 1,"id": 1727927},{"segmentation": [[103.28,272.37,73.12,280.59,48.44,274.19,35.65,249.52,30.16,248.6,31.08,233.98,51.18,210.22,94.14,207.47,102.37,214.78,103.28,198.33,93.23,194.68,85.0,188.28,90.48,181.88,105.11,182.8,116.08,186.45,121.56,191.02,113.33,192.85,108.76,192.85,105.11,197.42,172.74,197.42,171.83,191.02,160.86,190.11,162.69,180.05,163.6,170.91,148.98,174.57,150.81,169.09,159.03,163.6,167.26,166.34,166.34,160.86,157.2,158.12,166.34,151.72,172.74,169.09,166.34,184.62,175.48,189.19,182.8,208.39,181.88,215.7,200.16,212.96,221.18,223.01,233.06,244.95,226.67,266.88,216.61,282.42,195.59,286.99,171.83,281.51,161.77,273.28,155.38,259.57,150.81,243.12,159.95,227.58,173.66,219.35,174.57,215.7,171.83,204.73,163.6,206.56,157.2,220.27,148.06,214.78,141.67,214.78,129.78,223.92,120.65,212.96,115.16,208.39,109.68,210.22,111.51,217.53,116.08,223.01,122.47,243.12,132.53,246.77,130.7,254.09,125.22,260.48,123.39,260.48,118.82,260.48,116.08,262.31,112.42,263.23]],"area": 11828.584050000001,"iscrowd": 0,"image_id": 441586,"bbox": [30.16,151.72,202.9,135.27],"category_id": 2,"id": 126632},{"segmentation": 
[[222.16,121.37,204.15,131.43,198.32,148.39,198.32,171.17,205.21,201.89,210.51,210.37,211.03,222.56,214.74,222.56,220.57,211.96,246.53,222.03,267.19,205.6,276.73,216.2,294.21,211.96,298.45,193.42,345.6,158.98,345.6,166.93,340.3,174.35,330.24,198.72,324.41,224.68,332.35,227.32,350.9,262.29,372.62,271.83,403.35,272.36,422.95,242.16,416.59,203.48,404.93,176.47,389.57,161.63,395.93,156.33,402.82,113.42,371.56,116.6,342.42,112.36,357.78,106.0,345.6,103.36,320.17,103.36,296.86,98.06,294.21,99.65,293.15,107.06,320.17,107.06,333.94,110.24,338.71,112.36,339.77,136.73,275.14,119.25,268.25,108.65,282.56,107.59,286.26,100.18,283.09,99.12,275.14,98.06,272.49,98.06,268.78,94.88,266.13,92.76,258.19,91.17,252.36,95.41,256.6,102.83,261.36,104.94,263.48,106.53,266.66,108.12,266.66,113.42,264.01,118.19,260.83,118.72,252.89,113.95,249.18,111.3,241.76,109.71,238.05,108.65,228.52,108.12,224.81,108.12,216.33,110.24,216.33,120.31]],"area": 23082.81575,"iscrowd": 0,"image_id": 11149,"bbox": [198.32,91.17,224.63,181.19],"category_id": 2,"id": 126643},{"segmentation": [[0.0,133.87,0.0,265.63,14.36,280.83,21.96,279.14,26.18,269.85,30.41,261.4,38.85,263.09,50.68,263.09,54.05,271.54,55.74,286.74,61.66,308.7,68.41,323.06,72.64,333.19,83.61,345.86,96.28,350.93,120.78,349.24,130.91,339.95,135.14,324.75,136.82,310.39,136.82,295.19,135.98,280.83,132.6,259.71,126.69,247.89,119.93,235.22,113.18,223.4,108.95,212.42,103.89,208.19,99.66,202.28,95.44,192.99,92.91,182.01,90.37,171.88,89.53,163.43,89.53,154.98,90.37,151.6,108.11,138.94,103.04,130.49,95.44,130.49,90.37,132.18,88.68,133.02,81.93,133.02,77.7,132.18,79.39,126.27,81.08,122.04,85.3,119.51,88.68,111.91,88.68,103.46,92.91,93.33,97.13,89.95,103.04,84.88,111.49,76.44,111.49,69.68,106.42,68.83,90.37,68.83,86.15,68.83,85.3,75.59,86.15,77.28,84.46,102.62,75.17,105.15,71.79,106.84,67.57,110.22,61.66,106.84,53.21,97.55,47.3,97.55,39.7,103.46,36.32,106.0,36.32,110.22,33.78,115.29,29.56,118.67,27.03,121.2,21.11,121.2,16.05,115.29,8.45,109.38,0.84,104.31]],"area": 22842.389549999996,"iscrowd": 0,"image_id": 11149,"bbox": [0.0,68.83,136.82,282.1],"category_id": 4,"id": 151281}, {"segmentation": [[21.11,120.35,30.41,103.46,34.63,95.02,39.7,99.24,58.28,99.24,67.57,103.46,75.17,103.46,73.48,97.55,51.52,76.44,13.51,51.94,22.8,40.12,22.8,23.23,19.43,13.09,17.74,7.18,10.14,5.49,3.38,3.8,0.0,46.03,1.69,108.53,1.69,108.53,19.43,121.2]],"area": 3560.2923000000005,"iscrowd": 0,"image_id": 11149,"bbox": [0.0,3.8,75.17,117.4],"category_id": 1,"id": 197935},{"segmentation": [[355.4,44.07,359.52,125.03,386.96,119.54,400.68,120.91,400.68,134.64,380.1,145.61,369.12,146.99,375.98,174.43,393.82,200.5,389.7,284.2,391.08,326.74,403.43,341.84,426.75,341.84,434.99,341.84,448.71,334.98,452.83,321.25,441.85,318.51,437.73,317.14,419.89,285.58,421.26,273.23,421.26,260.88,426.75,219.71,424.01,193.64,422.64,173.06,430.87,174.43,441.85,163.45,437.73,152.47,437.73,126.4,445.96,75.63,448.71,49.56,448.71,34.47,445.96,27.6,419.89,24.86,374.61,24.86,367.75,30.35,366.38,35.84,359.52,42.7,359.52,45.44]],"area": 17385.67285,"iscrowd": 0,"image_id": 492937,"bbox": [355.4,24.86,97.43,316.98],"category_id": 1,"id": 198827},{"segmentation": [[417.13,288.9,436.52,268.68,439.04,228.23,434.83,143.12,445.79,111.1,445.79,98.46,458.43,79.92,450.0,57.16,453.37,34.41,471.07,1.54,498.88,1.54,500.0,245.08,488.76,260.25,486.24,281.32,485.39,310.81,474.44,315.03,467.7,315.87,460.96,313.34,471.91,233.29,470.22,222.33,469.38,222.33,460.11,259.41,453.37,293.12,423.88,297.33]],"area": 15959.356599999996,"iscrowd": 0,"image_id": 
11149,"bbox": [417.13,1.54,82.87,314.33],"category_id": 1,"id": 199247}, {"segmentation": [[94.17,98.47,95.01,90.91,98.37,89.22,109.3,89.22,113.51,94.27,114.35,97.63,114.35,106.88,116.03,117.81,118.55,140.51,131.17,112.77,131.17,91.75,125.28,82.5,119.39,76.61,108.46,71.57,108.46,69.89,110.99,63.16,116.03,60.64,125.28,58.95,143.78,72.41,153.87,82.5,146.3,97.63,148.82,124.54,176.57,131.26,193.39,153.97,199.27,212.82,161.43,230.48,134.53,228.8,110.99,206.1,103.42,178.35,97.53,175.83,88.28,171.62,91.65,148.08,105.1,146.4,110.99,133.79,108.46,127.06,111.83,107.72,98.37,97.63]],"area": 10372.88825,"iscrowd": 0,"image_id": 11149,"bbox": [88.28,58.95,110.99,171.53],"category_id": 2,"id": 240899}, {"segmentation": [[282.61,469.21,290.16,422.83,299.87,354.88,285.84,340.85,253.48,330.07,231.91,309.57,224.36,296.63,239.46,258.88,237.3,239.46,240.54,207.1,258.88,172.58,283.69,147.78,299.87,134.83,303.1,114.34,310.65,87.37,319.28,78.74,339.78,84.13,351.64,102.47,362.43,122.97,359.19,142.38,352.72,151.01,362.43,173.66,362.43,195.24,361.35,221.12,376.45,244.85,403.42,273.98,382.92,285.84,366.74,288.0,354.88,306.34,341.93,318.2,344.09,347.33,347.33,374.29,347.33,393.71,345.17,408.81,362.43,422.83,369.98,432.54,351.64,441.17,337.62,440.09,333.3,459.51]],"area": 33491.37034999999,"iscrowd": 0,"image_id": 576031,"bbox": [224.36,78.74,179.06,390.47],"category_id": 1,"id": 445741}, {"segmentation": [[269.13,162.23,265.79,160.38,265.79,158.02,272.35,149.85,272.1,138.09,273.59,129.68,276.19,123.24,279.78,121.26,285.6,121.63,289.56,124.11,290.67,127.2,295.13,137.6,295.87,137.85,287.08,148.0]],"area": 613.1277999999993,"iscrowd": 0,"image_id": 576031,"bbox": [265.79,121.26,30.08,40.97],"category_id": 1,"id": 473571}, {"segmentation": [[188.78,186.73,182.84,195.79,182.84,205.17,182.84,221.42,177.84,224.55,168.78,224.55,164.72,223.92,166.59,219.55,169.72,218.92,170.03,211.73,172.84,201.73,172.84,190.17,174.09,183.29,171.28,175.16,170.97,165.79,170.03,154.22,161.59,157.03,149.09,155.47,145.02,158.28,144.09,153.28,143.77,148.91,147.84,147.66,155.03,150.78,165.97,142.97,170.34,140.47,172.53,134.22,168.78,130.15,167.53,122.96,170.34,115.15,176.59,112.02,183.47,116.4,183.78,124.21,184.1,128.28,186.6,131.4,192.22,134.53,195.35,137.65,195.35,145.78,203.79,167.66,205.35,172.66,205.35,178.29,202.54,185.48,200.97,193.29,207.85,210.17,212.23,218.92,211.6,221.74,206.29,223.92,200.97,225.49,197.54,225.49,196.6,222.67,200.35,219.24,197.85,214.55,192.53,206.73,190.97,201.42,188.78,189.23]],"area": 3038.4778499999998,"iscrowd": 0,"image_id": 576031,"bbox": [143.77,112.02,68.46,113.47],"category_id": 1,"id": 498483}, {"segmentation": [[169.58,141.88,170.12,139.69,170.67,136.95,169.58,133.94,167.66,130.1,167.66,125.71,167.38,125.17,163.27,126.54,157.52,133.39,155.6,136.95,155.33,139.96,155.33,141.88,152.86,145.45,152.59,147.64,152.59,149.28,152.59,149.83,155.87,149.28,159.16,146.27,163.27,143.53],[152.04,157.23,152.31,163.26,153.68,166.82,154.78,170.38,156.15,174.77,154.78,181.89,147.65,192.58,144.09,198.34,141.9,203.27,143.27,204.91,146.01,206.01,146.83,207.38,149.57,208.2,151.22,209.57,153.68,209.85,156.15,209.85,154.5,208.2,152.86,202.72,153.96,200.53,157.79,193.13,161.9,188.47,163.82,187.65,166.56,197.79,167.66,203.54,168.21,207.65,168.21,210.67,170.67,208.75,173.14,194.77,172.86,187.1,172.59,181.62,171.77,176.96,169.03,170.66,168.75,167.37,168.75,163.26,169.3,156.96,169.85,155.04]],"area": 1103.7693,"iscrowd": 0,"image_id": 576031,"bbox": [141.9,125.17,31.24,85.5],"category_id": 1,"id": 515669}, {"segmentation": 
[[201.13,438.05,288.11,443.26,283.65,434.34,202.62,433.59],[320.07,446.23,545.32,461.1,570.59,451.43,549.04,454.41,323.05,436.57],[208.57,455.89,300.75,480.0,314.87,478.2,259.86,466.3,210.8,453.66]],"area": 2864.165400000003,"iscrowd": 0,"image_id": 576031,"bbox": [201.13,433.59,369.46,46.41],"category_id": 35,"id": 1453611}, {"segmentation": [[431.59,362.72,438.02,334.25,435.26,320.48,433.43,304.87,430.67,290.18,422.41,271.81,424.25,260.79,425.16,247.94,404.96,247.94,405.88,222.22,410.47,191.0,403.12,186.41,414.14,169.88,402.21,158.86,405.88,147.84,419.65,149.68,448.12,144.17,451.79,161.62,448.12,170.8,463.73,183.66,481.18,217.63,492.2,234.16,492.2,248.85,496.79,247.94,491.28,255.28,493.12,275.48,495.87,292.01,486.69,296.6,487.61,314.05,490.36,328.74,477.51,331.5,473.83,319.56,479.34,297.52,479.34,278.24,479.34,270.89,472.0,309.46,472.91,328.74,470.16,333.34,470.16,352.62,470.16,358.13,477.51,370.98,473.83,382.92,455.47,378.33,452.71,370.07,446.28,372.82,431.59,373.74,428.84,361.8]],"area": 13001.949300000002,"iscrowd": 0,"image_id": 576052,"bbox": [402.21,144.17,94.58,238.75],"category_id": 19,"id": 53914}, {"segmentation": [[428.25,158.67,427.93,145.36,433.45,129.77,445.14,121.0,439.94,113.86,437.99,109.96,438.32,101.52,444.49,92.1,449.68,91.13,457.15,90.8,459.42,93.4,465.92,102.17,467.87,107.69,464.94,115.16,468.52,120.68,478.91,126.85,483.78,151.2,474.69,165.49,499.04,190.49,507.16,238.2,515.27,244.7,516.25,248.27,508.78,252.16,494.17,245.02,484.1,199.6,467.54,178.82,453.9,170.38,438.32,162.58]],"area": 4388.146849999999,"iscrowd": 0,"image_id": 576052,"bbox": [427.93,90.8,88.32,161.36],"category_id": 1,"id": 196260}]}
data/annotations_trainval2017/coco_small/000000011149.jpg
ADDED
data/annotations_trainval2017/coco_small/000000441586.jpg
ADDED
data/annotations_trainval2017/coco_small/000000576031.jpg
ADDED
data/annotations_trainval2017/coco_small/000000576052.jpg
ADDED
environment.yml
ADDED
@@ -0,0 +1,16 @@
dependencies:
  - python=3.8
  - pip
  - pip:
    - peekingduck
    - typeguard==2.13.3
    - beautifulsoup4==4.11.2
    - opencv-python==4.7.0.68
    - pandas==1.5.3
    - numpy==1.24.2
    - cython
    - pycocotools-windows
    - jupyter==1.0.0
    - pyyaml
    - streamlit==1.20.0
    - plotly==5.13.1
requirements.txt
ADDED
@@ -0,0 +1,14 @@
pandas
# plotly
datasets
peekingduck
beautifulsoup4==4.11.2
opencv-python==4.7.0.68
pandas==1.5.3
numpy==1.24.2
cython
pycocotools
jupyter==1.0.0
pyyaml
# streamlit==1.20.0
plotly==5.13.1
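Either dependency file sets up the project: environment.yml targets a conda workflow (conda env create -f environment.yml; note the Windows-specific pycocotools-windows pin), while requirements.txt is the pip equivalent (pip install -r requirements.txt) with streamlit left commented out.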
src/confusion_matrix.py
ADDED
@@ -0,0 +1,153 @@
import numpy as np
import pandas as pd


def box_iou_calc(boxes1, boxes2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        boxes1 (Array[N, 4])
        boxes2 (Array[M, 4])
    Returns:
        iou (Array[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2

    This implementation is taken from the above link and changed so that it only uses numpy.
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(boxes1.T)
    area2 = box_area(boxes2.T)

    lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    inter = np.prod(np.clip(rb - lt, a_min=0, a_max=None), 2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def mask_iou_calc(mask1, mask2):
    # TODO: build function to take in two masks, compare them and see what their iou is.
    # similar to above but with masks.
    return


class ConfusionMatrix:
    def __init__(self, num_classes, CONF_THRESHOLD=0.2, IOU_THRESHOLD=0.5):
        self.matrix = np.zeros((num_classes + 1, num_classes + 1))
        self.num_classes = num_classes
        self.CONF_THRESHOLD = CONF_THRESHOLD
        self.IOU_THRESHOLD = IOU_THRESHOLD
        self.got_tpfpfn = False

    def process_batch(self, detections, labels, return_matches=False):
        '''
        Update the confusion matrix with one image's detections and labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]): x1, y1, x2, y2, conf, class
            labels (Array[M, 5]): class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        '''
        detections = detections[detections[:, 4] > self.CONF_THRESHOLD]
        gt_classes = labels[:, 0].astype(np.int16)
        detection_classes = detections[:, 5].astype(np.int16)

        all_ious = box_iou_calc(labels[:, 1:], detections[:, :4])
        # print('=== all_ious ===')
        # print(all_ious)
        want_idx = np.where(all_ious > self.IOU_THRESHOLD)
        # print('=== want_idx ===')
        # print(want_idx)

        all_matches = []
        for i in range(want_idx[0].shape[0]):
            all_matches.append([want_idx[0][i], want_idx[1][i], all_ious[want_idx[0][i], want_idx[1][i]]])

        all_matches = np.array(all_matches)
        if all_matches.shape[0] > 0:  # if there is a match
            all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]

            all_matches = all_matches[np.unique(all_matches[:, 1], return_index=True)[1]]

            all_matches = all_matches[all_matches[:, 2].argsort()[::-1]]

            all_matches = all_matches[np.unique(all_matches[:, 0], return_index=True)[1]]

        for i, label in enumerate(labels):
            if all_matches.shape[0] > 0 and all_matches[all_matches[:, 0] == i].shape[0] == 1:
                gt_class = gt_classes[i]
                detection_class = detection_classes[int(all_matches[all_matches[:, 0] == i, 1][0])]
                self.matrix[gt_class, detection_class] += 1
            else:
                gt_class = gt_classes[i]
                self.matrix[gt_class, self.num_classes] += 1

        for i, detection in enumerate(detections):
            if all_matches.shape[0] and all_matches[all_matches[:, 1] == i].shape[0] == 0:
                detection_class = detection_classes[i]
                self.matrix[self.num_classes, detection_class] += 1

        if return_matches:
            return all_matches

    def get_tpfpfn(self):
        self.tp = np.diag(self.matrix).sum()
        fp = self.matrix.copy()
        np.fill_diagonal(fp, 0)
        self.fp = fp[:, :-1].sum()
        self.fn = self.matrix[:-1, -1].sum()
        self.got_tpfpfn = True

    def get_PR(self):
        if not self.got_tpfpfn:
            self.get_tpfpfn()
        # print(tp, fp, fn)
        self.precision = self.tp / (self.tp + self.fp)
        self.recall = self.tp / (self.tp + self.fn)

    def return_matrix(self):
        return self.matrix

    def process_full_matrix(self):
        """process the matrix into something more readable"""
        for idx, i in enumerate(self.matrix):
            i[0] = idx
        self.matrix = np.delete(self.matrix, 0, 0)

    def print_matrix_as_df(self):
        """print out the processed matrix as a DataFrame"""
        df = pd.DataFrame(self.matrix)
        print(df.to_string(index=False))

    # def print_matrix(self):
    #     for i in range(self.num_classes + 1):
    #         print(' '.join(map(str, self.matrix[i])))

    def return_as_csv(self, csv_file_path):
        """save the processed matrix as a csv"""
        df = pd.DataFrame(self.matrix)
        df.to_csv(csv_file_path, index=False)
        print(f"saved to: {csv_file_path}")

    def return_as_df(self):
        """return the processed matrix as a DataFrame"""
        df = pd.DataFrame(self.matrix)
        # df = df.set_index(0)
        return df
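A small, self-contained sketch of how ConfusionMatrix is meant to be fed (the boxes are made-up values, not from the dataset): detections are N x [x1, y1, x2, y2, conf, class] and labels are M x [class, x1, y1, x2, y2] in the same coordinate space.

import numpy as np
from src.confusion_matrix import ConfusionMatrix

cm = ConfusionMatrix(num_classes=2, CONF_THRESHOLD=0.2, IOU_THRESHOLD=0.5)

# the first detection matches the first GT box, the second GT box is missed,
# and the second detection has no GT counterpart
detections = np.array([[0.10, 0.10, 0.40, 0.50, 0.90, 0],
                       [0.60, 0.60, 0.90, 0.90, 0.75, 1]])
labels = np.array([[0, 0.12, 0.11, 0.42, 0.52],
                   [1, 0.05, 0.70, 0.20, 0.90]])

cm.process_batch(detections, labels)
cm.get_PR()
print(cm.return_matrix())       # 3x3: the extra row/column counts unmatched predictions/GT ("background")
print(cm.precision, cm.recall)  # 0.5 0.5 for this toy example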
src/data_ingestion/data_ingestion.py
ADDED
@@ -0,0 +1,78 @@
import cv2
import json
import os
import numpy as np


class AnnotsGTGetter:

    def __init__(self, cfg_obj):

        self.cfg_obj = cfg_obj

        self.img_folder_path = cfg_obj['dataset']['img_folder_path']
        self.json_folder_path = cfg_obj['dataset']['annotations_folder_path']
        self.annot_json_fname = cfg_obj['dataset']['annotations_fname']
        self.labels_dict = cfg_obj['error_analysis']['labels_dict']

        json_file = open(self.json_folder_path + self.annot_json_fname)
        self.annot_data = json.load(json_file)

        self.img_ids_in_json = [annot['image_id'] for annot in self.annot_data['annotations']]
        self.all_imgs = os.listdir(self.img_folder_path)

        return

    def get_imgs(self):
        """get the images that appear both in the json's image ids and in the image folder

        not needed because all images in the folder were accounted for in the json...
        """
        all_img_ids_in_folder = [int(i[:-4]) for i in self.all_imgs]

        all_imgs_found = [i for i in all_img_ids_in_folder if i in self.img_ids_in_json]

        print(len(all_imgs_found))

    def get_annots(self, img_fname='000000576052.jpg'):
        """retrieve the annotations for a given file name

        Args:
            img_fname (str): image file name

        Returns:
            np array: all annotations of an image
        """

        # strip the extension from img_fname to get the image id
        # assumes jpg, png, but not jpeg...
        # TODO - what if jpeg?
        annots = []
        img_id = int(img_fname[:-4])
        for annot in self.annot_data['annotations']:
            if img_id == annot['image_id']:
                if annot['category_id'] in list(self.labels_dict.values()):
                    annots.append([annot['category_id'], annot['bbox'][0], annot['bbox'][1], annot['bbox'][2], annot['bbox'][3]])

        return np.array(annots)

    def get_gt_annots(self):
        """go into the image folder and call get_annots to extract each image's annotations

        Returns:
            dict: all annotations
        """

        # create dictionary of gt annots
        all_gt_annots = {img: self.get_annots(img) for img in os.listdir(self.img_folder_path)}
        return all_gt_annots


if __name__ == '__main__':
    import yaml
    cfg_obj = yaml.load(open('cfg/cfg.yml'), Loader=yaml.FullLoader)  # the class needs a config object
    annots_obj = AnnotsGTGetter(cfg_obj)
    gt_dict = annots_obj.get_gt_annots()
    print(gt_dict)
    # annots_obj.get_imgs()
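AnnotsGTGetter only reads a handful of keys from the config object, so it can also be driven with a hand-built dict; a minimal sketch using the same values as cfg/cfg.yml:

from src.data_ingestion.data_ingestion import AnnotsGTGetter

cfg_obj = {
    "dataset": {
        "img_folder_path": "data/annotations_trainval2017/coco_small/",
        "annotations_folder_path": "data/annotations_trainval2017/annotations/",
        "annotations_fname": "instances_val2017.json",
    },
    "error_analysis": {"labels_dict": {"person": 1, "bicycle": 2}},
}

getter = AnnotsGTGetter(cfg_obj)
gt_dict = getter.get_gt_annots()
# each value is an n x 5 array of [category_id, x, y, w, h] in COCO pixel coordinates
print(gt_dict["000000011149.jpg"])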
src/error_analysis.py
ADDED
@@ -0,0 +1,156 @@
from peekingduck.pipeline.nodes.model import yolo as pkd_yolo
from src.data_ingestion.data_ingestion import AnnotsGTGetter
from src.inference import Inference
from src.confusion_matrix import ConfusionMatrix
import yaml
from itertools import product
import pandas as pd


def transform_gt_bbox_format(ground_truth, img_size, format="coco"):
    """transforms the ground truth bbox format to pascal voc for the confusion matrix

    Args:
        ground_truth (_type_): nx5 numpy array, if coco - n x [class, x, y, w, h], if yolo - n x [class, x-mid, y-mid, w, h]
        img_size (_type_): [Height, Width, Dimension] values vector
        format (str, optional): ground truth format. Defaults to "coco".

    Returns:
        _type_: ground_truth transformed to pascal voc format
    """
    if format == "coco":
        ground_truth[:, 3] = (ground_truth[:, 1] + ground_truth[:, 3]) / img_size[1]
        ground_truth[:, 1] = (ground_truth[:, 1]) / img_size[1]
        ground_truth[:, 4] = (ground_truth[:, 2] + ground_truth[:, 4]) / img_size[0]
        ground_truth[:, 2] = (ground_truth[:, 2]) / img_size[0]

    return ground_truth


class ErrorAnalysis:

    def __init__(self, cfg_path='cfg/cfg.yml'):

        cfg_file = open(cfg_path)
        self.cfg_obj = yaml.load(cfg_file, Loader=yaml.FullLoader)
        # self.nms_thresh = self.cfg_obj['error_analysis']['nms_thresholds']
        self.iou_thresh = self.cfg_obj['error_analysis']['iou_thresholds']
        self.conf_thresh = self.cfg_obj['error_analysis']['conf_thresholds']
        self.inference_folder = self.cfg_obj['dataset']['img_folder_path']
        pkd = self.cfg_obj['error_analysis']['peekingduck']
        self.cm_results = []

        # TODO - generalise the model
        if pkd:
            pkd_model = self.cfg_obj['pkd']['model']
            # only instantiates the configured yolo model, but you are free to change this to another pkd model
            if pkd_model == "yolo":
                yolo_ver = self.cfg_obj['pkd']['yolo_ver']
                self.model = pkd_yolo.Node(model_type=yolo_ver, detect=list(self.cfg_obj['error_analysis']['labels_dict'].keys()))
        else:
            # call in your own model
            # self.model = <your model import here>
            # make sure that your model has iou_threshold and score_threshold attributes
            pass

    def generate_inference(self, img_fname="000000000139.jpg"):
        """Run inference on an image given its file name. The path to the folder is determined by the cfg.

        Args:
            img_fname (str, optional): image file name. Defaults to "000000000139.jpg".

        Returns:
            ndarray, tuple: ndarray - n x [x1, y1, x2, y2, score, class], (H, W, D)
        """

        inference_obj = Inference(self.model, self.cfg_obj)
        img_path = f"{self.inference_folder}{img_fname}"
        inference_outputs = inference_obj.run_inference_path(img_path)

        return inference_outputs

    def get_annots(self):
        """get GT annotations from the dataset"""

        annots_obj = AnnotsGTGetter(cfg_obj=self.cfg_obj)
        self.gt_dict = annots_obj.get_gt_annots()

    def generate_conf_matrix(self, iou_threshold=0.5, conf_threshold=0.2):
        """generate the confusion matrix by running inference on each image"""

        num_classes = len(list(self.cfg_obj['error_analysis']['labels_dict'].keys()))
        ground_truth_format = self.cfg_obj["error_analysis"]["ground_truth_format"]
        idx_base = self.cfg_obj["error_analysis"]["idx_base"]

        # TODO - currently, the ConfusionMatrix is zero-indexed but all my classes are one-based.
        # need to find a better way to resolve this. Infuriating.
        cm = ConfusionMatrix(num_classes=num_classes, CONF_THRESHOLD=conf_threshold, IOU_THRESHOLD=iou_threshold)

        for fname in list(self.gt_dict.keys()):
            inference_output, img_size = self.generate_inference(fname)

            # deduct idx_base from each inference's class index
            inference_output[:, -1] -= idx_base

            ground_truth = self.gt_dict[fname].copy()

            # deduct idx_base from each ground truth's class index
            ground_truth[:, 0] -= idx_base

            # inference is in x1, y1, x2, y2, score, class, so OK
            # coco gt is in x, y, width, height - need to change to suit the conf matrix
            # img shape is (H, W, D), so plug in accordingly to normalise
            ground_truth = transform_gt_bbox_format(ground_truth=ground_truth, img_size=img_size, format=ground_truth_format)

            # print (f"ground_truth: {ground_truth}")
            # print (f"inference: {inference_output}")

            cm.process_batch(inference_output, ground_truth)

        cm.get_PR()

        return cm.matrix, cm.precision, cm.recall

    def generate_conf_matrices(self, print_matrix=True):
        """generate a confusion matrix for every combination of IoU and confidence threshold"""

        # get all combinations of the threshold values:
        combinations = list(product(self.iou_thresh, self.conf_thresh))
        comb_cms = {}
        for comb in combinations:
            # print (f"IOU: {comb[0]}, Conf: {comb[1]}")
            self.model.iou_threshold, self.model.score_threshold = comb[0], comb[1]
            returned_matrix, precision, recall = self.generate_conf_matrix(iou_threshold=comb[0], conf_threshold=comb[1])
            # print (returned_matrix)
            comb_cms[f"IOU: {comb[0]}, Conf: {comb[1]}"] = returned_matrix
            self.cm_results.append([comb[0], comb[1], precision, recall])

        if print_matrix:
            for k, v in comb_cms.items():
                print(k)
                print(v)

    def proc_pr_table(self):

        self.cm_table = pd.DataFrame(self.cm_results, columns=['IOU_Threshold', 'Score Threshold', 'Precision', 'Recall'])

        print(self.cm_table)


if __name__ == "__main__":
    ea_games = ErrorAnalysis()
    # print (ea_games.generate_inference())
    ea_games.get_annots()
    ea_games.generate_conf_matrices()
    # print (ea_games.generate_conf_matrix())
    # print (ea_games.gt_dict)
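Beyond the __main__ block, the intended flow is: load the config, pull the ground truth, sweep every IoU/confidence combination defined in cfg/cfg.yml, then summarise precision and recall per combination. A short sketch of that end-to-end usage:

from src.error_analysis import ErrorAnalysis

ea = ErrorAnalysis(cfg_path="cfg/cfg.yml")   # instantiates the PeekingDuck YOLO node from the config
ea.get_annots()                              # ground truth dict, one entry per image
ea.generate_conf_matrices(print_matrix=False)
ea.proc_pr_table()                           # prints a table of IoU/score threshold vs precision/recall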
src/get_data_coco/get_img.py
ADDED
@@ -0,0 +1,33 @@
from pycocotools.coco import COCO
import requests
import yaml


def get_images(cfg_path='cfg/cfg.yml'):
    """Query the coco dataset for the configured classes e.g. 'person' and download those images

    Args:
        cfg_path (str, optional): path to the config file. Defaults to 'cfg/cfg.yml'.
    """

    cfg_file = open(cfg_path)
    cfg = yaml.load(cfg_file, Loader=yaml.FullLoader)

    # instantiate COCO specifying the annotations json path
    coco = COCO(cfg['dataset']['annotations_folder_path'] + cfg['dataset']['annotations_fname'])
    # Specify a list of category names of interest
    catIds = coco.getCatIds(catNms=cfg['dataset']['classes'])
    # Get the corresponding image ids and images using loadImgs
    imgIds = coco.getImgIds(catIds=catIds)
    images = coco.loadImgs(imgIds)

    # Save the images into a local folder
    for im in images:
        img_data = requests.get(im['coco_url']).content
        with open(cfg['dataset']['img_folder_path'] + im['file_name'], 'wb') as handler:
            handler.write(img_data)

    return


if __name__ == '__main__':
    get_images(cfg_path='cfg/cfg.yml')
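Since get_img.py only depends on the config path and third-party packages, it can be run directly from the repository root (python src/get_data_coco/get_img.py) to download the images for the configured classes into img_folder_path via their coco_url.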
src/inference.py
ADDED
@@ -0,0 +1,100 @@
from peekingduck.pipeline.nodes.model import yolo as pkd_yolo
import cv2
from collections import defaultdict
import numpy as np
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)


def convert_labels(labels_dict, bbox_labels):
    """convert string class labels to their numeric ids using labels_dict"""
    for k, v in labels_dict.items():
        bbox_labels[bbox_labels == k] = v

    # FutureWarning: elementwise comparison failed; returning scalar, but in the future will perform elementwise comparison
    # thrown because changing string to int is something that numpy disagrees with (???).

    return bbox_labels


def run_inference(img_matrix, model, labels_dict={'person': 1, 'bicycle': 2}):
    """Helper function to run per-image inference, get bboxes, labels and scores and stack them for the confusion matrix output

    Args:
        img_matrix (np.array): image as an HxWxD array
        model: model with a run() method, e.g. a peekingduck node
        labels_dict (dict, optional): mapping from class name to id. Defaults to {'person': 1, 'bicycle': 2}.

    Returns:
        concated (np.array): concatenated inference of n x (bbox (default is x1, y1, x2, y2), score, class)
        img_matrix.shape (np vector): vector with [Height, Width, Dimension] values
    """
    # for img_matrix, it's HxWxD. Need to resize it for the confusion matrix

    inference_inputs = {"img": img_matrix}

    # modify this to change the run to your model's inference method, e.g. model(img) in pytorch
    inference_outputs = model.run(inference_inputs)

    bbox_labels = inference_outputs["bbox_labels"]
    bbox_labels = convert_labels(labels_dict, bbox_labels)
    bboxes = inference_outputs["bboxes"]
    bbox_scores = inference_outputs["bbox_scores"]

    # stack the bbox_scores and bbox_labels
    # hence, array(['score', 'score', 'score']) and array(['class', 'class', 'class'])
    # becomes array([['score', 'class'], ['score', 'class'], ['score', 'class']])
    stacked = np.stack((bbox_scores, bbox_labels), axis=1)

    # concatenate the values of the bbox with the stacked values above
    # use concatenate here because it is n x m with n x l dimensions so it works
    # it's just maths, people!
    concated = np.concatenate((bboxes, stacked), axis=1)

    return concated.astype(np.float32), img_matrix.shape


class Inference:

    def __init__(self, model, cfg_obj):

        self.model = model
        self.labels_dict = cfg_obj['error_analysis']['labels_dict']

    def run_inference_path(self, img_path):
        """use if an img_path is specified

        Args:
            img_path (str): path to the image

        Returns:
            see run_inference
        """
        image_orig = cv2.imread(img_path)
        image_orig = cv2.cvtColor(image_orig, cv2.COLOR_BGR2RGB)

        output = run_inference(image_orig, self.model, labels_dict=self.labels_dict)

        return output

    def run_inference_byte(self, img_bytes):
        """use if img_bytes is passed in instead of a path

        Args:
            img_bytes (bytes): raw image bytes

        Returns:
            see run_inference
        """
        img_decoded = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), -1)
        img_decoded = cv2.cvtColor(img_decoded, cv2.COLOR_BGR2RGB)

        output = run_inference(img_decoded, self.model, labels_dict=self.labels_dict)

        return output


if __name__ == "__main__":
    import yaml
    cfg_path = "cfg/cfg.yml"
    cfg_file = open(cfg_path)
    cfg_obj = yaml.load(cfg_file, Loader=yaml.FullLoader)
    img_path = "./data/annotations_trainval2017/coco_person/000000000139.jpg"
    inference_obj = Inference(model=pkd_yolo.Node(model_type="v4tiny", detect=["person"]), cfg_obj=cfg_obj)
    print(inference_obj.run_inference_path(img_path))
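run_inference only assumes the model exposes a run() method returning "bboxes", "bbox_labels" and "bbox_scores" (the PeekingDuck output keys used above), so the contract can be illustrated without a real model; the stand-in model and values below are made up:

import numpy as np
from src.inference import run_inference

class DummyModel:
    """Stand-in with the same output keys as the PeekingDuck node."""
    def run(self, inputs):
        return {
            "bboxes": np.array([[0.0, 0.0, 0.5, 0.5]]),  # normalised x1, y1, x2, y2
            "bbox_labels": np.array(["person"]),
            "bbox_scores": np.array([0.9]),
        }

img = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy HxWxD image
preds, shape = run_inference(img, DummyModel(), labels_dict={"person": 1, "bicycle": 2})
print(preds)   # [[0.  0.  0.5 0.5 0.9 1. ]]
print(shape)   # (480, 640, 3)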
src/st_image_tools.py
ADDED
@@ -0,0 +1,329 @@
import streamlit as st
import numpy as np
import plotly.express as px
import cv2
from src.error_analysis import ErrorAnalysis, transform_gt_bbox_format
import yaml
import os
from src.confusion_matrix import ConfusionMatrix
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd


def amend_cm_df(cm_df, labels_dict):
    """Helper function to amend the index and column names for readability
    Example - index currently is 0, 1 ... -> GT - person
    Likewise in columns - 0, 1 ... -> Pred - person etc

    Args:
        cm_df (_type_): confusion matrix DataFrame
        labels_dict (_type_): mapping from class id to class name

    Returns:
        _type_: amended DataFrame
    """

    index_list = list(labels_dict.values())
    index_list.append("background")

    cm_df = cm_df.set_axis([f"GT - {elem}" for elem in index_list])
    cm_df = cm_df.set_axis([f"Pred - {elem}" for elem in index_list], axis=1)
    cm_df = cm_df.astype(int)

    return cm_df


class ImageTool:
    def __init__(self, cfg_path="cfg/cfg.yml"):

        # initialising the model and getting the annotations
        self.ea_obj = ErrorAnalysis(cfg_path)
        cfg_file = open(cfg_path)
        self.cfg_obj = yaml.load(cfg_file, Loader=yaml.FullLoader)
        self.inference_folder = self.ea_obj.inference_folder
        self.ea_obj.get_annots()
        self.gt_annots = self.ea_obj.gt_dict
        self.all_img = os.listdir(self.inference_folder)

        # for labels
        self.labels_dict = self.cfg_obj["error_analysis"]["labels_dict"]
        self.labels_dict = {v: k for k, v in self.labels_dict.items()}
        self.idx_base = self.cfg_obj["error_analysis"]["idx_base"]

        # for visualisation
        self.bbox_thickness = self.cfg_obj["visual_tool"]["bbox_thickness"]
        self.font_scale = self.cfg_obj["visual_tool"]["font_scale"]
        self.font_thickness = self.cfg_obj["visual_tool"]["font_thickness"]
        self.pred_colour = tuple(self.cfg_obj["visual_tool"]["pred_colour"])
        self.gt_colour = tuple(self.cfg_obj["visual_tool"]["gt_colour"])

    def show_img(self, img_fname="000000011149.jpg", show_preds=False, show_gt=False):
        """plot one image, optionally with predictions and/or ground truth drawn on it

        Args:
            img_fname (str, optional): image file name. Defaults to "000000011149.jpg".
            show_preds (bool, optional): draw prediction bboxes. Defaults to False.
            show_gt (bool, optional): draw ground truth bboxes. Defaults to False.

        Returns:
            plotly figure, or [figure, cm_df, cm_tpfpfn_df] if both preds and GT are shown
        """

        img = cv2.imread(f"{self.inference_folder}{img_fname}")

        labels = {"x": "X", "y": "Y", "color": "Colour"}

        if show_preds:
            preds = self.get_preds(img_fname)
            img = self.draw_pred_bboxes(img, preds)

        if show_gt:
            gt_annots = self.get_gt_annot(img_fname)
            img = self.draw_gt_bboxes(img, gt_annots)

        fig = px.imshow(img[..., ::-1], aspect="equal", labels=labels)

        if show_gt and show_preds:
            cm_df, cm_tpfpfn_dict = self.generate_cm_one_image(preds, gt_annots)
            return [fig, cm_df, cm_tpfpfn_dict]

        return fig

    def show_img_sbs(self, img_fname="000000011149.jpg"):
        """plot the ground truth and the prediction for one image side by side

        Args:
            img_fname (str, optional): image file name. Defaults to "000000011149.jpg".

        Returns:
            [gt figure, pred figure, cm_df, cm_tpfpfn_df]
        """

        # shows the image side by side
        img = cv2.imread(f"{self.inference_folder}{img_fname}")
        labels = {"x": "X", "y": "Y", "color": "Colour"}

        img_pred = img.copy()
        img_gt = img.copy()
        preds = self.get_preds(img_fname)
        img_pred = self.draw_pred_bboxes(img_pred, preds)
        gt_annots = self.get_gt_annot(img_fname)
        img_gt = self.draw_gt_bboxes(img_gt, gt_annots)

        fig1 = px.imshow(img_gt[..., ::-1], aspect="equal", labels=labels)
        fig2 = px.imshow(img_pred[..., ::-1], aspect="equal", labels=labels)
        fig2.update_yaxes(visible=False)

        cm_df, cm_tpfpfn_df = self.generate_cm_one_image(preds, gt_annots)

        return [fig1, fig2, cm_df, cm_tpfpfn_df]

    def generate_cm_one_image(self, preds, gt_annots):
        """generate the confusion matrix and TP/FP/FN table for one image

        Args:
            preds (_type_): predictions for the image
            gt_annots (_type_): ground truth annotations for the image

        Returns:
            confusion matrix DataFrame, TP/FP/FN DataFrame
        """

        num_classes = len(list(self.cfg_obj["error_analysis"]["labels_dict"].keys()))
        idx_base = self.cfg_obj["error_analysis"]["idx_base"]

        conf_threshold, iou_threshold = (
            self.ea_obj.model.score_threshold,
            self.ea_obj.model.iou_threshold,
        )
        cm = ConfusionMatrix(
            num_classes=num_classes,
            CONF_THRESHOLD=conf_threshold,
            IOU_THRESHOLD=iou_threshold,
        )

        gt_annots[:, 0] -= idx_base
        preds[:, -1] -= idx_base

        cm.process_batch(preds, gt_annots)
        confusion_matrix_df = cm.return_as_df()
        cm.get_tpfpfn()
        cm_tpfpfn_dict = {
            "True Positive": cm.tp,
            "False Positive": cm.fp,
            "False Negative": cm.fn,
        }
        cm_tpfpfn_df = pd.DataFrame(cm_tpfpfn_dict, index=[0])
        cm_tpfpfn_df = cm_tpfpfn_df.set_axis(["Values"], axis=0)
        cm_tpfpfn_df = cm_tpfpfn_df.astype(int)

        # amend df
        confusion_matrix_df = amend_cm_df(confusion_matrix_df, self.labels_dict)
        # print (cm.matrix)

        return confusion_matrix_df, cm_tpfpfn_df

    def get_preds(self, img_fname="000000011149.jpg"):
        """run inference on one image and rescale the bbox coordinates to pixel values

        Args:
            img_fname (str, optional): image file name. Defaults to "000000011149.jpg".

        Returns:
            np.array: n x [x1, y1, x2, y2, score, class] in pixel coordinates
        """

        # run inference using the error analysis object per image
        outputs, img_shape = self.ea_obj.generate_inference(img_fname)

        # converts image coordinates from normalised to integer values
        # image shape is [Y, X, C] (because rows are Y)
        # So don't get confused!
        outputs[:, 0] *= img_shape[1]
        outputs[:, 1] *= img_shape[0]
        outputs[:, 2] *= img_shape[1]
        outputs[:, 3] *= img_shape[0]

        return outputs

    def get_gt_annot(self, img_fname):
        """get the ground truth annotations for one image and rescale them to pixel values

        Args:
            img_fname (_type_): image file name

        Returns:
            np.array: n x [class, x1, y1, x2, y2] in pixel coordinates
        """
        ground_truth = self.gt_annots[img_fname].copy()
        img = cv2.imread(f"{self.inference_folder}{img_fname}")
        img_shape = img.shape
        ground_truth = transform_gt_bbox_format(ground_truth, img_shape, format="coco")

        # converts image coordinates from normalised to integer values
        # image shape is [Y, X, C] (because rows are Y)
        # So don't get confused!
        ground_truth[:, 1] *= img_shape[1]
        ground_truth[:, 2] *= img_shape[0]
        ground_truth[:, 3] *= img_shape[1]
        ground_truth[:, 4] *= img_shape[0]

        return ground_truth

    def draw_pred_bboxes(self, img_pred, preds):
        """draw the prediction bboxes and labels on an image

        Args:
            img_pred (_type_): image to draw on
            preds (_type_): predictions in pixel coordinates

        Returns:
            _type_: image with prediction bboxes drawn
        """
        for pred in preds:
            pred = pred.astype(int)
            img_pred = cv2.rectangle(
                img_pred,
                (pred[0], pred[1]),
                (pred[2], pred[3]),
                color=self.pred_colour,
                thickness=self.bbox_thickness,
            )
            img_pred = cv2.putText(
                img_pred,
                self.labels_dict[pred[5]],
                (pred[0] + 5, pred[1] + 25),
                color=self.pred_colour,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=self.font_scale,
                thickness=self.font_thickness,
            )
        return img_pred

    def draw_gt_bboxes(self, img_gt, gt_annots, **kwargs):
        """draw the ground truth bboxes and labels on an image

        Args:
            img_gt (_type_): image to draw on
            gt_annots (_type_): ground truth annotations in pixel coordinates

        Returns:
            _type_: image with ground truth bboxes drawn
        """
        for annot in gt_annots:
            annot = annot.astype(int)
            # print (annot)
            img_gt = cv2.rectangle(
                img_gt,
                (annot[1], annot[2]),
                (annot[3], annot[4]),
                color=self.gt_colour,
                thickness=self.bbox_thickness,
            )
            img_gt = cv2.putText(
                img_gt,
                self.labels_dict[annot[0]],
                (annot[1] + 5, annot[2] + 25),
                color=(0, 255, 0),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=self.font_scale,
                thickness=self.font_thickness,
            )
        return img_gt

    def plot_with_preds_gt(self, option, side_by_side=False, plot_type=None):
        """Rules on what plot to generate

        Args:
            option (_string_): image filename. Toggled on the app itself. See app.py
            side_by_side (bool, optional): Whether to have two plots side by side.
                Defaults to False.
            plot_type (_type_, optional): "all" - both GT and pred will be plotted,
                "pred" - only preds,
                "gt" - only ground truth,
                None - only the image is generated.
                Will be overridden if side_by_side = True.
                Defaults to None.
        """

        if plot_type == "all":
            plot, df, cm_tpfpfn_df = self.show_img(
                option, show_preds=True, show_gt=True
            )
            st.plotly_chart(plot, use_container_width=True)
            st.caption("Blue: Model BBox, Green: GT BBox")

            st.table(df)
            st.table(cm_tpfpfn_df)

        elif plot_type == "pred":
            st.plotly_chart(
                self.show_img(option, show_preds=True), use_container_width=True
            )

        elif plot_type == "gt":
            st.plotly_chart(
                self.show_img(option, show_gt=True), use_container_width=True
            )

        elif side_by_side:
            plot1, plot2, df, cm_tpfpfn_df = self.show_img_sbs(option)
            col1, col2 = st.columns(2)

            with col1:
                col1.subheader("Ground Truth")
                st.plotly_chart(plot1, use_container_width=True)
            with col2:
                col2.subheader("Prediction")
                st.plotly_chart(plot2, use_container_width=True)

            st.table(df)
            st.table(cm_tpfpfn_df)

        else:
            st.plotly_chart(self.show_img(option), use_container_width=True)
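ImageTool can also be used outside the Streamlit app, since show_img and show_img_sbs simply return Plotly figures and pandas DataFrames; a minimal sketch:

from src.st_image_tools import ImageTool

tool = ImageTool("cfg/cfg.yml")
fig, cm_df, tpfpfn_df = tool.show_img("000000011149.jpg", show_preds=True, show_gt=True)
fig.show()         # annotated image with both sets of boxes
print(cm_df)       # per-image confusion matrix, GT rows vs Pred columns
print(tpfpfn_df)   # TP / FP / FN counts at the configured thresholds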