import os
import re
import cv2
import time

import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET

from pathlib import Path
from torchvision import transforms
from configparser import ConfigParser, ExtendedInterpolation
from ast import literal_eval

# Assumed dependency: the resize/bbox transform below relies on albumentations
# (A.Compose, A.Resize, A.BboxParams) and its PyTorch ToTensor helper.
# Note: ToTensor was removed in albumentations >= 1.0 in favour of ToTensorV2.
import albumentations as A
from albumentations.pytorch import ToTensor

from src.models.model import Model
from src.models.eval.confusion_matrix import ConfusionMatrix

|
def generate_inference_from_img_folder(csv_file, model_cfg, img_folder, ckpt_file,
                                        nms_thresh, conf_thresh, device="cuda", csv_path=None):
    """Retrieve the inference information of the test images given a trained model checkpoint.

    Parameters
    ----------
    csv_file : str
        Path of the csv file containing the information of the test images.
    model_cfg : str
        Path of the model config file to use, specific to the checkpoint file.
    img_folder : str
        Folder containing the images.
    ckpt_file : str
        Path of the model checkpoint file to use for model inference.
    nms_thresh : float
        Non-maximum suppression threshold to use for model inference, between 0 and 1.
    conf_thresh : float
        Confidence threshold to use for model inference, between 0 and 1.
    device : str, optional
        Device to use for inference, either "cuda" or "cpu", by default "cuda".
    csv_path : str, optional
        Path to save the pandas.DataFrame output as a csv, by default None, i.e. no csv is generated.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe containing the inference information of the test images.
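
    Examples
    --------
    Illustrative call only; the paths and threshold values below are placeholders,
    not values taken from this project:

    >>> df = generate_inference_from_img_folder(
    ...     csv_file="data/test_images.csv", model_cfg="cfg/cfg_frcn.ini",
    ...     img_folder="data/images", ckpt_file="checkpoints/model.ckpt",
    ...     nms_thresh=0.1, conf_thresh=0.7, device="cpu")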
|
    """

    pl_config = ConfigParser(interpolation=ExtendedInterpolation())
    pl_config.read(model_cfg)

    model_selected = Model(pl_config)

    df_original = pd.read_csv(csv_file)

    df_test = df_original[df_original['remarks_xml'] == 'Available xml file'].reset_index()
    df_test = df_test[df_test['set_type'] == 'Test'].reset_index()

    img_number = 0
    prediction_info_list = []
    for _, rows in df_test.iterrows():
        img_file = rows["image_file_name"]
        img_number += 1
        inference_start_time = time.time()
        img_file_path = os.path.join(img_folder, img_file)

        img_inference = model_selected.inference(
            device=device, img_path=img_file_path, ckpt_path=ckpt_file,
            nms_thresh=nms_thresh, conf_thresh=conf_thresh)

        predicted_boxes_unsorted = img_inference[0].tolist()
        predicted_labels_unsorted = img_inference[1].tolist()
        predicted_confidence_unsorted = img_inference[2].tolist()
|
        # Sort predictions by descending confidence.
        predicted_boxes = [x for _, x in sorted(zip(predicted_confidence_unsorted, predicted_boxes_unsorted), reverse=True)]
        predicted_labels = [x for _, x in sorted(zip(predicted_confidence_unsorted, predicted_labels_unsorted), reverse=True)]
        predicted_confidence = sorted(predicted_confidence_unsorted, reverse=True)

        predicted_boxes_int = []
        for box in predicted_boxes:
            box_int = [round(x) for x in box]
            predicted_boxes_int.append(box_int)

        # Build [xmin, ymin, xmax, ymax, confidence, label] rows for the confusion matrix.
        # Copy each box so appending confidence and label does not mutate predicted_boxes.
        cm_detections_list = []
        for prediction in range(len(predicted_boxes)):
            detection_list = list(predicted_boxes[prediction])
            detection_list.append(predicted_confidence[prediction])
            detection_list.append(predicted_labels[prediction])
            cm_detections_list.append(detection_list)
|
        inference_time_per_image = round(time.time() - inference_start_time, 2)
        if img_number % 100 == 0:
            print(f'Performing inference on Image {img_number}: {img_file_path}')
            print(f'Time taken for image: {inference_time_per_image}')

        prediction_info = {
            "image_file_path": img_file_path,
            "image_file_name": img_file,
            "number_of_predictions": len(predicted_boxes),
            "predicted_boxes": predicted_boxes,
            "predicted_boxes_int": predicted_boxes_int,
            "predicted_labels": predicted_labels,
            "predicted_confidence": predicted_confidence,
            "cm_detections_list": cm_detections_list,
            "inference_time": inference_time_per_image
        }
        prediction_info_list.append(prediction_info)

    df = pd.DataFrame(prediction_info_list)

    if csv_path is not None:
        df.to_csv(csv_path, index=False)
        print("Dataframe saved as csv to " + csv_path)

    return df
|
|
def get_gt_from_img_folder(csv_file, img_folder, xml_folder, names_file, map_start_index=1, csv_path=None):
    """Retrieve the ground truth information of the test images.

    Parameters
    ----------
    csv_file : str
        Path of the csv file containing the information of the test images.
    img_folder : str
        Folder containing the images.
    xml_folder : str
        Folder containing the xml files associated with the images.
    names_file : str
        Names file containing the class labels of interest.
    map_start_index : int, optional
        Number attached to the first class label listed in the names file; subsequent
        labels are numbered consecutively, by default 1.
    csv_path : str, optional
        Path to save the pandas.DataFrame output as a csv, by default None, i.e. no csv is generated.

    Returns
    -------
    df : pandas.DataFrame
        Dataframe containing the ground truth information of the test images.
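
    Examples
    --------
    Illustrative call only; the paths below are placeholders. Note that
    xml_folder is concatenated directly with the image file stem, so it should
    end with a trailing slash:

    >>> df_gt = get_gt_from_img_folder(
    ...     csv_file="data/test_images.csv", img_folder="data/images",
    ...     xml_folder="data/Annotations/", names_file="data/obj.names",
    ...     csv_path="data/df_gt.csv")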
|
    """

    df_original = pd.read_csv(csv_file)

    df_test = df_original[df_original['remarks_xml'] == 'Available xml file'].reset_index()
    df_test = df_test[df_test['set_type'] == 'Test'].reset_index()

    class_labels_dict = {}
    with open(names_file) as f:
        for index, line in enumerate(f):
            idx = index + map_start_index
            class_labels = line.splitlines()[0]
            class_labels_dict[class_labels] = idx

    gt_info_list = []

    for _, rows in df_test.iterrows():
        img_file = rows["image_file_name"]

        img_file_path = os.path.join(img_folder, img_file)
        img = cv2.imread(filename=img_file_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        file_stem = Path(img_file_path).stem
        xml_file_path = xml_folder + file_stem + ".xml"

        tree = ET.parse(xml_file_path)
        root = tree.getroot()

        for image_detail in root.findall('size'):
            image_width = float(image_detail.find('width').text)
            image_height = float(image_detail.find('height').text)

        class_index_list = []
        bb_list = []
        truncated_list = []
        occluded_list = []
        for item in root.findall('object'):
            if item.find('truncated') is not None:
                truncated = int(item.find('truncated').text)
            else:
                truncated = 0
|
            # Guard against a missing <occluded> element as well as empty text,
            # so the lookup cannot raise an AttributeError.
            occluded_item = item.find('occluded')
            if occluded_item is not None and occluded_item.text is not None:
                occluded = int(occluded_item.text)
            else:
                occluded = 0
|
            for bb_details in item.findall('bndbox'):
                class_label = item.find('name').text
                class_index = class_labels_dict[class_label]
                xmin = float(bb_details.find('xmin').text)
                ymin = float(bb_details.find('ymin').text)
                xmax = float(bb_details.find('xmax').text)
                ymax = float(bb_details.find('ymax').text)

                class_index_list.append(class_index)
                bb_list.append([xmin, ymin, xmax, ymax])
                truncated_list.append(truncated)
                occluded_list.append(occluded)

        transform = A.Compose([
            A.Resize(608, 608),
            ToTensor()
        ],
            bbox_params=A.BboxParams(format='pascal_voc',
                                     label_fields=['class_labels']),
        )
|
        augmented = transform(image=img, bboxes=bb_list, class_labels=class_index_list)

        img = augmented['image'].float()
        gt_boxes = augmented['bboxes']
        gt_boxes_list = [list(box) for box in gt_boxes]
        gt_labels = augmented['class_labels']

        gt_boxes_int = []
        for box in gt_boxes:
            box_int = [round(x) for x in box]
            gt_boxes_int.append(box_int)

        cm_gt_list = []
        for gt in range(len(gt_boxes)):
            gt_list = [gt_labels[gt]]
            gt_list.extend(gt_boxes[gt])
            cm_gt_list.append(gt_list)

        gt_area_list = []
        gt_area_type = []
        for gt_box in gt_boxes:
            gt_area = (gt_box[3] - gt_box[1]) * (gt_box[2] - gt_box[0])
            gt_area_list.append(gt_area)

            if gt_area < 32 * 32:
                area_type = "S"
                gt_area_type.append(area_type)
            elif gt_area < 96 * 96:
                area_type = "M"
                gt_area_type.append(area_type)
            else:
                area_type = "L"
                gt_area_type.append(area_type)
|
        gt_info = {
            "image_file_path": img_file_path,
            "image_file_name": img_file,
            "image_width": image_width,
            "image_height": image_height,
            "number_of_gt": len(gt_boxes_list),
            "gt_labels": gt_labels,
            "gt_boxes": gt_boxes_list,
            "gt_boxes_int": gt_boxes_int,
            "cm_gt_list": cm_gt_list,
            "gt_area_list": gt_area_list,
            "gt_area_type": gt_area_type,
            "truncated_list": truncated_list,
            "occluded_list": occluded_list
        }
        gt_info_list.append(gt_info)

    df = pd.DataFrame(gt_info_list)

    if csv_path is not None:
        df.to_csv(csv_path, index=False)
        print("Dataframe saved as csv to " + csv_path)

    return df
|
|
def combine_gt_predictions(csv_file, img_folder, xml_folder, names_file, model_cfg, ckpt_file, csv_save_folder,
                           device="cuda", nms_threshold=0.1, confidence_threshold=0.7, iou_threshold=0.4, gt_statistics=True):
    """Retrieve the combined inference and ground truth information of the test images.

    Parameters
    ----------
    csv_file : str
        Path of the csv file containing the information of the test images.
    img_folder : str
        Folder containing the images.
    xml_folder : str
        Folder containing the xml files associated with the images.
    names_file : str
        Names file containing the class labels of interest.
    model_cfg : str
        Path of the model config file to use, specific to the checkpoint file.
    ckpt_file : str
        Path of the model checkpoint file to use for model inference.
    csv_save_folder : str
        Folder to save the generated csv files.
    device : str, optional
        Device to use for inference, either "cuda" or "cpu", by default "cuda".
    nms_threshold : float, optional
        Non-maximum suppression threshold to use for model inference, between 0 and 1, by default 0.1.
    confidence_threshold : float, optional
        Confidence threshold to use for model inference, between 0 and 1, by default 0.7.
    iou_threshold : float, optional
        IOU threshold used to identify true positives among the predictions and ground truth, by default 0.4.
    gt_statistics : bool, optional
        Whether to also generate df_gt_analysis, by default True.

    Returns
    -------
    df_full : pandas.DataFrame
        Dataframe containing the combined inference and ground truth information of the test images, by image.
    df_gt_analysis : pandas.DataFrame, optional
        Dataframe containing the combined inference and ground truth information of the test images, by ground truth box.
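
    Examples
    --------
    Illustrative call only; the paths below are placeholders. csv_save_folder is
    prepended directly to the output file names, so it should end with a
    trailing slash:

    >>> df_full, df_gt_analysis = combine_gt_predictions(
    ...     csv_file="data/test_images.csv", img_folder="data/images",
    ...     xml_folder="data/Annotations/", names_file="data/obj.names",
    ...     model_cfg="cfg/cfg_frcn.ini", ckpt_file="checkpoints/model.ckpt",
    ...     csv_save_folder="outputs/", nms_threshold=0.1,
    ...     confidence_threshold=0.7, iou_threshold=0.4)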
|
    """

    print(f"NMS Threshold: {nms_threshold}")
    print(f"Confidence Threshold: {confidence_threshold}")
    print(f"IOU Threshold: {iou_threshold}")

    df_gt = get_gt_from_img_folder(
        csv_file, img_folder, xml_folder, names_file)
    print("Successful Generation of Ground Truth Information")
    df_predictions = generate_inference_from_img_folder(
        csv_file, model_cfg, img_folder, ckpt_file,
        nms_thresh=nms_threshold, conf_thresh=confidence_threshold, device=device)
    print("Successful Generation of Inference")

    df_all = pd.merge(df_gt, df_predictions, how='left', on=["image_file_path", "image_file_name"])
    print("Successful Merging")

    class_labels_list = []
    with open(names_file) as f:
        for index, line in enumerate(f):
            class_labels = line.splitlines()[0]
            class_labels_list.append(class_labels)

    combined_info_list = []
    for _, rows in df_all.iterrows():
        img_file = rows["image_file_name"]
        predicted_boxes = rows["predicted_boxes"]
        predicted_labels = rows["predicted_labels"]
        predicted_confidence = rows["predicted_confidence"]
        gt_boxes = rows["gt_boxes"]
        gt_labels = rows["gt_labels"]
        cm_gt_list = rows["cm_gt_list"]
        cm_detections_list = rows["cm_detections_list"]
|
        if rows["number_of_predictions"] == 0:

            gt_summary_list = []
            gt_match_list = []
            gt_match_idx_list = []
            gt_match_idx_conf_list = []
            gt_match_idx_bb_list = []
            for idx in range(len(gt_labels)):
                gt_summary = "NO"
                match = ["GT", idx, "-"]
                match_idx = "-"
                match_bb = "-"
                gt_summary_list.append(gt_summary)
                gt_match_list.append(tuple(match))
                gt_match_idx_list.append(match_idx)
                gt_match_idx_conf_list.append(match_idx)
                gt_match_idx_bb_list.append(match_bb)

            combined_info = {
                "image_file_name": img_file,
                "number_of_predictions_conf": [],
                "predicted_labels_conf": [],
                "predicted_confidence_conf": [],
                "num_matches": [],
                "num_mismatch": [],
                "labels_hit": [],
                "pairs_mislabel_gt_prediction": [],
                "gt_match_idx_list": gt_match_idx_list,
                "gt_match_idx_conf_list": gt_match_idx_conf_list,
                "gt_match_idx_bb_list": gt_match_idx_bb_list,
                "prediction_match": [],
                "gt_analysis": gt_summary_list,
                "prediction_analysis": [],
                "gt_match": gt_match_list
            }
|
        else:

            CM = ConfusionMatrix(
                num_classes=len(class_labels_list) + 1,
                CONF_THRESHOLD=confidence_threshold,
                IOU_THRESHOLD=iou_threshold)

            matching_boxes = CM.process_batch(
                detections=np.asarray(cm_detections_list),
                labels=np.asarray(cm_gt_list),
                return_matches=True)

            # Predictions are already sorted by descending confidence, so slicing the
            # first predicted_confidence_count entries keeps only those above the threshold.
            predicted_confidence_count = len([confidence for confidence in predicted_confidence if confidence > confidence_threshold])
            predicted_confidence_round = [round(confidence, 4) for confidence in predicted_confidence]

            predicted_confidence_conf = predicted_confidence_round[:predicted_confidence_count]
            predicted_labels_conf = predicted_labels[:predicted_confidence_count]
            predicted_boxes_conf = predicted_boxes[:predicted_confidence_count]

            number_of_predictions_conf = len(predicted_labels_conf)

            match_correct_list = []
            match_wrong_list = []
            gt_matched_idx_dict = {}
            predicted_matched_idx_dict = {}
            gt_mismatch_idx_dict = {}
            predicted_mismatch_idx_dict = {}
            labels_hit = []
            pairs_mislabel_gt_prediction = []

            # Each match is interpreted as (ground truth index, prediction index, IOU).
            for match in matching_boxes:
                gt_idx = int(match[0])
                predicted_idx = int(match[1])
                iou = round(match[2], 4)
                match = [gt_idx, predicted_idx, iou]

                if gt_labels[gt_idx] == predicted_labels_conf[predicted_idx]:
                    match_correct_list.append(match)
                    gt_matched_idx_dict[gt_idx] = match
                    predicted_matched_idx_dict[predicted_idx] = match
                    labels_hit.append(gt_labels[gt_idx])
                else:
                    match_wrong_list.append(match)
                    gt_mismatch_idx_dict[gt_idx] = match
                    predicted_mismatch_idx_dict[predicted_idx] = match
                    pairs_mislabel_gt_prediction.append(
                        [gt_labels[gt_idx], predicted_labels_conf[predicted_idx]])
|
            gt_summary_list = []
            gt_match_list = []
            gt_match_idx_list = []
            gt_match_idx_conf_list = []
            gt_match_idx_bb_list = []
            for idx in range(len(gt_labels)):
                if idx in gt_matched_idx_dict.keys():
                    gt_summary = "MATCH"
                    match = gt_matched_idx_dict[idx]
                    match_idx = predicted_labels_conf[match[1]]
                    match_conf = predicted_confidence_conf[match[1]]
                    match_bb = predicted_boxes_conf[match[1]]
                elif idx in gt_mismatch_idx_dict.keys():
                    gt_summary = "MISMATCH"
                    match = gt_mismatch_idx_dict[idx]
                    match_idx = predicted_labels_conf[match[1]]
                    match_conf = predicted_confidence_conf[match[1]]
                    match_bb = predicted_boxes_conf[match[1]]
                else:
                    gt_summary = "NO"
                    match = ["GT", idx, "-"]
                    match_idx = "-"
                    match_conf = "-"
                    match_bb = "-"
                gt_summary_list.append(gt_summary)
                gt_match_list.append(tuple(match))
                gt_match_idx_list.append(match_idx)
                gt_match_idx_conf_list.append(match_conf)
                gt_match_idx_bb_list.append(match_bb)
|
            prediction_summary_list = []
            prediction_match_list = []
            for idx in range(len(predicted_labels_conf)):
                if idx in predicted_matched_idx_dict.keys():
                    prediction_summary = "MATCH"
                    match = predicted_matched_idx_dict[idx]
                elif idx in predicted_mismatch_idx_dict.keys():
                    prediction_summary = "MISMATCH"
                    match = predicted_mismatch_idx_dict[idx]
                else:
                    prediction_summary = "NO"
                    match = [idx, "P", "-"]
                prediction_summary_list.append(prediction_summary)
                prediction_match_list.append(tuple(match))
|
            combined_info = {
                "image_file_name": img_file,
                "number_of_predictions_conf": number_of_predictions_conf,
                "predicted_labels_conf": predicted_labels_conf,
                "predicted_confidence_conf": predicted_confidence_conf,
                "num_matches": len(match_correct_list),
                "num_mismatch": len(match_wrong_list),
                "labels_hit": labels_hit,
                "pairs_mislabel_gt_prediction": pairs_mislabel_gt_prediction,
                "gt_match_idx_list": gt_match_idx_list,
                "gt_match_idx_conf_list": gt_match_idx_conf_list,
                "gt_match_idx_bb_list": gt_match_idx_bb_list,
                "gt_match": gt_match_list,
                "prediction_match": prediction_match_list,
                "gt_analysis": gt_summary_list,
                "prediction_analysis": prediction_summary_list
            }

        combined_info_list.append(combined_info)
|
    df_combined = pd.DataFrame(combined_info_list)

    df_full = pd.merge(df_all, df_combined, how='left', on=["image_file_name"])

    csv_path_combined = f"{csv_save_folder}df_inference_details_nms_{nms_threshold}_conf_{confidence_threshold}_iou_{iou_threshold}.csv"

    df_full.to_csv(csv_path_combined, index=False)
    print("Dataframe saved as csv to " + csv_path_combined)

    if gt_statistics:
        print("Generating Statistics for Single Ground Truth")
        csv_path_gt = f"{csv_save_folder}df_gt_details_nms_{nms_threshold}_conf_{confidence_threshold}_iou_{iou_threshold}.csv"
        df_gt_analysis = __get_single_gt_analysis(csv_output=csv_path_gt, df_input=df_full)
        return df_full, df_gt_analysis
    else:
        return df_full
|
|
def __get_single_gt_analysis(csv_output, df_input=None, csv_input=None):
    """Expand the per-image dataframe into one row per ground truth box and save it as a csv."""

    if df_input is None:
        df_gt = pd.read_csv(csv_input)

        # Columns read back from csv are strings; convert them back to Python objects.
        df_gt.gt_labels = df_gt.gt_labels.apply(literal_eval)
        df_gt.gt_boxes = df_gt.gt_boxes.apply(literal_eval)
        df_gt.gt_boxes_int = df_gt.gt_boxes_int.apply(literal_eval)
        df_gt.gt_area_list = df_gt.gt_area_list.apply(literal_eval)
        df_gt.gt_area_type = df_gt.gt_area_type.apply(literal_eval)
        df_gt.truncated_list = df_gt.truncated_list.apply(literal_eval)
        df_gt.occluded_list = df_gt.occluded_list.apply(literal_eval)
        df_gt.gt_match_idx_list = df_gt.gt_match_idx_list.apply(literal_eval)
        df_gt.gt_match_idx_conf_list = df_gt.gt_match_idx_conf_list.apply(literal_eval)
        df_gt.gt_match_idx_bb_list = df_gt.gt_match_idx_bb_list.apply(literal_eval)
        df_gt.gt_match = df_gt.gt_match.apply(literal_eval)
        df_gt.gt_analysis = df_gt.gt_analysis.apply(literal_eval)
    else:
        df_gt = df_input
|
    gt_info_list = []
    for _, rows in df_gt.iterrows():
        for idx in range(rows["number_of_gt"]):
            df_gt_image_dict = {
                "GT_Image": rows["image_file_name"],
                "GT_Label": rows["gt_labels"][idx],
                "GT_Boxes": rows["gt_boxes"][idx],
                "GT_Boxes_Int": rows["gt_boxes_int"][idx],
                "GT_Area": rows["gt_area_list"][idx],
                "GT_Area_Type": rows["gt_area_type"][idx],
                "Truncated": rows["truncated_list"][idx],
                "Occluded": rows["occluded_list"][idx],
                "GT_Match": rows["gt_match"][idx],
                "IOU": rows["gt_match"][idx][2],
                "GT_Match_IDX": rows["gt_match_idx_list"][idx],
                "GT_Confidence_IDX": rows["gt_match_idx_conf_list"][idx],
                "GT_Predicted_Boxes_IDX": rows["gt_match_idx_bb_list"][idx],
                "GT_Analysis": rows["gt_analysis"][idx]
            }
            gt_info_list.append(df_gt_image_dict)

    df_final = pd.DataFrame(gt_info_list)
    df_final = df_final.reset_index(drop=True)

    df_final.to_csv(csv_output, index=False)
    print("Dataframe saved as csv to " + csv_output)

    return df_final
|
|
if __name__ == '__main__':

    combine_gt_predictions(
        csv_file="/polyaxon-data/workspace/stee/voc_image_annotations_batch123.csv",
        img_folder="/polyaxon-data/workspace/stee/data_batch123",
        xml_folder="/polyaxon-data/workspace/stee/data_batch123/Annotations/",
        names_file="/polyaxon-data/workspace/stee/data_batch123/obj.names",
        model_cfg="cfg/cfg_frcn.ini",
        ckpt_file="/polyaxon-data/workspace/stee/andy/epoch=99-step=61899.ckpt",
        csv_save_folder="/polyaxon-data/workspace/stee/andy/generation/",
        nms_threshold=0.9,
        confidence_threshold=0.3,
        iou_threshold=0.4,
        gt_statistics=False)