Ge committed on
Commit
7fd430f
·
1 Parent(s): 22857d6

fix default settings for demo

Browse files
README.md CHANGED
@@ -69,15 +69,15 @@ Step1. Download a pretrained model from the benchmark table.
69
  Step2. Use either -n or -f to specify your detector's config. For example:
70
 
71
  ```shell
72
- python tools/demo.py image -n yolox-s -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.3 --nms 0.65 --tsize 640 --save_result --device [cpu/gpu]
73
  ```
74
  or
75
  ```shell
76
- python tools/demo.py image -f exps/default/yolox_s.py -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.3 --nms 0.65 --tsize 640 --save_result --device [cpu/gpu]
77
  ```
78
  Demo for video:
79
  ```shell
80
- python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth.tar --path /path/to/your/video --conf 0.3 --nms 0.65 --tsize 640 --save_result --device [cpu/gpu]
81
  ```
82
 
83
 
 
69
  Step2. Use either -n or -f to specify your detector's config. For example:
70
 
71
  ```shell
72
+ python tools/demo.py image -n yolox-s -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
73
  ```
74
  or
75
  ```shell
76
+ python tools/demo.py image -f exps/default/yolox_s.py -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
77
  ```
78
  Demo for video:
79
  ```shell
80
+ python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth.tar --path /path/to/your/video --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
81
  ```
82
 
83
 
demo/ONNXRuntime/onnx_inference.py CHANGED
@@ -83,8 +83,7 @@ if __name__ == '__main__':
83
  boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
84
  boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
85
  boxes_xyxy /= ratio
86
- dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.65, score_thr=0.1)
87
-
88
  if dets is not None:
89
  final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
90
  origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
 
83
  boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
84
  boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
85
  boxes_xyxy /= ratio
86
+ dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
 
87
  if dets is not None:
88
  final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
89
  origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
demo/OpenVINO/cpp/yolox_openvino.cpp CHANGED
@@ -18,7 +18,7 @@ using namespace InferenceEngine;
18
  #define tcout std::cout
19
  #define file_name_t std::string
20
  #define imread_t cv::imread
21
- #define NMS_THRESH 0.65
22
  #define BBOX_CONF_THRESH 0.3
23
 
24
  static const int INPUT_W = 416;
 
18
  #define tcout std::cout
19
  #define file_name_t std::string
20
  #define imread_t cv::imread
21
+ #define NMS_THRESH 0.45
22
  #define BBOX_CONF_THRESH 0.3
23
 
24
  static const int INPUT_W = 416;
demo/OpenVINO/python/openvino_inference.py CHANGED
@@ -141,8 +141,8 @@ def main():
141
  boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
142
  boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
143
  boxes_xyxy /= ratio
144
- dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.65, score_thr=0.1)
145
-
146
  if dets is not None:
147
  final_boxes = dets[:, :4]
148
  final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
 
141
  boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2.
142
  boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2.
143
  boxes_xyxy /= ratio
144
+ dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
145
+
146
  if dets is not None:
147
  final_boxes = dets[:, :4]
148
  final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
demo/TensorRT/cpp/yolox.cpp CHANGED
@@ -22,7 +22,7 @@
22
  } while (0)
23
 
24
  #define DEVICE 0 // GPU id
25
- #define NMS_THRESH 0.65
26
  #define BBOX_CONF_THRESH 0.3
27
 
28
  using namespace nvinfer1;
 
22
  } while (0)
23
 
24
  #define DEVICE 0 // GPU id
25
+ #define NMS_THRESH 0.45
26
  #define BBOX_CONF_THRESH 0.3
27
 
28
  using namespace nvinfer1;
tools/demo.py CHANGED
@@ -81,12 +81,13 @@ def get_image_list(path):
81
 
82
 
83
  class Predictor(object):
84
- def __init__(self, model, exp, cls_names=COCO_CLASSES, trt_file=None, decoder=None, device="cpu"):
85
  self.model = model
86
  self.cls_names = cls_names
87
  self.decoder = decoder
88
  self.num_classes = exp.num_classes
89
  self.confthre = exp.test_conf
 
90
  self.nmsthre = exp.nmsthre
91
  self.test_size = exp.test_size
92
  self.device = device
@@ -158,7 +159,7 @@ def image_demo(predictor, vis_folder, path, current_time, save_result):
158
  files.sort()
159
  for image_name in files:
160
  outputs, img_info = predictor.inference(image_name)
161
- result_image = predictor.visual(outputs[0], img_info)
162
  if save_result:
163
  save_folder = os.path.join(
164
  vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
@@ -191,7 +192,7 @@ def imageflow_demo(predictor, vis_folder, current_time, args):
191
  ret_val, frame = cap.read()
192
  if ret_val:
193
  outputs, img_info = predictor.inference(frame)
194
- result_frame = predictor.visual(outputs[0], img_info)
195
  if args.save_result:
196
  vid_writer.write(result_frame)
197
  ch = cv2.waitKey(1)
@@ -260,7 +261,7 @@ def main(exp, args):
260
  trt_file = None
261
  decoder = None
262
 
263
- predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, args.device)
264
  current_time = time.localtime()
265
  if args.demo == 'image':
266
  image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
 
81
 
82
 
83
  class Predictor(object):
84
+ def __init__(self, model, exp, cls_names=COCO_CLASSES, trt_file=None, decoder=None, conf_vis=0.3, device="cpu"):
85
  self.model = model
86
  self.cls_names = cls_names
87
  self.decoder = decoder
88
  self.num_classes = exp.num_classes
89
  self.confthre = exp.test_conf
90
+ self.conf_vis = conf_vis
91
  self.nmsthre = exp.nmsthre
92
  self.test_size = exp.test_size
93
  self.device = device
 
159
  files.sort()
160
  for image_name in files:
161
  outputs, img_info = predictor.inference(image_name)
162
+ result_image = predictor.visual(outputs[0], img_info, predictor.conf_vis)
163
  if save_result:
164
  save_folder = os.path.join(
165
  vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
 
192
  ret_val, frame = cap.read()
193
  if ret_val:
194
  outputs, img_info = predictor.inference(frame)
195
+ result_frame = predictor.visual(outputs[0], img_info, predictor.conf_vis)
196
  if args.save_result:
197
  vid_writer.write(result_frame)
198
  ch = cv2.waitKey(1)
 
261
  trt_file = None
262
  decoder = None
263
 
264
+ predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, args.conf, args.device)
265
  current_time = time.localtime()
266
  if args.demo == 'image':
267
  image_demo(predictor, vis_folder, args.path, current_time, args.save_result)