Ge Zheng committed on
Commit 7e1e02f
1 Parent(s): 8d570cc

remove 'tar' suffix and improve yolox-tiny from 31.7 to 32.8 (#329)

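Note: after this change the trainer writes `best_ckpt.pth` / `latest_ckpt.pth`, and the tools look those names up by default, so checkpoints saved by earlier versions under the old `.pth.tar` suffix are no longer found automatically. Below is a minimal migration sketch; the `YOLOX_outputs` directory is an assumed default output location, and since `torch` serialization does not depend on the file extension, renaming is enough.

```python
import os

# Sketch only: rename checkpoints written before this commit (*.pth.tar)
# to the new *.pth suffix so the default best_ckpt.pth / latest_ckpt.pth
# lookups in tools/demo.py, tools/eval.py and the trainer still find them.
# "YOLOX_outputs" is an assumed output directory; adjust it to your setup.
output_root = "YOLOX_outputs"

for dirpath, _, filenames in os.walk(output_root):
    for name in filenames:
        if name.endswith(".pth.tar"):
            old_path = os.path.join(dirpath, name)
            new_path = old_path[: -len(".tar")]  # drop only the trailing ".tar"
            os.rename(old_path, new_path)
            print(f"renamed {old_path} -> {new_path}")
```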
README.md CHANGED
@@ -33,7 +33,7 @@ For more details, please refer to our [report on Arxiv](https://arxiv.org/abs/21
 |Model |size |mAP<sup>val<br>0.5:0.95 | Params<br>(M) |FLOPs<br>(G)| weights |
 | ------ |:---: | :---: |:---: |:---: | :---: |
 |[YOLOX-Nano](./exps/default/nano.py) |416 |25.3 | 0.91 |1.08 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EdcREey-krhLtdtSnxolxiUBjWMy6EFdiaO9bdOwZ5ygCQ?e=yQpdds)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano.pth) |
-|[YOLOX-Tiny](./exps/default/yolox_tiny.py) |416 |31.7 | 5.06 |6.45 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EYtjNFPqvZBBrQ-VowLcSr4B6Z5TdTflUsr_gO2CwhC3bQ?e=SBTwXj)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny.pth) |
+|[YOLOX-Tiny](./exps/default/yolox_tiny.py) |416 |32.8 | 5.06 |6.45 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EbZuinX5X1dJmNy8nqSRegABWspKw3QpXxuO82YSoFN1oQ?e=Q7V7XE)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny_32dot8.pth) |
 
 ## Quick Start
 
@@ -71,15 +71,15 @@ Step1. Download a pretrained model from the benchmark table.
 Step2. Use either -n or -f to specify your detector's config. For example:
 
 ```shell
-python tools/demo.py image -n yolox-s -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+python tools/demo.py image -n yolox-s -c /path/to/your/yolox_s.pth --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
 ```
 or
 ```shell
-python tools/demo.py image -f exps/default/yolox_s.py -c /path/to/your/yolox_s.pth.tar --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+python tools/demo.py image -f exps/default/yolox_s.py -c /path/to/your/yolox_s.pth --path assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
 ```
 Demo for video:
 ```shell
-python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth.tar --path /path/to/your/video --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
+python tools/demo.py video -n yolox-s -c /path/to/your/yolox_s.pth --path /path/to/your/video --conf 0.25 --nms 0.45 --tsize 640 --save_result --device [cpu/gpu]
 ```
 
 
@@ -130,7 +130,7 @@ python tools/train.py -f exps/default/yolox-s.py -d 8 -b 64 --fp16 -o
 We support batch testing for fast evaluation:
 
 ```shell
-python tools/eval.py -n yolox-s -c yolox_s.pth.tar -b 64 -d 8 --conf 0.001 [--fp16] [--fuse]
+python tools/eval.py -n yolox-s -c yolox_s.pth -b 64 -d 8 --conf 0.001 [--fp16] [--fuse]
 yolox-m
 yolox-l
 yolox-x
@@ -141,7 +141,7 @@ python tools/eval.py -n yolox-s -c yolox_s.pth.tar -b 64 -d 8 --conf 0.001 [--fp16] [--fuse]
 
 To reproduce speed test, we use the following command:
 ```shell
-python tools/eval.py -n yolox-s -c yolox_s.pth.tar -b 1 -d 1 --conf 0.001 --fp16 --fuse
+python tools/eval.py -n yolox-s -c yolox_s.pth -b 1 -d 1 --conf 0.001 --fp16 --fuse
 yolox-m
 yolox-l
 yolox-x
demo/MegEngine/python/README.md CHANGED
@@ -13,7 +13,7 @@ python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
 ### Step2: convert checkpoint weights from torch's path file
 
 ```
-python3 convert_weights.py -w yolox_s.pth.tar -o yolox_s_mge.pkl
+python3 convert_weights.py -w yolox_s.pth -o yolox_s_mge.pkl
 ```
 
 ### Step3: run demo
demo/ONNXRuntime/README.md CHANGED
@@ -6,7 +6,7 @@ This doc introduces how to convert your pytorch model into onnx, and how to run
 | Model | Parameters | GFLOPs | Test Size | mAP | Weights |
 |:------| :----: | :----: | :---: | :---: | :---: |
 | YOLOX-Nano | 0.91M | 1.08 | 416x416 | 25.3 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EfAGwvevU-lNhW5OqFAyHbwBJdI_7EaKu5yU04fgF5BU7w?e=gvq4hf)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_nano.onnx) |
-| YOLOX-Tiny | 5.06M | 6.45 | 416x416 |31.7 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/EVigCszU1ilDn-MwLwHCF1ABsgTy06xFdVgZ04Yyo4lHVA?e=hVKiCw)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny.onnx) |
+| YOLOX-Tiny | 5.06M | 6.45 | 416x416 |32.8 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ET64VPoEV8FAm5YBiEj5JXwBVn_KYHM38iJQ_lpcK2slYw?e=uuJ7Ii)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_tiny_32dot8.onnx) |
 | YOLOX-S | 9.0M | 26.8 | 640x640 |39.6 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/Ec0L1d1x2UtIpbfiahgxhtgBZVjb1NCXbotO8SCOdMqpQQ?e=siyIsK)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s.onnx) |
 | YOLOX-M | 25.3M | 73.8 | 640x640 |46.4 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ERUKlQe-nlxBoTKPy1ynbxsBmAZ_h-VBEV-nnfPdzUIkZQ?e=hyQQtl)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_m.onnx) |
 | YOLOX-L | 54.2M | 155.6 | 640x640 |50.0 | [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ET5w926jCA5GlVfg9ixB4KEBiW0HYl7SzaHNRaRG9dYO_A?e=ISmCYX)/[github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_l.onnx) |
@@ -24,7 +24,7 @@ Then, you can:
 
 1. Convert a standard YOLOX model by -n:
 ```shell
-python3 tools/export_onnx.py --output-name yolox_s.onnx -n yolox-s -c yolox_s.pth.tar
+python3 tools/export_onnx.py --output-name yolox_s.onnx -n yolox-s -c yolox_s.pth
 ```
 Notes:
 * -n: specify a model name. The model name must be one of the [yolox-s,m,l,x and yolox-nane, yolox-tiny, yolov3]
@@ -40,13 +40,13 @@ Notes:
 2. Convert a standard YOLOX model by -f. When using -f, the above command is equivalent to:
 
 ```shell
-python3 tools/export_onnx.py --output-name yolox_s.onnx -f exps/default/yolox_s.py -c yolox_s.pth.tar
+python3 tools/export_onnx.py --output-name yolox_s.onnx -f exps/default/yolox_s.py -c yolox_s.pth
 ```
 
 3. To convert your customized model, please use -f:
 
 ```shell
-python3 tools/export_onnx.py --output-name your_yolox.onnx -f exps/your_dir/your_yolox.py -c your_yolox.pth.tar
+python3 tools/export_onnx.py --output-name your_yolox.onnx -f exps/your_dir/your_yolox.py -c your_yolox.pth
 ```
 
 ### ONNXRuntime Demo
demo/TensorRT/python/README.md CHANGED
@@ -16,7 +16,7 @@ YOLOX models can be easily conveted to TensorRT models using torch2trt
 ```
 For example:
 ```shell
-python tools/trt.py -n yolox-s -c your_ckpt.pth.tar
+python tools/trt.py -n yolox-s -c your_ckpt.pth
 ```
 <YOLOX_MODEL_NAME> can be: yolox-nano, yolox-tiny. yolox-s, yolox-m, yolox-l, yolox-x.
 
@@ -26,7 +26,7 @@ YOLOX models can be easily conveted to TensorRT models using torch2trt
 ```
 For example:
 ```shell
-python tools/trt.py -f /path/to/your/yolox/exps/yolox_s.py -c your_ckpt.pth.tar
+python tools/trt.py -f /path/to/your/yolox/exps/yolox_s.py -c your_ckpt.pth
 ```
 *yolox_s.py* can be any exp file modified by you.
 
docs/train_custom_data.md CHANGED
@@ -73,13 +73,13 @@ python tools/train.py -f /path/to/your/Exp/file -d 8 -b 64 --fp16 -o -c /path/to
 
 or take the `YOLOX-S` VOC training for example:
 ```bash
-python tools/train.py -f exps/example/yolox_voc/yolox_voc_s.py -d 8 -b 64 --fp16 -o -c /path/to/yolox_s.pth.tar
+python tools/train.py -f exps/example/yolox_voc/yolox_voc_s.py -d 8 -b 64 --fp16 -o -c /path/to/yolox_s.pth
 ```
 
 ✧✧✧ For example:
 - If you download the [mini-coco128](https://drive.google.com/file/d/16N3u36ycNd70m23IM7vMuRQXejAJY9Fs/view?usp=sharing) and unzip it to the `datasets`, you can direct run the following training code.
 ```bash
-python tools/train.py -f exps/example/custom/yolox_s.py -d 8 -b 64 --fp16 -o -c /path/to/yolox_s.pth.tar
+python tools/train.py -f exps/example/custom/yolox_s.py -d 8 -b 64 --fp16 -o -c /path/to/yolox_s.pth
 ```
 
 (Don't worry for the different shape of detection head between the pretrained weights and your own model, we will handle it)
tools/demo.py CHANGED
@@ -255,7 +255,7 @@ def main(exp, args):
 
     if not args.trt:
         if args.ckpt is None:
-            ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
+            ckpt_file = os.path.join(file_name, "best_ckpt.pth")
         else:
             ckpt_file = args.ckpt
         logger.info("loading checkpoint")
tools/eval.py CHANGED
@@ -147,7 +147,7 @@ def main(exp, args, num_gpu):
 
     if not args.speed and not args.trt:
         if args.ckpt is None:
-            ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
+            ckpt_file = os.path.join(file_name, "best_ckpt.pth")
         else:
             ckpt_file = args.ckpt
         logger.info("loading checkpoint")
tools/export_onnx.py CHANGED
@@ -63,7 +63,7 @@ def main():
     model = exp.get_model()
     if args.ckpt is None:
         file_name = os.path.join(exp.output_dir, args.experiment_name)
-        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
+        ckpt_file = os.path.join(file_name, "best_ckpt.pth")
     else:
         ckpt_file = args.ckpt
 
tools/trt.py CHANGED
@@ -42,7 +42,7 @@ def main():
     file_name = os.path.join(exp.output_dir, args.experiment_name)
     os.makedirs(file_name, exist_ok=True)
     if args.ckpt is None:
-        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
+        ckpt_file = os.path.join(file_name, "best_ckpt.pth")
     else:
         ckpt_file = args.ckpt
 
yolox/core/trainer.py CHANGED
@@ -269,7 +269,7 @@ class Trainer:
         if self.args.resume:
             logger.info("resume training")
             if self.args.ckpt is None:
-                ckpt_file = os.path.join(self.file_name, "latest" + "_ckpt.pth.tar")
+                ckpt_file = os.path.join(self.file_name, "latest" + "_ckpt.pth")
             else:
                 ckpt_file = self.args.ckpt
 
yolox/utils/checkpoint.py CHANGED
@@ -37,8 +37,8 @@ def load_ckpt(model, ckpt):
 def save_checkpoint(state, is_best, save_dir, model_name=""):
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
-    filename = os.path.join(save_dir, model_name + "_ckpt.pth.tar")
+    filename = os.path.join(save_dir, model_name + "_ckpt.pth")
     torch.save(state, filename)
     if is_best:
-        best_filename = os.path.join(save_dir, "best_ckpt.pth.tar")
+        best_filename = os.path.join(save_dir, "best_ckpt.pth")
         shutil.copyfile(filename, best_filename)
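For reference, a small usage sketch of the renamed checkpoint files under the new convention; the state dict and `save_dir` below are placeholders, not the trainer's real arguments.

```python
import torch
from yolox.utils.checkpoint import save_checkpoint  # module shown in the diff above

# Placeholder state; the real trainer saves model/optimizer state dicts
# plus bookkeeping fields such as the starting epoch.
state = {"model": {}, "start_epoch": 0}

# With model_name="latest" and is_best=True this now writes
#   ckpts/latest_ckpt.pth and ckpts/best_ckpt.pth
# (previously latest_ckpt.pth.tar / best_ckpt.pth.tar).
save_checkpoint(state, is_best=True, save_dir="ckpts", model_name="latest")

# Loading is unchanged; torch.load does not care about the file extension.
ckpt = torch.load("ckpts/best_ckpt.pth", map_location="cpu")
print(sorted(ckpt.keys()))
```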