Yulv-git
committed on
Commit
·
6ea9687
1
Parent(s):
0573285
fix(README & model) some typos (#1256)
Browse files- README.md +1 -1
- demo/MegEngine/python/models/darknet.py +1 -1
- demo/OpenVINO/cpp/README.md +1 -1
- demo/OpenVINO/python/README.md +1 -1
- docs/updates_note.md +1 -1
- tools/demo.py +1 -1
- tools/eval.py +1 -1
- tools/export_onnx.py +1 -1
- tools/export_torchscript.py +1 -1
- tools/trt.py +1 -1
- yolox/evaluators/coco_evaluator.py +1 -1
- yolox/evaluators/voc_evaluator.py +1 -1
README.md
CHANGED
@@ -17,7 +17,7 @@ This repo is an implementation of PyTorch version YOLOX, there is also a [MegEng
|
|
17 |
* 【2021/07/26】 We now support [MegEngine](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/MegEngine) deployment.
|
18 |
* 【2021/07/20】 We have released our technical report on [Arxiv](https://arxiv.org/abs/2107.08430).
|
19 |
|
20 |
-
##
|
21 |
- [ ] YOLOX-P6 and larger model.
|
22 |
- [ ] Objects365 pretrain.
|
23 |
- [ ] Transformer modules.
|
|
|
17 |
* 【2021/07/26】 We now support [MegEngine](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/MegEngine) deployment.
|
18 |
* 【2021/07/20】 We have released our technical report on [Arxiv](https://arxiv.org/abs/2107.08430).
|
19 |
|
20 |
+
## Coming soon
|
21 |
- [ ] YOLOX-P6 and larger model.
|
22 |
- [ ] Objects365 pretrain.
|
23 |
- [ ] Transformer modules.
|
demo/MegEngine/python/models/darknet.py
CHANGED
@@ -18,7 +18,7 @@ class Darknet(M.Module):
|
|
18 |
Args:
|
19 |
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
|
20 |
in_channels (int): number of input channels, for example, use 3 for RGB image.
|
21 |
-
stem_out_channels (int): number of output
|
22 |
It decides channels of darknet layer2 to layer5.
|
23 |
out_features (Tuple[str]): desired output layer name.
|
24 |
"""
|
|
|
18 |
Args:
|
19 |
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
|
20 |
in_channels (int): number of input channels, for example, use 3 for RGB image.
|
21 |
+
stem_out_channels (int): number of output channels of darknet stem.
|
22 |
It decides channels of darknet layer2 to layer5.
|
23 |
out_features (Tuple[str]): desired output layer name.
|
24 |
"""
|
demo/OpenVINO/cpp/README.md
CHANGED
@@ -52,7 +52,7 @@ source ~/.bashrc
|
|
52 |
|
53 |
1. Export ONNX model
|
54 |
|
55 |
-
Please refer to the [ONNX
|
56 |
|
57 |
2. Convert ONNX to OpenVINO
|
58 |
|
|
|
52 |
|
53 |
1. Export ONNX model
|
54 |
|
55 |
+
Please refer to the [ONNX tutorial](../../ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.**
|
56 |
|
57 |
2. Convert ONNX to OpenVINO
|
58 |
|
demo/OpenVINO/python/README.md
CHANGED
@@ -52,7 +52,7 @@ source ~/.bashrc
|
|
52 |
|
53 |
1. Export ONNX model
|
54 |
|
55 |
-
Please refer to the [ONNX
|
56 |
|
57 |
2. Convert ONNX to OpenVINO
|
58 |
|
|
|
52 |
|
53 |
1. Export ONNX model
|
54 |
|
55 |
+
Please refer to the [ONNX tutorial](https://github.com/Megvii-BaseDetection/YOLOX/demo/ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.**
|
56 |
|
57 |
2. Convert ONNX to OpenVINO
|
58 |
|
docs/updates_note.md
CHANGED
@@ -25,7 +25,7 @@ python tools/train.py -n yolox-s -d 8 -b 64 --fp16 -o [--cache]
|
|
25 |
|
26 |
### Higher performance
|
27 |
|
28 |
-
New models
|
29 |
|
30 |
### Support torch amp
|
31 |
|
|
|
25 |
|
26 |
### Higher performance
|
27 |
|
28 |
+
New models achieve **~1%** higher performance! See [Model_Zoo](model_zoo.md) for more details.
|
29 |
|
30 |
### Support torch amp
|
31 |
|
tools/demo.py
CHANGED
@@ -43,7 +43,7 @@ def make_parser():
|
|
43 |
"--exp_file",
|
44 |
default=None,
|
45 |
type=str,
|
46 |
-
help="
|
47 |
)
|
48 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
|
49 |
parser.add_argument(
|
|
|
43 |
"--exp_file",
|
44 |
default=None,
|
45 |
type=str,
|
46 |
+
help="please input your experiment description file",
|
47 |
)
|
48 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
|
49 |
parser.add_argument(
|
tools/eval.py
CHANGED
@@ -54,7 +54,7 @@ def make_parser():
|
|
54 |
"--exp_file",
|
55 |
default=None,
|
56 |
type=str,
|
57 |
-
help="
|
58 |
)
|
59 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
|
60 |
parser.add_argument("--conf", default=None, type=float, help="test conf")
|
|
|
54 |
"--exp_file",
|
55 |
default=None,
|
56 |
type=str,
|
57 |
+
help="please input your experiment description file",
|
58 |
)
|
59 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
|
60 |
parser.add_argument("--conf", default=None, type=float, help="test conf")
|
tools/export_onnx.py
CHANGED
@@ -38,7 +38,7 @@ def make_parser():
|
|
38 |
"--exp_file",
|
39 |
default=None,
|
40 |
type=str,
|
41 |
-
help="
|
42 |
)
|
43 |
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
|
44 |
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
|
|
|
38 |
"--exp_file",
|
39 |
default=None,
|
40 |
type=str,
|
41 |
+
help="experiment description file",
|
42 |
)
|
43 |
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
|
44 |
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
|
tools/export_torchscript.py
CHANGED
@@ -22,7 +22,7 @@ def make_parser():
|
|
22 |
"--exp_file",
|
23 |
default=None,
|
24 |
type=str,
|
25 |
-
help="
|
26 |
)
|
27 |
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
|
28 |
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
|
|
|
22 |
"--exp_file",
|
23 |
default=None,
|
24 |
type=str,
|
25 |
+
help="experiment description file",
|
26 |
)
|
27 |
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
|
28 |
parser.add_argument("-n", "--name", type=str, default=None, help="model name")
|
tools/trt.py
CHANGED
@@ -24,7 +24,7 @@ def make_parser():
|
|
24 |
"--exp_file",
|
25 |
default=None,
|
26 |
type=str,
|
27 |
-
help="
|
28 |
)
|
29 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
|
30 |
parser.add_argument(
|
|
|
24 |
"--exp_file",
|
25 |
default=None,
|
26 |
type=str,
|
27 |
+
help="please input your experiment description file",
|
28 |
)
|
29 |
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt path")
|
30 |
parser.add_argument(
|
yolox/evaluators/coco_evaluator.py
CHANGED
@@ -164,7 +164,7 @@ class COCOEvaluator:
|
|
164 |
with torch.no_grad():
|
165 |
imgs = imgs.type(tensor_type)
|
166 |
|
167 |
-
# skip the
|
168 |
is_time_record = cur_iter < len(self.dataloader) - 1
|
169 |
if is_time_record:
|
170 |
start = time.time()
|
|
|
164 |
with torch.no_grad():
|
165 |
imgs = imgs.type(tensor_type)
|
166 |
|
167 |
+
# skip the last iters since batchsize might be not enough for batch inference
|
168 |
is_time_record = cur_iter < len(self.dataloader) - 1
|
169 |
if is_time_record:
|
170 |
start = time.time()
|
yolox/evaluators/voc_evaluator.py
CHANGED
@@ -97,7 +97,7 @@ class VOCEvaluator:
|
|
97 |
with torch.no_grad():
|
98 |
imgs = imgs.type(tensor_type)
|
99 |
|
100 |
-
# skip the
|
101 |
is_time_record = cur_iter < len(self.dataloader) - 1
|
102 |
if is_time_record:
|
103 |
start = time.time()
|
|
|
97 |
with torch.no_grad():
|
98 |
imgs = imgs.type(tensor_type)
|
99 |
|
100 |
+
# skip the last iters since batchsize might be not enough for batch inference
|
101 |
is_time_record = cur_iter < len(self.dataloader) - 1
|
102 |
if is_time_record:
|
103 |
start = time.time()
|