Add `--hard-fail` argument to benchmarks for CI errors (#8513)

* Add `--hard-fail` argument to benchmarks for CI

  Will cause CI to fail on a benchmark failure (expected capability skips remain warnings; see the usage sketch after the file summary below).

* Update ci-testing.yml
* Attempt Failure (CI should fail)
* Update benchmarks.py
* Update export.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Update benchmarks.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Update ci-testing.yml
* Update benchmarks.py
* Update benchmarks.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Files changed:

- .github/workflows/ci-testing.yml: +1 −1
- export.py: +12 −12
- utils/benchmarks.py: +12 −4
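For local testing, here is a hedged sketch of exercising the new flag through the module API. It assumes a YOLOv5 checkout, an illustrative weights name, and that `run()` accepts the same keywords the CLI flags map to:

```python
# Sketch only: mirrors `python utils/benchmarks.py --weights yolov5n.pt --img 320 --hard-fail`
from utils.benchmarks import run

run(weights='yolov5n.pt', imgsz=320, hard_fail=True)  # non-assertion benchmark errors now raise
```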
.github/workflows/ci-testing.yml

```diff
@@ -39,7 +39,7 @@ jobs:
         pip list
     - name: Run benchmarks
       run: |
-        python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320
+        python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail

   Tests:
     timeout-minutes: 60
```
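Worth noting why this single flag is enough to gate the job: under `--hard-fail`, a real benchmark error escapes `benchmarks.py` as an uncaught `AssertionError`, the interpreter exits non-zero, and the workflow step fails. A minimal self-contained sketch of that mechanism:

```python
import subprocess
import sys

# An uncaught AssertionError makes the interpreter exit with status 1,
# which is what marks a CI step as failed.
proc = subprocess.run([sys.executable, '-c', "assert False, 'benchmark failed'"])
print(proc.returncode)  # 1
```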
export.py

```diff
@@ -75,18 +75,18 @@ from utils.torch_utils import select_device
 def export_formats():
     # YOLOv5 export formats
     x = [
-        ['PyTorch', '-', '.pt', True],
-        ['TorchScript', 'torchscript', '.torchscript', True],
-        ['ONNX', 'onnx', '.onnx', True],
-        ['OpenVINO', 'openvino', '_openvino_model', False],
-        ['TensorRT', 'engine', '.engine', True],
-        ['CoreML', 'coreml', '.mlmodel', False],
-        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True],
-        ['TensorFlow GraphDef', 'pb', '.pb', True],
-        ['TensorFlow Lite', 'tflite', '.tflite', False],
-        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False],
-        ['TensorFlow.js', 'tfjs', '_web_model', False],]
-    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU'])
+        ['PyTorch', '-', '.pt', True, True],
+        ['TorchScript', 'torchscript', '.torchscript', True, True],
+        ['ONNX', 'onnx', '.onnx', True, True],
+        ['OpenVINO', 'openvino', '_openvino_model', True, False],
+        ['TensorRT', 'engine', '.engine', False, True],
+        ['CoreML', 'coreml', '.mlmodel', True, False],
+        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
+        ['TensorFlow GraphDef', 'pb', '.pb', True, True],
+        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
+        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
+        ['TensorFlow.js', 'tfjs', '_web_model', False, False],]
+    return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])


 def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
```
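The reworked table now carries independent `CPU` and `GPU` capability flags per format. These are ordinary boolean DataFrame columns, so they can be filtered with plain pandas indexing; `benchmarks.py` itself reads them row by row via `iterrows()`. A small sketch, assuming a YOLOv5 checkout:

```python
from export import export_formats

fmts = export_formats()  # DataFrame: Format, Argument, Suffix, CPU, GPU
print(fmts[fmts['CPU']]['Format'].tolist())   # formats benchmarkable on CPU
print(fmts[~fmts['GPU']]['Format'].tolist())  # formats to skip on CUDA devices
```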
utils/benchmarks.py

```diff
@@ -26,6 +26,7 @@ Usage:
 """

 import argparse
+import platform
 import sys
 import time
 from pathlib import Path
@@ -54,14 +55,17 @@ def run(
         half=False,  # use FP16 half-precision inference
         test=False,  # test exports only
         pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
 ):
     y, t = [], time.time()
     device = select_device(device)
-    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)
+    for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
         try:
-            assert i != 9, 'Edge TPU not supported'
-            assert i != 10, 'TF.js not supported'
-            if device.type != 'cpu':
+            assert i not in (9, 10), f'{name} inference not supported'  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == 'Darwin', f'{name} inference only supported on macOS>=10.13'
+            if 'cpu' in device.type:
+                assert cpu, f'{name} inference not supported on CPU'
+            if 'cuda' in device.type:
                 assert gpu, f'{name} inference not supported on GPU'

             # Export
@@ -77,6 +81,8 @@ def run(
             speeds = result[2]  # times (preprocess, inference, postprocess)
             y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)])  # MB, mAP, t_inference
         except Exception as e:
+            if hard_fail:
+                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
             LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
             y.append([name, None, None, None])  # mAP, t_inference
         if pt_only and i == 0:
@@ -102,6 +108,7 @@ def test(
         half=False,  # use FP16 half-precision inference
         test=False,  # test exports only
         pt_only=False,  # test PyTorch only
+        hard_fail=False,  # throw error on benchmark failure
 ):
     y, t = [], time.time()
     device = select_device(device)
@@ -134,6 +141,7 @@ def parse_opt():
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
     parser.add_argument('--test', action='store_true', help='test exports only')
     parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
+    parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure')
     opt = parser.parse_args()
     opt.data = check_yaml(opt.data)  # check YAML
     print_args(vars(opt))
```
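The hard-fail logic keys on exception type: the capability asserts at the top of the loop raise `AssertionError` for expected skips, so `assert type(e) is AssertionError` lets those pass as warnings while re-raising on anything else. A standalone sketch of that control flow (function and names are hypothetical, not the repo's API):

```python
def benchmark_one(name, fn, hard_fail=False):
    # Mirrors the try/except in utils/benchmarks.py: AssertionError means an
    # expected capability skip; any other exception is genuine breakage.
    try:
        assert name != 'TensorFlow.js', f'{name} inference not supported'  # expected skip
        fn()
    except Exception as e:
        if hard_fail:
            assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
        print(f'WARNING: Benchmark failure for {name}: {e}')

benchmark_one('TensorFlow.js', lambda: None, hard_fail=True)  # expected skip: warns, CI passes
benchmark_one('ONNX', lambda: 1 / 0, hard_fail=False)         # soft mode: warns only
# benchmark_one('ONNX', lambda: 1 / 0, hard_fail=True)        # hard mode: raises, CI fails
```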