Autoinstall TensorRT if missing (#7537)
* Autoinstall TensorRT if missing
May resolve https://github.com/ultralytics/yolov5/issues/7464
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Update export.py
* Update export.py
* Update export.py
* Update export.py
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
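In essence, the diff below wraps the TensorRT import in a fallback that pip-installs nvidia-tensorrt from NVIDIA's package index and then re-imports it. A minimal standalone sketch of that pattern, assuming a module-level LOGGER as in export.py (the helper name import_tensorrt is illustrative, not part of the codebase):

import logging
import subprocess

LOGGER = logging.getLogger(__name__)  # stand-in for export.py's LOGGER


def import_tensorrt(prefix='TensorRT:'):
    # Illustrative helper: try the normal import first, then fall back to a
    # pip install from NVIDIA's index and re-import, mirroring the diff below.
    try:
        import tensorrt as trt
    except Exception:
        s = f"\n{prefix} tensorrt not found and is required by YOLOv5"
        LOGGER.info(f"{s}, attempting auto-update...")
        r = '-U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com'
        LOGGER.info(subprocess.check_output(f"pip install {r}", shell=True).decode())
        import tensorrt as trt  # retry the import after installation
    return trt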
export.py CHANGED
@@ -217,7 +217,15 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
 def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
     # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
     try:
-        import tensorrt as trt
+        assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
+        try:
+            import tensorrt as trt
+        except Exception:
+            s = f"\n{prefix} tensorrt not found and is required by YOLOv5"
+            LOGGER.info(f"{s}, attempting auto-update...")
+            r = '-U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com'
+            LOGGER.info(subprocess.check_output(f"pip install {r}", shell=True).decode())
+            import tensorrt as trt

         if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
             grid = model.model[-1].anchor_grid
@@ -230,7 +238,6 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
         onnx = file.with_suffix('.onnx')

         LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-        assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
         assert onnx.exists(), f'failed to export ONNX file: {onnx}'
         f = file.with_suffix('.engine')  # TensorRT engine file
         logger = trt.Logger(trt.Logger.INFO)
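With this change in place, a TensorRT export on a machine without the tensorrt Python package (e.g. `python export.py --weights yolov5s.pt --include engine --device 0`, shown here as an assumed invocation) first attempts the pip install above instead of failing on the import. Note the GPU assert (`im.device.type != 'cpu'`) now runs at the top of the try block, before the import, so the auto-install is not attempted for CPU-only exports.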