Replace `openvino-dev` with OpenVINO Runtime inference (#7843)
* Uses OpenVINO runtime instead of openvino-dev
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* export with openvino package
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Revert export.py
* Update common.py
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <[email protected]>
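
For reference, the change replaces the legacy Inference Engine API (IECore, read_network, load_network and blob-based infer requests) with the openvino.runtime API, where a compiled model is called directly on a NumPy array. Below is a minimal standalone sketch of the new flow, assuming a model already exported to a *_openvino_model directory; the path, input shape, and print are illustrative and not part of this PR:

```python
# Minimal sketch of the openvino.runtime flow this PR switches to (path/shape are illustrative)
from pathlib import Path

import numpy as np
from openvino.runtime import Core

w = next(Path('yolov5s_openvino_model').glob('*.xml'))  # *.xml from an OpenVINO export (assumed to exist)
ie = Core()
network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))  # read the *.xml/*.bin IR pair
executable_network = ie.compile_model(model=network, device_name="CPU")  # compile for CPU
output_layer = next(iter(executable_network.outputs))  # first (detection) output node

im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy NCHW FP32 input
y = executable_network([im])[output_layer]  # compiled model is directly callable; index result by output node
print(y.shape)
```

Compared with the removed request/blob bookkeeping (TensorDesc, set_blob, infer, output_blobs), the compiled model call returns results keyed by output node, which is why the inference branch in forward() collapses to a single line in the diff below.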
models/common.py (+7 -10)
@@ -354,13 +354,14 @@ class DetectMultiBackend(nn.Module):
                 stride, names = int(meta['stride']), eval(meta['names'])
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
-            check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-            import openvino.inference_engine as ie
-            core = ie.IECore()
+            check_requirements(('openvino',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+            from openvino.runtime import Core
+            ie = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-            network = core.read_network(model=w, weights=Path(w).with_suffix('.bin'))  # *.xml, *.bin paths
-            executable_network = core.load_network(network, device_name='CPU', num_requests=1)
+            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            executable_network = ie.compile_model(model=network, device_name="CPU")
+            self.output_layer = next(iter(executable_network.outputs))
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -444,11 +445,7 @@ class DetectMultiBackend(nn.Module):
             y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
         elif self.xml:  # OpenVINO
             im = im.cpu().numpy()  # FP32
-            desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW')  # Tensor Description
-            request = self.executable_network.requests[0]  # inference request
-            request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im))  # name=next(iter(request.input_blobs))
-            request.infer()
-            y = request.output_blobs['output'].buffer  # name=next(iter(request.output_blobs))
+            y = self.executable_network([im])[self.output_layer]
         elif self.engine:  # TensorRT
             assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)
             self.binding_addrs['images'] = int(im.data_ptr())