glenn-jocher committed
Commit 0dd66e2 · unverified · 1 Parent(s): a3a652c

OpenVINO metadata fix (#7952)


* Rename OpenVINO meta.yaml to model name

* Rename OpenVINO meta.yaml to model name

* Rename OpenVINO meta.yaml to model name

* fix

Files changed (2)
  1. export.py +1 -1
  2. models/common.py +14 -13
export.py CHANGED
@@ -180,7 +180,7 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
 
         cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
         subprocess.check_output(cmd.split())  # export
-        with open(Path(f) / 'meta.yaml', 'w') as g:
+        with open(Path(f) / file.with_suffix('.yaml'), 'w') as g:
             yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
 
         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
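
For context, a minimal sketch (not part of the diff) of the export-side effect of this change: the metadata YAML is now named after the model stem instead of a fixed meta.yaml, so it sits next to the exported .xml/.bin pair. The weights path, stride and class names below are illustrative assumptions, not values from the commit.

# Sketch: reproduce the export-side metadata write in isolation.
# 'yolov5s.pt' and the class list are hypothetical examples.
import os
from pathlib import Path

import yaml

file = Path('yolov5s.pt')                                  # hypothetical weights path
f = str(file).replace('.pt', '_openvino_model' + os.sep)   # OpenVINO output directory
Path(f).mkdir(parents=True, exist_ok=True)

metadata = {'stride': 32, 'names': ['person', 'bicycle']}  # example stride / class names
with open(Path(f) / file.with_suffix('.yaml'), 'w') as g:  # writes yolov5s.yaml (was meta.yaml)
    yaml.dump(metadata, g)

print(sorted(p.name for p in Path(f).iterdir()))           # ['yolov5s.yaml']; mo would add .xml/.bin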
models/common.py CHANGED
@@ -323,9 +323,12 @@ class DetectMultiBackend(nn.Module):
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w)  # get backend
-        stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults
         w = attempt_download(w)  # download if not local
         fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
+        stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults
+        if data:  # assign class names (optional)
+            with open(data, errors='ignore') as f:
+                names = yaml.safe_load(f)['names']
 
         if pt:  # PyTorch
             model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)
@@ -365,7 +368,9 @@ class DetectMultiBackend(nn.Module):
             network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
             executable_network = ie.compile_model(model=network, device_name="CPU")
             output_layer = next(iter(executable_network.outputs))
-            self._load_metadata(w.parent / 'meta.yaml')  # load metadata
+            meta = w.with_suffix('.yaml')
+            if meta.exists():
+                stride, names = self._load_metadata(meta)  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -431,11 +436,7 @@ class DetectMultiBackend(nn.Module):
             output_details = interpreter.get_output_details()  # outputs
         elif tfjs:
             raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
-
         self.__dict__.update(locals())  # assign all variables to self
-        if not hasattr(self, 'names') and data:  # assign class names (optional)
-            with open(data, errors='ignore') as f:
-                names = yaml.safe_load(f)['names']
 
     def forward(self, im, augment=False, visualize=False, val=False):
         # YOLOv5 MultiBackend inference
@@ -495,13 +496,6 @@ class DetectMultiBackend(nn.Module):
             y = torch.tensor(y, device=self.device)
         return (y, []) if val else y
 
-    def _load_metadata(self, f='path/to/meta.yaml'):
-        # Load metadata from meta.yaml if it exists
-        if Path(f).is_file():
-            with open(f, errors='ignore') as f:
-                for k, v in yaml.safe_load(f).items():
-                    setattr(self, k, v)  # assign stride, names
-
     def warmup(self, imgsz=(1, 3, 640, 640)):
         # Warmup model by running inference once
         warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
@@ -522,6 +516,13 @@ class DetectMultiBackend(nn.Module):
         tflite &= not edgetpu  # *.tflite
         return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs
 
+    @staticmethod
+    def _load_metadata(f='path/to/meta.yaml'):
+        # Load metadata from meta.yaml if it exists
+        with open(f, errors='ignore') as f:
+            d = yaml.safe_load(f)
+        return d['stride'], d['names']  # assign stride, names
+
 
 class AutoShape(nn.Module):
     # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
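
To illustrate the load side, here is a minimal stand-alone sketch mirroring the new w.with_suffix('.yaml') lookup and the _load_metadata helper; the paths and returned values are illustrative assumptions. When the YAML exists, the returned stride and names simply override the defaults assigned earlier in __init__.

# Sketch: stand-alone version of the new metadata lookup (hypothetical paths).
from pathlib import Path

import yaml


def load_metadata(f):
    # Mirror of DetectMultiBackend._load_metadata: return (stride, names) from the YAML
    with open(f, errors='ignore') as f:
        d = yaml.safe_load(f)
    return d['stride'], d['names']


stride, names = 32, [f'class{i}' for i in range(1000)]  # defaults, as in __init__
w = Path('yolov5s_openvino_model/yolov5s.xml')          # hypothetical OpenVINO model path
meta = w.with_suffix('.yaml')                           # metadata shares the model's stem
if meta.exists():
    stride, names = load_metadata(meta)                 # e.g. 32, ['person', 'bicycle']

Naming the metadata after the model stem rather than a fixed meta.yaml lets the backend locate it directly from the .xml path via with_suffix('.yaml'), and it also means separately exported models can share a directory without overwriting each other's metadata.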