motokimura and glenn-jocher committed
Commit 16563ac · unverified · 1 parent: ed9bac8

Prefer `tflite_runtime` for TFLite inference if installed (#6406)


* import tflite_runtime if tensorflow not installed

* rename tflite to tfli

* Attempt tflite_runtime for all TFLite workflows

Also rename tfli to tfl

Co-authored-by: Glenn Jocher <[email protected]>
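
The change boils down to a single import-fallback pattern: try the lightweight tflite_runtime package first, and only fall back to the full TensorFlow package if it is missing. Below is a minimal standalone sketch of that pattern for plain TFLite inference; the model path 'model.tflite' and the random dummy input are illustrative placeholders, not part of the YOLOv5 code, and a float32 (non-quantized) model is assumed.

    import numpy as np

    try:
        import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
    except ImportError:
        import tensorflow.lite as tfl  # otherwise fall back to full TensorFlow

    interpreter = tfl.Interpreter(model_path='model.tflite')  # placeholder path
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    x = np.random.rand(*input_details[0]['shape']).astype(np.float32)  # dummy input
    interpreter.set_tensor(input_details[0]['index'], x)
    interpreter.invoke()
    y = interpreter.get_tensor(output_details[0]['index'])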

Files changed (1)
  1. models/common.py +7 -5
models/common.py CHANGED
@@ -374,17 +374,19 @@ class DetectMultiBackend(nn.Module):
             graph_def.ParseFromString(open(w, 'rb').read())
             frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0")
         elif tflite:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
-            if 'edgetpu' in w.lower():  # Edge TPU
+            try:
+                import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
+            except ImportError:
+                import tensorflow.lite as tfl
+            if 'edgetpu' in w.lower():  # Edge TPU https://coral.ai/software/#edgetpu-runtime
                 LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
-                import tflite_runtime.interpreter as tfli  # install https://coral.ai/software/#edgetpu-runtime
                 delegate = {'Linux': 'libedgetpu.so.1',
                             'Darwin': 'libedgetpu.1.dylib',
                             'Windows': 'edgetpu.dll'}[platform.system()]
-                interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])
+                interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)])
             else:  # Lite
                 LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
-                import tensorflow as tf
-                interpreter = tf.lite.Interpreter(model_path=w)  # load TFLite model
+                interpreter = tfl.Interpreter(model_path=w)  # load TFLite model
             interpreter.allocate_tensors()  # allocate
             input_details = interpreter.get_input_details()  # inputs
             output_details = interpreter.get_output_details()  # outputs
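
For the Edge TPU branch, the same tfl alias is reused to load the per-OS delegate library, which is why the old tflite_runtime-only import could be dropped. A hedged sketch of that branch in isolation, assuming the Edge TPU runtime from https://coral.ai/software/#edgetpu-runtime is installed; 'model_edgetpu.tflite' is a placeholder name.

    import platform

    try:
        import tflite_runtime.interpreter as tfl  # prefer tflite_runtime if installed
    except ImportError:
        import tensorflow.lite as tfl

    delegate = {'Linux': 'libedgetpu.so.1',      # Edge TPU shared library per OS
                'Darwin': 'libedgetpu.1.dylib',
                'Windows': 'edgetpu.dll'}[platform.system()]
    interpreter = tfl.Interpreter(model_path='model_edgetpu.tflite',  # placeholder path
                                  experimental_delegates=[tfl.load_delegate(delegate)])
    interpreter.allocate_tensors()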