This page collects typical usage examples of the Python method tflite_runtime.interpreter.Interpreter: what the method does, how to call it, and how it is used in real projects. The curated code samples below may help; you can also explore the containing module, tflite_runtime.interpreter, for further details.
Below are 14 code examples of interpreter.Interpreter, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
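Before diving in, here is a minimal, self-contained sketch of the typical Interpreter workflow (load, allocate tensors, set input, invoke, read output). The model path is a placeholder, not taken from any example below.

import numpy as np
import tflite_runtime.interpreter as tflite

# Hypothetical model path; any .tflite model with a single input will do.
interpreter = tflite.Interpreter(model_path='model.tflite')
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a dummy tensor matching the model's expected shape and dtype.
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()

result = interpreter.get_tensor(output_details[0]['index'])
print(result.shape)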
Example 1: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self):
    edge_tpu_delegate = None
    try:
        edge_tpu_delegate = load_delegate('libedgetpu.so.1.0')
    except ValueError:
        print("No EdgeTPU detected. Falling back to CPU.")

    if edge_tpu_delegate is None:
        self.interpreter = tflite.Interpreter(
            model_path='/cpu_model.tflite')
    else:
        self.interpreter = tflite.Interpreter(
            model_path='/edgetpu_model.tflite',
            experimental_delegates=[edge_tpu_delegate])

    self.interpreter.allocate_tensors()
    self.tensor_input_details = self.interpreter.get_input_details()
    self.tensor_output_details = self.interpreter.get_output_details()
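This snippet assumes the imports hinted at in the comments above, roughly (a sketch, not part of the original code):

import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate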
Example 2: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self, model_face_detect, num_threads):
    # Init the face detector
    self.interpreter_face_detect = Interpreter(model_path=model_face_detect)
    try:
        self.interpreter_face_detect.set_num_threads(num_threads)
    except AttributeError:
        print("WARNING: The installed Python API of the TensorFlow/TensorFlow Lite runtime does not support multi-threaded processing.")
        print("WARNING: Running in single-thread mode.")
        print("WARNING: To use multi-threading for better performance on aarch64/armv7l platforms, refer to one of the repositories below for a customized TensorFlow/TensorFlow Lite runtime.")
        print("https://github.com/PINTO0309/Tensorflow-bin.git")
        print("https://github.com/PINTO0309/TensorflowLite-bin.git")
    self.interpreter_face_detect.allocate_tensors()
    self.input_details = self.interpreter_face_detect.get_input_details()[0]['index']
    self.box = self.interpreter_face_detect.get_output_details()[0]['index']
    self.scores = self.interpreter_face_detect.get_output_details()[2]['index']
    self.count = self.interpreter_face_detect.get_output_details()[3]['index']
Example 3: _get_edgetpu_interpreter
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def _get_edgetpu_interpreter(model=None):  # pragma: no cover
    # Note: looking for ideas on how to test Coral EdgeTPU-dependent code
    # in a cloud CI environment such as Travis CI or GitHub.
    tf_interpreter = None
    if model:
        try:
            edgetpu_delegate = load_delegate('libedgetpu.so.1.0')
            assert edgetpu_delegate
            tf_interpreter = Interpreter(
                model_path=model,
                experimental_delegates=[edgetpu_delegate]
            )
            log.debug('EdgeTPU available. Will use EdgeTPU model.')
        except Exception as e:
            log.debug('EdgeTPU init error: %r', e)
            # log.debug(stacktrace())
    return tf_interpreter
Example 4: run
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def run(self, timestamp, frame, annotations):
    # The Interpreter hates it when native tensors are retained:
    # fill_inputs releases the input tensors after filling them with data.
    self.fill_inputs(frame)
    self._interpreter.invoke()
    boxes = self.output_tensor(0)
    classes = self.output_tensor(1)
    scores = self.output_tensor(2)
    num_detections = self.output_tensor(3)
    if self._is_lstm:
        output_lstm_c = self.output_tensor(4)
        output_lstm_h = self.output_tensor(5)
        np.copyto(self._lstm_c, output_lstm_c)
        np.copyto(self._lstm_h, output_lstm_h)
    for i in range(int(num_detections)):
        box = boxes[i]
        if scores[i] > self._config.score_threshold:
            bbox = NormalizedBoundingBox(
                left=box[1], top=box[0], right=box[3], bottom=box[2])
            annotation = ObjectTrackingAnnotation(
                timestamp=timestamp,
                track_id=-1,
                class_id=int(classes[i]),
                class_name=self.label_list[int(classes[i])],
                confidence_score=scores[i],
                bbox=bbox)
            annotations.append(annotation)
    return True
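The method relies on an output_tensor helper that is not shown. A plausible minimal version, assuming self._interpreter is the TFLite Interpreter (the helper's exact behavior is inferred, not confirmed by the source):

import numpy as np

def output_tensor(self, i):
    # Copy output tensor i out of the interpreter (get_tensor returns a
    # copy) and drop the batch dimension, so no native buffer is retained.
    details = self._interpreter.get_output_details()[i]
    return np.squeeze(self._interpreter.get_tensor(details['index']))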
Example 5: load_model
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def load_model():
    '''
    Load the text and image models.
    :return:
    '''
    global textModel
    global imgModel
    textModel = tflite.Interpreter('text.model.tflite')
    textModel.allocate_tensors()
    imgModel = tflite.Interpreter('image.model.tflite')
    imgModel.allocate_tensors()
Example 6: loadModel
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def loadModel(dP):
    if dP.TFliteRuntime:
        import tflite_runtime.interpreter as tflite
        # "model" here is really the interpreter
        if dP.runCoralEdge:
            print(" Running on Coral Edge TPU")
            try:
                model = tflite.Interpreter(
                    model_path=os.path.splitext(dP.model_name)[0] + '_edgetpu.tflite',
                    experimental_delegates=[tflite.load_delegate(dP.edgeTPUSharedLib, {})])
            except ValueError:
                print(" Coral Edge TPU not found. Please make sure it's connected.")
        else:
            model = tflite.Interpreter(
                model_path=os.path.splitext(dP.model_name)[0] + '.tflite')
        model.allocate_tensors()
    else:
        getTFVersion(dP)
        import tensorflow as tf
        if dP.useTFlitePred:
            # "model" here is really the interpreter
            model = tf.lite.Interpreter(
                model_path=os.path.splitext(dP.model_name)[0] + '.tflite')
            model.allocate_tensors()
        else:
            model = tf.keras.models.load_model(dP.model_name)
    return model
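A hypothetical usage of loadModel; the configuration class and its values are illustrative, though the attribute names follow the example above:

# Illustrative configuration object; attribute names match the example.
class DP:
    TFliteRuntime = True
    runCoralEdge = False
    useTFlitePred = True
    model_name = 'my_model.h5'           # '.tflite' is derived from this base name
    edgeTPUSharedLib = 'libedgetpu.so.1'

model = loadModel(DP())
print(model.get_input_details())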
Example 7: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self, model_path='detect.tflite', threads_num=4):
    try:
        self.interpreter = Interpreter(model_path=model_path)
        self.interpreter.set_num_threads(threads_num)
    except Exception:
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.set_num_threads(threads_num)
    self.interpreter.allocate_tensors()
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()
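Examples 7-9 assume both runtimes are importable, roughly:

from tflite_runtime.interpreter import Interpreter   # preferred runtime
import tensorflow as tf                              # fallback runtime

Note that set_num_threads is not part of the standard tflite_runtime API; it is provided by customized builds such as those linked in Example 2, which is why the call is wrapped in try/except.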
Example 8: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self, model_path='detect.tflite', num_threads=12):
    try:
        self.interpreter = Interpreter(model_path=model_path)
        self.interpreter.set_num_threads(num_threads)
    except Exception:
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.set_num_threads(num_threads)
    self.interpreter.allocate_tensors()
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()
Example 9: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self, model_path='detect.tflite'):
    self.interpreter = Interpreter(model_path=model_path)
    self.interpreter.set_num_threads(4)
    self.interpreter.allocate_tensors()
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()
Example 10: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(self):
    # Load the TFLite model and allocate tensors.
    self.interpreter = tflite.Interpreter(
        model_path=PERSON_CLASS_MODEL,
        experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
    self.interpreter.allocate_tensors()
    # Get the input and output tensor details.
    self.input_details = self.interpreter.get_input_details()
    self.output_details = self.interpreter.get_output_details()
Example 11: __init__
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def __init__(
        self,
        base_url='https://github.com/leigh-johnson/rpi-deep-pantilt/releases/download/v1.0.0/',
        model_name='ssdlite_mobilenet_edgetpu_coco_quant',
        input_shape=(320, 320),
        min_score_thresh=0.50,
        tflite_model_file='model_postprocessed_quantized_128_uint8_edgetpu.tflite'
):
    self.base_url = base_url
    self.model_name = model_name
    self.model_file = model_name + '.tar.gz'
    self.model_url = base_url + self.model_file
    self.tflite_model_file = tflite_model_file

    self.model_dir = tf.keras.utils.get_file(
        fname=self.model_file,
        origin=self.model_url,
        untar=True,
        cache_subdir='models'
    )

    self.min_score_thresh = min_score_thresh

    self.model_path = os.path.splitext(
        os.path.splitext(self.model_dir)[0]
    )[0] + f'/{self.tflite_model_file}'

    try:
        from tflite_runtime import interpreter as coral_tflite_interpreter
    except ImportError as e:
        logging.error(e)
        logging.error('Please install Edge TPU tflite_runtime:')
        logging.error(
            '$ pip install https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-linux_armv7l.whl')
        sys.exit(1)

    self.tflite_interpreter = coral_tflite_interpreter.Interpreter(
        model_path=self.model_path,
        experimental_delegates=[
            tf.lite.experimental.load_delegate(self.EDGETPU_SHARED_LIB)
        ]
    )
    self.tflite_interpreter.allocate_tensors()

    self.input_details = self.tflite_interpreter.get_input_details()
    self.output_details = self.tflite_interpreter.get_output_details()

    self.category_index = create_category_index_from_labelmap(
        self.PATH_TO_LABELS, use_display_name=True)

    logging.info(f'loaded labels from {self.PATH_TO_LABELS} \n {self.category_index}')
    logging.info(f'initialized model {model_name}')
    logging.info(f'model inputs: {self.input_details}')
    logging.info(f'model outputs: {self.output_details}')
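The constructor also references two class attributes that are not shown in the snippet. The values below are illustrative assumptions, not taken from the source:

# Assumed class attributes; the values are illustrative only.
EDGETPU_SHARED_LIB = 'libedgetpu.so.1'
PATH_TO_LABELS = 'data/mscoco_label_map.pbtxt'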
Example 12: make_interpreter
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def make_interpreter(model_file):
    # An optional EdgeTPU device can be appended as e.g. 'model.tflite@usb:0'.
    model_file, *device = model_file.split('@')
    return tflite.Interpreter(
        model_path=model_file,
        experimental_delegates=[
            tflite.load_delegate(EDGETPU_SHARED_LIB,
                                 {'device': device[0]} if device else {})
        ])
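A usage sketch (the model file name is a placeholder; the @-suffix device syntax follows the Coral runtime):

# Run on the first USB-attached EdgeTPU; omit the suffix for the default device.
interpreter = make_interpreter('model_edgetpu.tflite@usb:0')
interpreter.allocate_tensors()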
Example 13: configure
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def configure(self):
    self.__labels = self.__load_labels(self.__label_file)
    self.__interpreter = Interpreter(self.__model_path)
    self.__interpreter.allocate_tensors()
    _, self.__input_height, self.__input_width, _ = \
        self.__interpreter.get_input_details()[0]['shape']
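The __load_labels helper is not shown. A minimal sketch, under the common (but unconfirmed) assumption that the label file holds one class name per line:

def __load_labels(self, path):
    # One label per line; the line index serves as the class id.
    with open(path, 'r') as f:
        return {i: line.strip() for i, line in enumerate(f)}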
Example 14: _load_tflite
# Required module: from tflite_runtime import interpreter [as alias]
# Or: from tflite_runtime.interpreter import Interpreter [as alias]
def _load_tflite(self, tflite_path):
    experimental_delegates = []
    try:
        experimental_delegates.append(
            tflite.load_delegate(
                EDGETPU_SHARED_LIB,
                {'device': self._config.device} if self._config.device else {}))
    except AttributeError as e:
        if '\'Delegate\' object has no attribute \'_library\'' in str(e):
            print(
                'Warning: EdgeTPU library not found. You can still run CPU models, '
                'but if you have a Coral device make sure you set it up: '
                'https://coral.ai/docs/setup/.')
    except ValueError as e:
        if 'Failed to load delegate from ' in str(e):
            print(
                'Warning: EdgeTPU library not found. You can still run CPU models, '
                'but if you have a Coral device make sure you set it up: '
                'https://coral.ai/docs/setup/.')
    try:
        self._interpreter = tflite.Interpreter(
            model_path=tflite_path, experimental_delegates=experimental_delegates)
    except TypeError as e:
        # Older runtimes do not support the experimental_delegates argument.
        if 'got an unexpected keyword argument \'experimental_delegates\'' in str(e):
            self._interpreter = tflite.Interpreter(model_path=tflite_path)
    try:
        self._interpreter.allocate_tensors()
    except RuntimeError as e:
        if 'edgetpu-custom-op' in str(e) or 'EdgeTpuDelegateForCustomOp' in str(e):
            raise RuntimeError('Loaded an EdgeTPU model without the EdgeTPU '
                               'library loaded. If you have a Coral device make '
                               'sure you set it up: https://coral.ai/docs/setup/.')
        else:
            raise
    self._is_lstm = self._check_lstm()
    if self._is_lstm:
        print('Loading an LSTM model.')
        self._lstm_c = np.copy(self.input_tensor(1))
        self._lstm_h = np.copy(self.input_tensor(2))
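This snippet depends on _check_lstm and input_tensor helpers that are not shown. One plausible reading, assuming an LSTM model is recognized by its extra recurrent-state inputs (names and logic are inferred, not confirmed by the source):

def _check_lstm(self):
    # Heuristic: an LSTM detection model carries two extra inputs
    # (cell state and hidden state) besides the image tensor.
    return len(self._interpreter.get_input_details()) >= 3

def input_tensor(self, i):
    # Zero-copy view of input tensor i, suitable for np.copy() above.
    details = self._interpreter.get_input_details()[i]
    return self._interpreter.tensor(details['index'])()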