This article collects typical usage examples of the get_path_to_datafile function from the Python module tensorflow.python.platform.resource_loader. If you have been wondering what get_path_to_datafile does and how to use it, the curated code examples below may help.
The following shows 15 code examples of get_path_to_datafile, sorted by popularity by default.
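Before the examples, a minimal sketch of the core behavior may help: get_path_to_datafile resolves a relative path against the directory of the Python file that calls it, which is how TensorFlow locates data dependencies (FlatBuffer schemas, .tflite models, shared-object libraries) bundled next to the source. The file name testdata/my_model.tflite below is a hypothetical placeholder, not a file shipped with TensorFlow.

import os

from tensorflow.python.platform import resource_loader

# Resolve the path relative to the module that makes this call, not relative
# to the current working directory.
model_path = resource_loader.get_path_to_datafile('testdata/my_model.tflite')

if os.path.exists(model_path):
  print('Found bundled data file at: %s' % model_path)
else:
  print('Data file is not present: %s' % model_path)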
Example 1: test_rnn_cell
def test_rnn_cell(self):
  export_dir = self.get_temp_dir()
  export_binary = resource_loader.get_path_to_datafile(
      "export_rnn_cell")
  self.assertCommandSucceeded(export_binary, export_dir=export_dir)

  use_binary = resource_loader.get_path_to_datafile("use_rnn_cell")
  self.assertCommandSucceeded(use_binary, model_dir=export_dir)
Example 2: test_mnist_cnn
def test_mnist_cnn(self):
  export_dir = self.get_temp_dir()
  export_binary = resource_loader.get_path_to_datafile("export_mnist_cnn")
  self.assertCommandSucceeded(
      export_binary, export_dir=export_dir, fast_test_mode="true")

  use_binary = resource_loader.get_path_to_datafile("use_mnist_cnn")
  self.assertCommandSucceeded(
      use_binary, export_dir=export_dir, fast_test_mode="true")
Example 3: test_text_embedding_in_sequential_keras
def test_text_embedding_in_sequential_keras(self):
  export_dir = self.get_temp_dir()
  export_binary = resource_loader.get_path_to_datafile(
      "export_simple_text_embedding")
  self.assertCommandSucceeded(export_binary, export_dir=export_dir)

  use_binary = resource_loader.get_path_to_datafile(
      "use_model_in_sequential_keras")
  self.assertCommandSucceeded(use_binary, model_dir=export_dir)
Example 4: __init__
def __init__(self):
  # TODO(aselle): make this work in the open source version with better
  # path.
  paths_to_try = [
      "../../../../flatbuffers/flatc",  # not bazel
      "../../../../external/flatbuffers/flatc"  # bazel
  ]
  for p in paths_to_try:
    self._flatc_path = resource_loader.get_path_to_datafile(p)
    if os.path.exists(self._flatc_path): break

  def FindSchema(base_name):
    return resource_loader.get_path_to_datafile("%s" % base_name)

  # Supported schemas for upgrade.
  self._schemas = [
      (0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
      (1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
      (2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
      (3, FindSchema("schema_v3.fbs"), False, None)  # Non-callable by design.
  ]
  # Ensure schemas are sorted, and extract latest version and upgrade
  # dispatch function table.
  self._schemas.sort()
  self._new_version, self._new_schema = self._schemas[-1][:2]
  self._upgrade_dispatch = dict(
      (version, dispatch)
      for version, unused1, unused2, dispatch in self._schemas)
Example 5: setUp
def setUp(self):
  self.interpreter = interpreter_wrapper.Interpreter(
      model_path=resource_loader.get_path_to_datafile(
          'testdata/permute_float.tflite'))
  self.interpreter.allocate_tensors()
  self.input0 = self.interpreter.get_input_details()[0]['index']
  self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)
Example 6: testFloat
def testFloat(self):
  interpreter = interpreter_wrapper.Interpreter(
      model_path=resource_loader.get_path_to_datafile(
          'testdata/permute_float.tflite'))
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  self.assertEqual(1, len(input_details))
  self.assertEqual('input', input_details[0]['name'])
  self.assertEqual(np.float32, input_details[0]['dtype'])
  self.assertTrue(([1, 4] == input_details[0]['shape']).all())
  self.assertEqual((0.0, 0), input_details[0]['quantization'])

  output_details = interpreter.get_output_details()
  self.assertEqual(1, len(output_details))
  self.assertEqual('output', output_details[0]['name'])
  self.assertEqual(np.float32, output_details[0]['dtype'])
  self.assertTrue(([1, 4] == output_details[0]['shape']).all())
  self.assertEqual((0.0, 0), output_details[0]['quantization'])

  test_input = np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32)
  expected_output = np.array([[4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
  interpreter.set_tensor(input_details[0]['index'], test_input)
  interpreter.invoke()

  output_data = interpreter.get_tensor(output_details[0]['index'])
  self.assertTrue((expected_output == output_data).all())
Example 7: testUint8
def testUint8(self):
  model_path = resource_loader.get_path_to_datafile(
      'testdata/permute_uint8.tflite')
  with io.open(model_path, 'rb') as model_file:
    data = model_file.read()

  interpreter = interpreter_wrapper.Interpreter(model_content=data)
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  self.assertEqual(1, len(input_details))
  self.assertEqual('input', input_details[0]['name'])
  self.assertEqual(np.uint8, input_details[0]['dtype'])
  self.assertTrue(([1, 4] == input_details[0]['shape']).all())
  self.assertEqual((1.0, 0), input_details[0]['quantization'])

  output_details = interpreter.get_output_details()
  self.assertEqual(1, len(output_details))
  self.assertEqual('output', output_details[0]['name'])
  self.assertEqual(np.uint8, output_details[0]['dtype'])
  self.assertTrue(([1, 4] == output_details[0]['shape']).all())
  self.assertEqual((1.0, 0), output_details[0]['quantization'])

  test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
  expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
  interpreter.resize_tensor_input(input_details[0]['index'],
                                  test_input.shape)
  interpreter.allocate_tensors()
  interpreter.set_tensor(input_details[0]['index'], test_input)
  interpreter.invoke()

  output_data = interpreter.get_tensor(output_details[0]['index'])
  self.assertTrue((expected_output == output_data).all())
Example 8: testString
def testString(self):
  interpreter = interpreter_wrapper.Interpreter(
      model_path=resource_loader.get_path_to_datafile(
          'testdata/gather_string.tflite'))
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  self.assertEqual(2, len(input_details))
  self.assertEqual('input', input_details[0]['name'])
  self.assertEqual(np.string_, input_details[0]['dtype'])
  self.assertTrue(([10] == input_details[0]['shape']).all())
  self.assertEqual((0.0, 0), input_details[0]['quantization'])
  self.assertEqual('indices', input_details[1]['name'])
  self.assertEqual(np.int64, input_details[1]['dtype'])
  self.assertTrue(([3] == input_details[1]['shape']).all())
  self.assertEqual((0.0, 0), input_details[1]['quantization'])

  output_details = interpreter.get_output_details()
  self.assertEqual(1, len(output_details))
  self.assertEqual('output', output_details[0]['name'])
  self.assertEqual(np.string_, output_details[0]['dtype'])
  self.assertTrue(([3] == output_details[0]['shape']).all())
  self.assertEqual((0.0, 0), output_details[0]['quantization'])

  test_input = np.array([1, 2, 3], dtype=np.int64)
  interpreter.set_tensor(input_details[1]['index'], test_input)

  test_input = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
  expected_output = np.array([b'b', b'c', b'd'])
  interpreter.set_tensor(input_details[0]['index'], test_input)
  interpreter.invoke()

  output_data = interpreter.get_tensor(output_details[0]['index'])
  self.assertTrue((expected_output == output_data).all())
Example 9: _load_library
def _load_library(name, op_list=None):
  """Loads a .so file containing the specified operators.

  Args:
    name: The name of the .so file to load.
    op_list: A list of names of operators that the library should have. If None
      then the .so file's contents will not be verified.

  Raises:
    NameError if one of the required ops is missing.
  """
  try:
    filename = resource_loader.get_path_to_datafile(name)
    library = load_library.load_op_library(filename)
    for expected_op in (op_list or []):
      for lib_op in library.OP_LIST.op:
        if lib_op.name == expected_op:
          break
      else:
        raise NameError(
            'Could not find operator %s in dynamic library %s' %
            (expected_op, name))
    return library
  except errors.NotFoundError:
    logging.warning('%s file could not be loaded.', name)
Example 10: testInvokeBeforeReady
def testInvokeBeforeReady(self):
  interpreter = interpreter_wrapper.Interpreter(
      model_path=resource_loader.get_path_to_datafile(
          'testdata/permute_float.tflite'))
  with self.assertRaisesRegexp(RuntimeError,
                               'Invoke called on model that is not ready'):
    interpreter.invoke()
Example 11: load_op_library
def load_op_library(path):
  """Loads a contrib op library from the given path.

  NOTE(mrry): On Windows, we currently assume that some contrib op
  libraries are statically linked into the main TensorFlow Python
  extension DLL - use dynamically linked ops if the .so is present.

  Args:
    path: An absolute path to a shared object file.

  Returns:
    A Python module containing the Python wrappers for Ops defined in the
    plugin.
  """
  if os.name == 'nt':
    # To avoid making every user_ops aware of windows, re-write
    # the file extension from .so to .dll.
    path = re.sub(r'\.so$', '.dll', path)

    # Currently we have only some user_ops as dlls on windows - don't try
    # to load them if the dll is not found.
    # TODO(mrry): Once we have all of them this check should be removed.
    if not os.path.exists(path):
      return None
  path = resource_loader.get_path_to_datafile(path)
  ret = load_library.load_op_library(path)
  assert ret, 'Could not load %s' % path
  return ret
Example 12: _maybe_load_nccl_ops_so
def _maybe_load_nccl_ops_so():
  """Loads nccl ops so if it hasn't been loaded already."""
  with _module_lock:
    global _nccl_ops_so
    if not _nccl_ops_so:
      _nccl_ops_so = loader.load_op_library(
          resource_loader.get_path_to_datafile('_nccl_ops.so'))
Example 13: testInvalidIndex
def testInvalidIndex(self):
  interpreter = interpreter_wrapper.Interpreter(
      model_path=resource_loader.get_path_to_datafile(
          'testdata/permute_float.tflite'))
  interpreter.allocate_tensors()
  # Invalid tensor index passed.
  with self.assertRaisesRegexp(ValueError, 'Tensor with no shape found.'):
    interpreter._get_tensor_details(4)
Example 14: assertCommandSucceeded
def assertCommandSucceeded(self, script_name, **flags):
  """Runs a test script via run_script."""
  run_script = resource_loader.get_path_to_datafile("run_script")
  command_parts = [run_script]
  for flag_key, flag_value in flags.items():
    command_parts.append("--%s=%s" % (flag_key, flag_value))

  env = dict(TF2_BEHAVIOR="enabled", SCRIPT_NAME=script_name)
  logging.info("Running: %s with environment flags %s" % (command_parts, env))
  subprocess.check_call(command_parts, env=dict(os.environ, **env))
Example 15: _initObjectDetectionArgs
def _initObjectDetectionArgs(self):
  # Initializes the arguments required for the object detection model.
  self._graph_def_file = resource_loader.get_path_to_datafile(
      'testdata/tflite_graph.pb')
  self._input_arrays = ['normalized_input_image_tensor']
  self._output_arrays = [
      'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
      'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
  ]
  self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}