本文整理汇总了Python中tensorflow.python.tools.strip_unused_lib.strip_unused方法的典型用法代码示例。如果您正苦于以下问题:Python strip_unused_lib.strip_unused方法的具体用法?Python strip_unused_lib.strip_unused怎么用?Python strip_unused_lib.strip_unused使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.tools.strip_unused_lib
的用法示例。
在下文中一共展示了strip_unused_lib.strip_unused方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: optimize_graph
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def optimize_graph(graph):
    """Strip subgraphs not needed for inference and save a frozen TF model.

    Args:
      graph: A tf.Graph holding the loaded model.

    Side effects:
      Writes the stripped GraphDef to ``frozen_model_file``.

    NOTE(review): ``input_node``, ``bbox_output_node``, ``class_output_node``
    and ``frozen_model_file`` are module-level names defined elsewhere in
    this file.
    """
    # Keep only the ops reachable from the two output nodes, re-typing the
    # input placeholder as float32.
    stripped_gdef = strip_unused_lib.strip_unused(
        input_graph_def=graph.as_graph_def(),
        input_node_names=[input_node],
        output_node_names=[bbox_output_node, class_output_node],
        placeholder_type_enum=dtypes.float32.as_datatype_enum)
    with gfile.GFile(frozen_model_file, "wb") as model_file:
        model_file.write(stripped_gdef.SerializeToString())
# Load the original graph and remove anything we don't need.
示例2: optimize_for_inference
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def optimize_for_inference(input_graph_def, input_node_names, output_node_names,
                           placeholder_type_enum):
    """Applies a series of inference optimizations on the input graph.

    Args:
      input_graph_def: A GraphDef containing a training model.
      input_node_names: A list of names of the nodes that are fed inputs during
        inference.
      output_node_names: A list of names of the nodes that produce the final
        results.
      placeholder_type_enum: The AttrValue enum for the placeholder data type, or
        a list that specifies one value per input node name.

    Returns:
      An optimized version of the input graph.
    """
    ensure_graph_is_valid(input_graph_def)
    # Drop every node that is not needed to compute the requested outputs.
    stripped = strip_unused_lib.strip_unused(
        input_graph_def, input_node_names, output_node_names,
        placeholder_type_enum)
    # Remove training-only ops, then apply the graph-level fusions.
    slimmed = graph_util.remove_training_nodes(stripped)
    folded = fold_batch_norms(slimmed)
    fused = fuse_resize_and_conv(folded, output_node_names)
    ensure_graph_is_valid(fused)
    return fused
示例3: optimize_for_inference
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def optimize_for_inference(input_graph_def, input_node_names,
                           output_node_names, placeholder_type_enum):
    """Applies a series of inference optimizations on the input graph.

    Args:
      input_graph_def: A GraphDef containing a training model.
      input_node_names: A list of names of the nodes that are fed inputs during
        inference.
      output_node_names: A list of names of the nodes that produce the final
        results.
      placeholder_type_enum: Data type of the placeholders used for inputs.

    Returns:
      An optimized version of the input graph.
    """
    ensure_graph_is_valid(input_graph_def)
    # First prune the graph down to what the outputs actually need.
    graph_def = strip_unused_lib.strip_unused(
        input_graph_def, input_node_names, output_node_names,
        placeholder_type_enum)
    # Run the single-argument rewrites in order: drop training-only nodes,
    # then fold batch-norm parameters into the preceding convolutions.
    for transform in (graph_util.remove_training_nodes, fold_batch_norms):
        graph_def = transform(graph_def)
    graph_def = fuse_resize_and_conv(graph_def, output_node_names)
    ensure_graph_is_valid(graph_def)
    return graph_def
示例4: optimize_graph
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def optimize_graph(input_path, output_path, input_nodes, output_nodes):
    """Load a SavedModel, strip unused nodes, and write the frozen GraphDef.

    Args:
      input_path: Directory of the SavedModel to load.
      output_path: File path the stripped, serialized GraphDef is written to.
      input_nodes: Names of the graph's input nodes.
      output_nodes: Names of the graph's output nodes.

    Returns:
      The tf.Graph the SavedModel was loaded into.
    """
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], input_path)
        # Keep only what the outputs depend on; inputs become float32
        # placeholders.
        pruned = strip_unused_lib.strip_unused(
            input_graph_def=loaded_graph.as_graph_def(),
            input_node_names=input_nodes,
            output_node_names=output_nodes,
            placeholder_type_enum=dtypes.float32.as_datatype_enum)
        with gfile.GFile(output_path, 'wb') as out_file:
            out_file.write(pruned.SerializeToString())
    return loaded_graph
示例5: test_rewrite_nn_resize_op_multiple_path
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def test_rewrite_nn_resize_op_multiple_path(self):
    """Checks that rewrite_nn_resize_op replaces every stack-based
    nearest-neighbor upsampling pattern, even when the graph contains
    more than one such pattern feeding a common consumer."""
    g = tf.Graph()
    with g.as_default():
        # First stack/reshape 2x nearest-neighbor upsampling pattern.
        with tf.name_scope('nearest_upsampling'):
            x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
            x_stack = tf.stack([tf.stack([x] * 2, axis=3)] * 2, axis=2)
            x_reshape = tf.reshape(x_stack, [8, 20, 20, 8])
        # A second, independent copy of the same pattern; TF uniquifies the
        # scope name to 'nearest_upsampling_1'.
        with tf.name_scope('nearest_upsampling'):
            x_2 = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8))
            x_stack_2 = tf.stack([tf.stack([x_2] * 2, axis=3)] * 2, axis=2)
            x_reshape_2 = tf.reshape(x_stack_2, [8, 20, 20, 8])
        # Both upsampled tensors feed one 'add' node, the graph output.
        t = x_reshape + x_reshape_2

        # Rewrite acts on the default graph, so it must run inside this scope.
        exporter.rewrite_nn_resize_op()

        graph_def = g.as_graph_def()
        # Strip everything not on the placeholder->add path so the scan below
        # only sees rewritten nodes.
        graph_def = strip_unused_lib.strip_unused(
            graph_def,
            input_node_names=[
                'nearest_upsampling/Placeholder', 'nearest_upsampling_1/Placeholder'
            ],
            output_node_names=['add'],
            placeholder_type_enum=dtypes.float32.as_datatype_enum)

        counter_resize_op = 0
        t_input_ops = [op.name for op in t.op.inputs]
        for node in graph_def.node:
            # Make sure Stacks are replaced.
            self.assertNotEqual(node.op, 'Pack')
            if node.op == 'ResizeNearestNeighbor':
                counter_resize_op += 1
                # Each resize op must feed the final add directly.
                self.assertIn(six.ensure_str(node.name) + ':0', t_input_ops)
        # One ResizeNearestNeighbor per original upsampling pattern.
        self.assertEqual(counter_resize_op, 2)
示例6: optimize_for_inference
# 需要导入模块: from tensorflow.python.tools import strip_unused_lib [as 别名]
# 或者: from tensorflow.python.tools.strip_unused_lib import strip_unused [as 别名]
def optimize_for_inference(input_graph_def, input_node_names, output_node_names,
                           placeholder_type_enum):
    """Applies a series of inference optimizations on the input graph.

    Args:
      input_graph_def: A GraphDef containing a training model.
      input_node_names: A list of names of the nodes that are fed inputs during
        inference.
      output_node_names: A list of names of the nodes that produce the final
        results.
      placeholder_type_enum: The AttrValue enum for the placeholder data type, or
        a list that specifies one value per input node name.

    Returns:
      An optimized version of the input graph.
    """
    ensure_graph_is_valid(input_graph_def)
    # Prune nodes that the requested outputs do not depend on.
    pruned_def = strip_unused_lib.strip_unused(
        input_graph_def, input_node_names, output_node_names,
        placeholder_type_enum)
    # This variant passes the output names so nodes feeding the outputs are
    # preserved during training-node removal.
    inference_def = graph_util.remove_training_nodes(
        pruned_def, output_node_names)
    folded_def = fold_batch_norms(inference_def)
    fused_def = fuse_resize_and_conv(folded_def, output_node_names)
    ensure_graph_is_valid(fused_def)
    return fused_def
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:30,代码来源:optimize_for_inference_lib.py