This article collects typical usage examples of the Python method tensorflow.contrib.tensorrt.calib_graph_to_infer_graph. If you are wondering what tensorrt.calib_graph_to_infer_graph does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage of the module it belongs to, tensorflow.contrib.tensorrt.
Two code examples of the tensorrt.calib_graph_to_infer_graph method are shown below.
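For context, here is a minimal sketch of the INT8 workflow in which calib_graph_to_infer_graph is typically the final step: a calibration graph is built with create_inference_graph in INT8 mode, run on representative data, and then converted into the deployable inference graph. The frozen_graph_def, the 'input'/'logits' node names, and calibration_batches below are placeholders for this sketch, not part of the examples on this page.

import tensorflow as tf
from tensorflow.contrib import tensorrt as trt

# Step 1: build a calibration graph from a frozen GraphDef (placeholder here).
calib_graph = trt.create_inference_graph(
    input_graph_def=frozen_graph_def,      # assumed frozen GraphDef
    outputs=['logits'],                    # assumed output node name
    max_batch_size=8,
    max_workspace_size_bytes=1 << 30,
    precision_mode='INT8')                 # INT8 first yields a calibration graph

# Step 2: run the calibration graph on representative data so TensorRT can
# record activation ranges for each engine.
with tf.Graph().as_default():
    fetches = tf.import_graph_def(calib_graph, name='', return_elements=['logits:0'])
    with tf.Session() as sess:
        for batch in calibration_batches:  # assumed iterable of calibration inputs
            sess.run(fetches, feed_dict={'input:0': batch})

# Step 3: convert the calibrated graph into the final INT8 inference graph.
infer_graph = trt.calib_graph_to_infer_graph(calib_graph)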
Example 1: get_trt_graph_from_calib
# Required module: from tensorflow.contrib import tensorrt [as alias]
# Or: from tensorflow.contrib.tensorrt import calib_graph_to_infer_graph [as alias]
def get_trt_graph_from_calib(graph_name, calib_graph_def, output_dir):
    """Convert a TensorRT graph used for calibration to an inference graph."""
    trt_graph = trt.calib_graph_to_infer_graph(calib_graph_def)
    write_graph_to_file(graph_name, trt_graph, output_dir)
    return trt_graph
################################################################################
# Run the graph in various precision modes.
################################################################################
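As a hedged usage sketch for Example 1, the helper above would be called once the calibration graph has been executed on calibration data in the same process; the graph name and output directory below are hypothetical, and write_graph_to_file is a helper defined elsewhere in the original script.

# calib_graph_def is assumed to be a calibration GraphDef that has already been
# run on calibration data in this process.
trt_int8_graph = get_trt_graph_from_calib(
    graph_name='resnet_int8',            # hypothetical graph name
    calib_graph_def=calib_graph_def,
    output_dir='/tmp/trt_graphs')        # hypothetical output directory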
Example 2: load_model
# Required module: from tensorflow.contrib import tensorrt [as alias]
# Or: from tensorflow.contrib.tensorrt import calib_graph_to_infer_graph [as alias]
def load_model(model, input_map=None):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    # or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        # JJia: TensorRT enabled
        print('TensorRT Enabled')
        trt_graph = trt.create_inference_graph(
            input_graph_def=graph_def,
            outputs=['embeddings:0'],
            max_batch_size=1,
            max_workspace_size_bytes=500000000,  # 500 MB of memory assigned to TRT
            precision_mode="FP16",               # precision: "FP32", "FP16" or "INT8"
            minimum_segment_size=1)
        ##trt_graph = trt.calib_graph_to_infer_graph(trt_graph)
        #tf.import_graph_def(trt_graph, input_map=input_map, name='')
        return trt_graph  # "return graph_def" to disable TRT, "return trt_graph" to enable it
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)
        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
        # JJia: TensorRT enabled
        print('TensorRT Enabled', 1 << 20)
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            tf.get_default_session(),
            tf.get_default_graph().as_graph_def(),
            output_node_names=["embeddings"])
        # Rewrite variable-update ops so the frozen graph can be re-imported cleanly.
        for node in frozen_graph.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
        trt_graph = trt.create_inference_graph(
            input_graph_def=frozen_graph,
            outputs=["embeddings"],
            max_batch_size=1,
            max_workspace_size_bytes=1 << 20,
            precision_mode="FP16",
            minimum_segment_size=1)
        #tf.import_graph_def(trt_graph, return_elements=["embeddings:0"])
        return trt_graph  # "return frozen_graph" to disable TRT, "return trt_graph" to enable it
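A hedged usage sketch for Example 2 in the frozen-protobuf case: the caller imports the returned TRT-optimized GraphDef and runs it to compute embeddings. The model file name, the 'input:0' and 'phase_train:0' feed names, and the dummy batch are assumptions in the style of FaceNet graphs, not guaranteed by the example itself.

import numpy as np
import tensorflow as tf

with tf.Graph().as_default():
    with tf.Session() as sess:
        trt_graph = load_model('facenet_frozen.pb')            # hypothetical frozen model file
        tf.import_graph_def(trt_graph, input_map=None, name='')
        images = np.zeros((1, 160, 160, 3), dtype=np.float32)  # placeholder input batch
        embeddings = sess.run(
            'embeddings:0',
            feed_dict={'input:0': images, 'phase_train:0': False})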