This article collects typical usage examples of the tensorrt.Builder method in Python. If you have been wondering how exactly to use tensorrt.Builder, or what it is good for, the curated code examples below may help. You can also explore further usage examples from the tensorrt module that the method belongs to.
The following presents 15 code examples of the tensorrt.Builder method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
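All of the examples assume that TensorRT's Python bindings are imported as trt, and most reference a module-level logger named TRT_LOGGER that is defined elsewhere in their source files. A minimal setup that would make the snippets below self-contained might look like this (the chosen severity level is an assumption):

import os
import argparse
import tensorrt as trt

# Shared logger used by the builder, parsers and runtime in the examples below.
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)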
Example 1: main
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def main():
    "Parse command line and feed the conversion function"
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('tf_model_filename', type=str, help='TensorFlow model file')
    arg_parser.add_argument('trt_model_filename', type=str, help='TensorRT model file')
    arg_parser.add_argument('--model_data_layout', type=str, default='NHWC', help='Model data layout (NHWC or NCHW)')
    arg_parser.add_argument('--input_layer_name', type=str, default='input', help='Input layer name')
    arg_parser.add_argument('--input_height', type=int, default=224, help='Input height')
    arg_parser.add_argument('--input_width', type=int, default=224, help='Input width')
    arg_parser.add_argument('--output_layer_name', type=str, default='MobilenetV1/Predictions/Reshape_1', help='Output layer name')
    arg_parser.add_argument('--output_data_type', type=str, default='fp32', help='Model data type')
    arg_parser.add_argument('--max_workspace_size', type=int, default=(1 << 30), help='Builder workspace size')
    arg_parser.add_argument('--max_batch_size', type=int, default=1, help='Builder batch size')
    args = arg_parser.parse_args()
    convert_tf_model_to_trt(args.tf_model_filename, args.trt_model_filename,
                            args.model_data_layout, args.input_layer_name, args.input_height, args.input_width,
                            args.output_layer_name, args.output_data_type, args.max_workspace_size, args.max_batch_size)
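The conversion helper itself is not shown in this example. A minimal sketch of what convert_tf_model_to_trt could look like, assuming the TensorFlow graph is frozen and goes through UFF as in Examples 5 and 8 (the function body below is an assumption, not the original implementation):

import uff
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

def convert_tf_model_to_trt(tf_model_filename, trt_model_filename,
                            model_data_layout, input_layer_name, input_height, input_width,
                            output_layer_name, output_data_type, max_workspace_size, max_batch_size):
    # Convert the frozen TensorFlow graph to a UFF buffer in memory.
    uff_buffer = uff.from_tensorflow_frozen_model(tf_model_filename, [output_layer_name])
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = max_workspace_size
        builder.max_batch_size = max_batch_size
        if output_data_type == 'fp16':
            builder.fp16_mode = True
        # UFF input shapes are registered in CHW; the order flag tells the parser
        # how the original model lays out its data. A 3-channel input is assumed.
        order = trt.UffInputOrder.NHWC if model_data_layout == 'NHWC' else trt.UffInputOrder.NCHW
        parser.register_input(input_layer_name, (3, input_height, input_width), order)
        parser.register_output(output_layer_name)
        parser.parse_buffer(uff_buffer, network)
        engine = builder.build_cuda_engine(network)
        with open(trt_model_filename, 'wb') as f:
            f.write(engine.serialize())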
Example 2: main
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def main():
    "Parse command line and feed the conversion function"
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('onnx_model_file', type=str, help='ONNX model file')
    arg_parser.add_argument('trt_model_filename', type=str, help='TensorRT model file')
    arg_parser.add_argument('--input_tensor_name', type=str, default='input_tensor:0', help='Input tensor name')
    arg_parser.add_argument('--output_tensor_name', type=str, default='prob', help='Output tensor name')
    arg_parser.add_argument('--output_data_type', type=str, default='fp32', help='Model data type')
    arg_parser.add_argument('--max_workspace_size', type=int, default=(1 << 30), help='Builder workspace size')
    arg_parser.add_argument('--max_batch_size', type=int, default=1, help='Builder batch size')
    args = arg_parser.parse_args()
    convert_onnx_model_to_trt(args.onnx_model_file, args.trt_model_filename,
                              args.input_tensor_name, args.output_tensor_name,
                              args.output_data_type, args.max_workspace_size, args.max_batch_size)
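Likewise, convert_onnx_model_to_trt is defined elsewhere in this script's source. A plausible sketch following the OnnxParser pattern of Examples 3 and 7 (the body is an assumption):

def convert_onnx_model_to_trt(onnx_model_file, trt_model_filename,
                              input_tensor_name, output_tensor_name,
                              output_data_type, max_workspace_size, max_batch_size):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = max_workspace_size
        builder.max_batch_size = max_batch_size
        if output_data_type == 'fp16':
            builder.fp16_mode = True
        # Populate the network from the ONNX file, then build and serialize the engine.
        with open(onnx_model_file, 'rb') as model:
            parser.parse(model.read())
        engine = builder.build_cuda_engine(network)
        with open(trt_model_filename, 'wb') as f:
            f.write(engine.serialize())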
Example 3: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(onnx, verbose=False):
    """Build TensorRT engine from the ONNX model."""
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger()
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 30  # 1 GiB
        builder.max_batch_size = MAX_BATCH
        builder.fp16_mode = FP16_MODE
        with open(onnx, 'rb') as model:
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        if trt.__version__[0] >= '7':
            # Set the input to batch size 1.
            shape = list(network.get_input(0).shape)
            shape[0] = 1
            network.get_input(0).shape = shape
        return builder.build_cuda_engine(network)
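This example references three module-level constants defined elsewhere in its source. Plausible definitions, based on the explicit-batch network-creation flag introduced in TensorRT 7 (the values of MAX_BATCH and FP16_MODE are assumptions):

# Empty on TensorRT 6 and earlier; the explicit-batch flag on TensorRT 7+.
EXPLICIT_BATCH = []
if trt.__version__[0] >= '7':
    EXPLICIT_BATCH.append(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))

MAX_BATCH = 1     # assumed: a single-image engine
FP16_MODE = True  # assumed: enable half precision if the GPU supports it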
Example 4: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(deploy_file, model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
        builder.max_workspace_size = common.GiB(1)
        # Set the parser's plugin factory. Note that we bind the factory to a reference so
        # that we can destroy it later. (parser.plugin_factory_ext is a write-only attribute.)
        parser.plugin_factory_ext = fc_factory
        # Parse the model and build the engine.
        model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=ModelData.DTYPE)
        network.mark_output(model_tensors.find(ModelData.OUTPUT_NAME))
        return builder.build_cuda_engine(network)

# Loads a test case into the provided pagelocked_buffer.
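Here, common is the helper module (common.py) that ships with NVIDIA's TensorRT Python samples; its GiB function simply converts a size in gibibytes to bytes:

def GiB(val):
    # val * 2**30 bytes, i.e. val gibibytes.
    return val * 1 << 30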
Example 5: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(model_file):
    # For more information on TRT basics, refer to the introductory samples.
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = common.GiB(1)
        # Parse the UFF network.
        parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
        parser.register_output(ModelData.OUTPUT_NAME)
        parser.parse(model_file, network)
        # Build and return an engine.
        return builder.build_cuda_engine(network)

# Loads a test case into the provided pagelocked_buffer.
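ModelData is a small constants class defined per sample; its values depend entirely on the model being converted. A hypothetical version for an MNIST-style UFF model (every value below is a placeholder):

class ModelData(object):
    INPUT_NAME = 'input_1'             # placeholder tensor name
    INPUT_SHAPE = (1, 28, 28)          # CHW shape of a grayscale image
    OUTPUT_NAME = 'dense_1/Softmax'    # placeholder tensor name
    DTYPE = trt.float32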
Example 6: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
    with trt.Builder(trt_logger) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 30
        if trt_engine_datatype == trt.DataType.HALF:
            builder.fp16_mode = True
        builder.max_batch_size = batch_size
        parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
        parser.register_output("MarkOutput_0")
        parser.parse(uff_model_path, network)
        if not silent:
            print("Building TensorRT engine. This may take a few minutes.")
        return builder.build_cuda_engine(network)
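A usage sketch for this variant, building a half-precision engine and serializing it to disk (the file names and batch size are made up):

trt_logger = trt.Logger(trt.Logger.WARNING)
engine = build_engine('ssd_mobilenet.uff', trt_logger,
                      trt_engine_datatype=trt.DataType.HALF, batch_size=8)
with open('ssd_mobilenet.buf', 'wb') as f:
    f.write(engine.serialize())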
Example 7: build_engine_onnx
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine_onnx(model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = common.GiB(1)
        # Load the ONNX model and parse it in order to populate the TensorRT network.
        with open(model_file, 'rb') as model:
            parser.parse(model.read())
        return builder.build_cuda_engine(network)
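Note that this example ignores the parser's return value, so a failed parse would silently hand an incomplete network to the builder. A defensive variant of the parsing step, in the style of Example 3:

with open(model_file, 'rb') as model:
    if not parser.parse(model.read()):
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        raise RuntimeError('Failed to parse the ONNX file.')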
Example 8: build_engine_uff
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine_uff(model_file):
    # You can set the logger severity higher to suppress messages (or lower to display more messages).
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        # Workspace size is the maximum amount of memory available to the builder while building an engine.
        # It should generally be set as high as possible.
        builder.max_workspace_size = common.GiB(1)
        # We need to manually register the input and output nodes for UFF.
        parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
        parser.register_output(ModelData.OUTPUT_NAME)
        # Load the UFF model and parse it in order to populate the TensorRT network.
        parser.parse(model_file, network)
        # Build and return an engine.
        return builder.build_cuda_engine(network)
Example 9: get_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def get_engine(onnx_file_path, engine_file_path=""):
    """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
    def build_engine():
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 30  # 1 GiB
            builder.max_batch_size = 1
            # Parse the model file.
            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))
                exit(0)
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
            engine = builder.build_cuda_engine(network)
            print("Completed creating Engine")
            with open(engine_file_path, "wb") as f:
                f.write(engine.serialize())
            return engine

    if os.path.exists(engine_file_path):
        # If a serialized engine exists, use it instead of building a new one.
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine()
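Typical downstream usage, as in the YOLOv3 sample this snippet appears to come from (the file names are assumptions):

with get_engine('yolov3.onnx', 'yolov3.trt') as engine, engine.create_execution_context() as context:
    # Allocate buffers and run inference with the context here.
    pass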
Example 10: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(weights):
    # For more information on TRT basics, refer to the introductory samples.
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network:
        builder.max_workspace_size = common.GiB(1)
        # Populate the network using weights from the PyTorch model.
        populate_network(network, weights)
        # Build and return an engine.
        return builder.build_cuda_engine(network)

# Loads a random test case from pytorch's DataLoader
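populate_network builds the TensorRT network layer by layer from a PyTorch state dict. A minimal sketch of the idea, with a single fully connected layer (the layer structure and the weight key names are assumptions):

def populate_network(network, weights):
    # Declare the input tensor, add a layer whose kernel and bias come from
    # the PyTorch state dict, then mark the output tensor.
    input_tensor = network.add_input(name='input', dtype=trt.float32, shape=(1, 28, 28))
    fc = network.add_fully_connected(input=input_tensor, num_outputs=10,
                                     kernel=weights['fc.weight'].numpy(),
                                     bias=weights['fc.bias'].numpy())
    fc.get_output(0).name = 'prob'
    network.mark_output(tensor=fc.get_output(0))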
Example 11: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(model_path):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = common.GiB(1)
        uff_path = model_to_uff(model_path)
        parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
        parser.register_output(ModelData.OUTPUT_NAME)
        parser.parse(uff_path, network)
        return builder.build_cuda_engine(network)

# Loads a test case into the provided pagelocked_buffer. Returns loaded test case label.
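model_to_uff is another helper from the same sample; a sketch of what it might do, assuming the uff converter package is available (the body is an assumption):

def model_to_uff(model_path):
    # Convert a frozen TensorFlow graph to a UFF file next to the original model.
    import uff
    uff_path = os.path.splitext(model_path)[0] + '.uff'
    uff.from_tensorflow_frozen_model(model_path, [ModelData.OUTPUT_NAME], output_filename=uff_path)
    return uff_path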
Example 12: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(deploy_file, model_file):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
        builder.max_workspace_size = common.GiB(1)
        # Set the parser's plugin factory. Note that we bind the factory to a reference so
        # that we can destroy it later. (parser.plugin_factory_ext is a write-only attribute.)
        parser.plugin_factory_ext = fc_factory
        # Parse the model and build the engine.
        model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=ModelData.DTYPE)
        network.mark_output(model_tensors.find(ModelData.OUTPUT_NAME))
        return builder.build_cuda_engine(network)

# Tries to load an engine from the provided engine_path, or builds and saves an engine to the engine_path.
Example 13: build_engine_caffe
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine_caffe(model_file, deploy_file):
    # You can set the logger severity higher to suppress messages (or lower to display more messages).
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
        # Workspace size is the maximum amount of memory available to the builder while building an engine.
        # It should generally be set as high as possible.
        builder.max_workspace_size = common.GiB(1)
        # Load the Caffe model and parse it in order to populate the TensorRT network.
        # This function returns an object that we can query to find tensors by name.
        model_tensors = parser.parse(deploy=deploy_file, model=model_file, network=network, dtype=ModelData.DTYPE)
        # For Caffe, we need to manually mark the output of the network.
        # Since we know the name of the output tensor, we can find it in model_tensors.
        network.mark_output(model_tensors.find(ModelData.OUTPUT_NAME))
        return builder.build_cuda_engine(network)
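Once built, an engine from any of these functions is typically exercised with the buffer helpers from the samples' common.py (allocate_buffers and do_inference are sample helpers; the file names are placeholders):

engine = build_engine_caffe('mnist.caffemodel', 'mnist.prototxt')
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
with engine.create_execution_context() as context:
    # Copy a preprocessed input into inputs[0].host before running inference.
    [output] = common.do_inference(context, bindings=bindings, inputs=inputs,
                                   outputs=outputs, stream=stream)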
Example 14: get_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def get_engine(onnx_file_path, engine_file_path=""):
    """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
    def build_engine():
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 28  # 256 MiB
            builder.max_batch_size = 1
            # Parse the model file.
            if not os.path.exists(onnx_file_path):
                print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))
                exit(0)
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
            engine = builder.build_cuda_engine(network)
            print("Completed creating Engine")
            with open(engine_file_path, "wb") as f:
                f.write(engine.serialize())
            return engine

    if os.path.exists(engine_file_path):
        # If a serialized engine exists, use it instead of building a new one.
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine()
Example 15: build_engine
# Required import: import tensorrt [as alias]
# Or: from tensorrt import Builder [as alias]
def build_engine(weights):
    # For more information on TRT basics, refer to the introductory samples.
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network:
        builder.max_workspace_size = common.GiB(1)
        # Set the refit flag on the builder.
        builder.refittable = True
        # Populate the network using weights from the PyTorch model.
        populate_network(network, weights)
        # Build and return an engine.
        return builder.build_cuda_engine(network)

# Loads a random test case from pytorch's DataLoader
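Because the builder is marked refittable, the weights of the resulting engine can later be swapped without a full rebuild, via trt.Refitter (the layer name and the new weights array below are placeholders; engine is the object returned by build_engine above):

refitter = trt.Refitter(engine, TRT_LOGGER)
# Replace the kernel weights of a named layer, then apply the refit.
refitter.set_weights('fc1', trt.WeightsRole.KERNEL, new_kernel_weights)
assert refitter.refit_cuda_engine()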