This page collects typical usage examples of Python's tensorrt.Logger. If you have been struggling with questions like: what exactly is tensorrt.Logger for, how is it used, and what does it look like in real code? then the curated examples below should help. You can also explore further usage examples of the enclosing tensorrt module.
The following shows 15 code examples of tensorrt.Logger, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
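Before diving into the examples, here is a minimal illustrative sketch (not taken from any example below) of what tensorrt.Logger is: the logging object handed to TensorRT's Builder, Runtime and parsers, constructed with a minimum severity.

import tensorrt as trt

# Available severities: VERBOSE, INFO, WARNING (the default), ERROR, INTERNAL_ERROR.
logger = trt.Logger(trt.Logger.INFO)
runtime = trt.Runtime(logger)  # the same logger instance is shared across TensorRT objects
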
Example 1: __init__
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def __init__(self, engine_path, input_names=None, output_names=None, final_shapes=None):
    # load engine
    self.logger = trt.Logger()
    self.runtime = trt.Runtime(self.logger)
    with open(engine_path, 'rb') as f:
        self.engine = self.runtime.deserialize_cuda_engine(f.read())
    self.context = self.engine.create_execution_context()
    if input_names is None:
        self.input_names = self._trt_input_names()
    else:
        self.input_names = input_names
    if output_names is None:
        self.output_names = self._trt_output_names()
    else:
        self.output_names = output_names
    self.final_shapes = final_shapes

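Example 1 is the constructor of a thin engine-wrapper class; _trt_input_names() and _trt_output_names() presumably read the binding names off the deserialized engine. A hedged usage sketch, where the class name TRTModel and the engine file name are illustrative assumptions:

# hypothetical wrapper class and engine file
model = TRTModel('resnet50.engine', input_names=['input'], output_names=['output'])
# model.engine and model.context are now ready for buffer allocation and execution
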
Example 2: __init__
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def __init__(self, model, input_shape, output_layout=7):
    """Initialize TensorRT plugins, engine and context."""
    self.model = model
    self.input_shape = input_shape
    self.output_layout = output_layout
    self.trt_logger = trt.Logger(trt.Logger.INFO)
    self._load_plugins()
    self.engine = self._load_engine()
    self.host_inputs = []
    self.cuda_inputs = []
    self.host_outputs = []
    self.cuda_outputs = []
    self.bindings = []
    self.stream = cuda.Stream()
    self.context = self._create_context()

Example 3: __init__
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def __init__(self,
             max_batchsize,
             workspace,
             dtype=trt.float32,
             builder_config_fn=None,
             net_post_fn=None,
             input_names=None,
             verbose=False):
    super().__init__()
    self.max_batchsize = max_batchsize
    self.workspace = workspace
    self.logger = trt.Logger(trt.Logger.WARNING)
    self.built = False
    self.graph_pth = None
    self.refit_weight_dict = {}
    self.engine = None
    self.ctx = None
    self.output_shapes = None
    self.output_names = None
    self.need_refit = False
    self.verbose = verbose
    self.builder_config_fn = builder_config_fn
    self.input_names = input_names
    self.net_post_fn = net_post_fn

Example 4: build_engine
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def build_engine(onnx, verbose=False):
    """Build a TensorRT engine from the ONNX model."""
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger()
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 30  # 1GB
        builder.max_batch_size = MAX_BATCH
        builder.fp16_mode = FP16_MODE
        with open(onnx, 'rb') as model:
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        if trt.__version__[0] >= '7':
            # set input to batch size 1
            shape = list(network.get_input(0).shape)
            shape[0] = 1
            network.get_input(0).shape = shape
        return builder.build_cuda_engine(network)

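Example 4 relies on the module-level constants EXPLICIT_BATCH, MAX_BATCH and FP16_MODE, which are not shown on this page. Plausible definitions that match the explicit-batch pattern and the version check inside the function (the MAX_BATCH and FP16_MODE values are illustrative assumptions):

# A list, so it can be unpacked into builder.create_network(*EXPLICIT_BATCH);
# left empty on TensorRT 6 and older, where the flag does not exist.
EXPLICIT_BATCH = []
if trt.__version__[0] >= '7':
    EXPLICIT_BATCH.append(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
MAX_BATCH = 1
FP16_MODE = True
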
Example 5: _load_from_state_dict
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
):
    engine_bytes = state_dict[prefix + "engine"]
    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        self.engine = runtime.deserialize_cuda_engine(engine_bytes)
        self.context = self.engine.create_execution_context()
    self.input_names = state_dict[prefix + "input_names"]
    self.output_names = state_dict[prefix + "output_names"]

Example 6: __init__
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def __init__(self):
    self.colors = np.random.uniform(0, 255, size=(100, 3))
    self.input_shape = INPUT_HW
    self.trt_logger = trt.Logger(trt.Logger.INFO)
    self._load_plugins()
    self.engine = self._load_engine()
    self.host_inputs = []
    self.cuda_inputs = []
    self.host_outputs = []
    self.cuda_outputs = []
    self.bindings = []
    self.stream = cuda.Stream()
    self.context = self._create_context()

Example 7: main
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str, choices=list(MODEL_SPECS.keys()))
    args = parser.parse_args()
    # initialize
    if trt.__version__[0] < '7':
        ctypes.CDLL(LIB_FILE)
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    trt.init_libnvinfer_plugins(TRT_LOGGER, '')
    # compile the model into a TensorRT engine
    model = args.model
    spec = MODEL_SPECS[model]
    dynamic_graph = add_plugin(
        gs.DynamicGraph(spec['input_pb']),
        model,
        spec)
    _ = uff.from_tensorflow(
        dynamic_graph.as_graph_def(),
        output_nodes=['NMS'],
        output_filename=spec['tmp_uff'],
        text=True,
        debug_mode=DEBUG_UFF)
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        builder.max_workspace_size = 1 << 28
        builder.max_batch_size = 1
        builder.fp16_mode = True
        parser.register_input('Input', INPUT_DIMS)
        parser.register_output('MarkOutput_0')
        parser.parse(spec['tmp_uff'], network)
        engine = builder.build_cuda_engine(network)
        buf = engine.serialize()
        with open(spec['output_bin'], 'wb') as f:
            f.write(buf)

Example 8: build_engine
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def build_engine(onnx_file_path, engine_file_path, verbose=False):
    """Takes an ONNX file and creates a TensorRT engine."""
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger()
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 28
        builder.max_batch_size = 1
        builder.fp16_mode = True
        #builder.strict_type_constraints = True
        # Parse the model file
        print('Loading ONNX file from path {}...'.format(onnx_file_path))
        with open(onnx_file_path, 'rb') as model:
            print('Beginning ONNX file parsing')
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        if trt.__version__[0] >= '7':
            # The actual yolov3.onnx is generated with batch size 64.
            # Reshape input to batch size 1.
            shape = list(network.get_input(0).shape)
            shape[0] = 1
            network.get_input(0).shape = shape
        print('Completed parsing of ONNX file')
        print('Building an engine; this may take a while...')
        engine = builder.build_cuda_engine(network)
        print('Completed creating engine')
        with open(engine_file_path, 'wb') as f:
            f.write(engine.serialize())
        return engine

Example 9: convert_caffe_model_to_trt
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def convert_caffe_model_to_trt(caffe_weights_file, caffe_deploy_file, trt_model_filename,
                               output_tensor_name, output_data_type, max_workspace_size, max_batch_size):
    "Convert a pair of (caffe_weights_file, caffe_deploy_file) into a trt_model_file using the given parameters"
    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
        if output_data_type == 'fp16':
            if not builder.platform_has_fast_fp16:
                print('Warning: This platform is not optimized for fast fp16 mode')
            builder.fp16_mode = True
            print('Converting into fp16, max_batch_size={}'.format(max_batch_size))
        else:
            print('Converting into fp32 (default), max_batch_size={}'.format(max_batch_size))
        builder.max_workspace_size = max_workspace_size
        builder.max_batch_size = max_batch_size
        model_tensors = parser.parse(deploy=caffe_deploy_file, model=caffe_weights_file, network=network, dtype=trt.float32)
        network.mark_output(model_tensors.find(output_tensor_name))
        trt_model_object = builder.build_cuda_engine(network)
        try:
            serialized_trt_model = trt_model_object.serialize()
            with open(trt_model_filename, "wb") as trt_model_file:
                trt_model_file.write(serialized_trt_model)
        except Exception:
            print('Error: cannot serialize or write TensorRT engine to file {}.'.format(trt_model_filename))

Example 10: set_trt_logging_level
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def set_trt_logging_level(sev):
    # Map Python logging levels onto the global TensorRT logger's min_severity.
    global TRT_LOGGER
    if sev == logging.DEBUG:
        TRT_LOGGER.min_severity = trt.Logger.INFO
    elif sev == logging.WARNING:
        TRT_LOGGER.min_severity = trt.Logger.WARNING
    elif sev == logging.ERROR:
        TRT_LOGGER.min_severity = trt.Logger.ERROR
    elif sev == logging.CRITICAL:
        TRT_LOGGER.min_severity = trt.Logger.INTERNAL_ERROR

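A hedged usage sketch of Example 10, assuming TRT_LOGGER is a module-level trt.Logger instance (the original snippet does not show its definition):

import logging
import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)  # assumed module-level logger

set_trt_logging_level(logging.ERROR)  # TensorRT now reports only ERROR and above
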
Example 11: infer_with_trt
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def infer_with_trt(img, model):
    """Run inference on the image with a TensorRT engine."""
    import pycuda.autoinit
    import pycuda.driver as cuda
    import tensorrt as trt
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    with open(model, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    assert len(engine) == 2, 'ERROR: bad number of bindings'
    host_input, cuda_input, host_output, cuda_output = init_trt_buffers(
        cuda, trt, engine)
    stream = cuda.Stream()
    context = engine.create_execution_context()
    context.set_binding_shape(0, (1, 224, 224, 3))
    np.copyto(host_input, img.ravel())
    cuda.memcpy_htod_async(cuda_input, host_input, stream)
    if trt.__version__[0] >= '7':
        context.execute_async_v2(bindings=[int(cuda_input), int(cuda_output)],
                                 stream_handle=stream.handle)
    else:
        context.execute_async(bindings=[int(cuda_input), int(cuda_output)],
                              stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_output, cuda_output, stream)
    stream.synchronize()
    return host_output

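The init_trt_buffers() helper referenced in Example 11 is not shown on this page. A plausible sketch, assuming a single (1, 224, 224, 3) input binding and one output binding and using standard pycuda calls (names and shapes are assumptions, not the original helper):

import numpy as np

def init_trt_buffers(cuda, trt, engine):
    # Allocate pagelocked host buffers and device buffers for a 2-binding engine.
    size_in = trt.volume((1, 224, 224, 3))  # matches the set_binding_shape() call above
    size_out = trt.volume(engine.get_binding_shape(1))
    host_input = cuda.pagelocked_empty(size_in, np.float32)
    host_output = cuda.pagelocked_empty(size_out, np.float32)
    cuda_input = cuda.mem_alloc(host_input.nbytes)
    cuda_output = cuda.mem_alloc(host_output.nbytes)
    return host_input, cuda_input, host_output, cuda_output
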
Example 12: __init__
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def __init__(self, model, input_shape, category_num=80):
    """Initialize TensorRT plugins, engine and context."""
    self.model = model
    self.input_shape = input_shape
    h, w = input_shape
    # filter count
    filters = (category_num + 5) * 3
    if 'tiny' in model:
        self.output_shapes = [(1, filters, h // 32, w // 32),
                              (1, filters, h // 16, w // 16)]
    else:
        self.output_shapes = [(1, filters, h // 32, w // 32),
                              (1, filters, h // 16, w // 16),
                              (1, filters, h // 8, w // 8)]
    if 'tiny' in model:
        postprocessor_args = {
            # A list of 2 three-dimensional tuples for the Tiny YOLO masks
            'yolo_masks': [(3, 4, 5), (0, 1, 2)],
            # A list of 6 two-dimensional tuples for the Tiny YOLO anchors
            'yolo_anchors': [(10, 14), (23, 27), (37, 58),
                             (81, 82), (135, 169), (344, 319)],
            # Threshold for the non-max suppression algorithm, a float
            # value between 0 and 1
            'nms_threshold': 0.5,
            'yolo_input_resolution': input_shape,
            'category_num': category_num
        }
    else:
        postprocessor_args = {
            # A list of 3 three-dimensional tuples for the YOLO masks
            'yolo_masks': [(6, 7, 8), (3, 4, 5), (0, 1, 2)],
            # A list of 9 two-dimensional tuples for the YOLO anchors
            'yolo_anchors': [(10, 13), (16, 30), (33, 23),
                             (30, 61), (62, 45), (59, 119),
                             (116, 90), (156, 198), (373, 326)],
            # Threshold for the non-max suppression algorithm, a float
            # value between 0 and 1
            'nms_threshold': 0.5,
            'yolo_input_resolution': input_shape,
            'category_num': category_num
        }
    self.postprocessor = PostprocessYOLO(**postprocessor_args)
    self.trt_logger = trt.Logger(trt.Logger.INFO)
    self.engine = self._load_engine()
    self.context = self._create_context()
    self.inputs, self.outputs, self.bindings, self.stream = \
        allocate_buffers(self.engine)
    self.inference_fn = do_inference if trt.__version__[0] < '7' \
        else do_inference_v2

Example 13: convert_tf_model_to_trt
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def convert_tf_model_to_trt(tf_model_filename, trt_model_filename,
                            model_data_layout, input_layer_name, input_height, input_width,
                            output_layer_name, output_data_type, max_workspace_size, max_batch_size):
    "Convert a tf_model_filename into a trt_model_filename using the given parameters"
    uff_model = uff.from_tensorflow_frozen_model(tf_model_filename)
    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.UffParser() as parser:
        if model_data_layout == 'NHWC':
            parser.register_input(input_layer_name, [input_height, input_width, 3], trt.UffInputOrder.NHWC)
        else:
            parser.register_input(input_layer_name, [3, input_height, input_width], trt.UffInputOrder.NCHW)
        parser.register_output(output_layer_name)
        if not parser.parse_buffer(uff_model, network):
            raise RuntimeError("UFF model parsing (originally from {}) failed. Error: {}".format(tf_model_filename, parser.get_error(0).desc()))
        if output_data_type == 'fp32':
            print('Converting into fp32 (default), max_batch_size={}'.format(max_batch_size))
        else:
            if not builder.platform_has_fast_fp16:
                print('Warning: This platform is not optimized for fast fp16 mode')
            builder.fp16_mode = True
            print('Converting into fp16, max_batch_size={}'.format(max_batch_size))
        builder.max_workspace_size = max_workspace_size
        builder.max_batch_size = max_batch_size
        trt_model_object = builder.build_cuda_engine(network)
        try:
            serialized_trt_model = trt_model_object.serialize()
            with open(trt_model_filename, "wb") as trt_model_file:
                trt_model_file.write(serialized_trt_model)
        except Exception:
            raise RuntimeError('Cannot serialize or write TensorRT engine to file {}.'.format(trt_model_filename))

Example 14: convert_onnx_model_to_trt
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def convert_onnx_model_to_trt(onnx_model_filename, trt_model_filename,
                              input_tensor_name, output_tensor_name,
                              output_data_type, max_workspace_size, max_batch_size):
    "Convert an onnx_model_filename into a trt_model_filename using the given parameters"
    TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
    TRT_VERSION_MAJOR = int(trt.__version__.split('.')[0])
    with trt.Builder(TRT_LOGGER) as builder:
        if TRT_VERSION_MAJOR >= 7:
            flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION)) | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
            network = builder.create_network(flag)
        else:
            network = builder.create_network()
        parser = trt.OnnxParser(network, TRT_LOGGER)
        if output_data_type == 'fp32':
            print('Converting into fp32 (default), max_batch_size={}'.format(max_batch_size))
            builder.fp16_mode = False
        else:
            if not builder.platform_has_fast_fp16:
                print('Warning: This platform is not optimized for fast fp16 mode')
            builder.fp16_mode = True
            print('Converting into fp16, max_batch_size={}'.format(max_batch_size))
        builder.max_workspace_size = max_workspace_size
        builder.max_batch_size = max_batch_size
        with open(onnx_model_filename, 'rb') as onnx_model_file:
            onnx_model = onnx_model_file.read()
        if not parser.parse(onnx_model):
            raise RuntimeError("Onnx model parsing from {} failed. Error: {}".format(onnx_model_filename, parser.get_error(0).desc()))
        if TRT_VERSION_MAJOR >= 7:
            # Create an optimization profile (see Section 7.2 of https://docs.nvidia.com/deeplearning/sdk/pdf/TensorRT-Developer-Guide.pdf).
            profile = builder.create_optimization_profile()
            # FIXME: Hardcoded for ImageNet. The minimum/optimum/maximum dimensions of a dynamic input tensor are the same.
            profile.set_shape(input_tensor_name, (1, 3, 224, 224), (max_batch_size, 3, 224, 224), (max_batch_size, 3, 224, 224))
            config = builder.create_builder_config()
            config.add_optimization_profile(profile)
            trt_model_object = builder.build_engine(network, config)
        else:
            trt_model_object = builder.build_cuda_engine(network)
        try:
            serialized_trt_model = trt_model_object.serialize()
            with open(trt_model_filename, "wb") as trt_model_file:
                trt_model_file.write(serialized_trt_model)
        except Exception:
            raise RuntimeError('Cannot serialize or write TensorRT engine to file {}.'.format(trt_model_filename))

Example 15: build_engine
# Required module: import tensorrt [as alias]
# Or: from tensorrt import Logger [as alias]
def build_engine(
        onnx_path,
        seq_len=192,
        max_seq_len=256,
        batch_size=8,
        max_batch_size=64,
        trt_fp16=True,
        verbose=True,
        max_workspace_size=None,
        encoder=True,
):
    """Build a TRT engine from an ONNX file.
    Note that network output 1 is unmarked so that the engine will not use
    vestigial length calculations associated with masked_fill.
    """
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(TRT_LOGGER)
    builder.max_batch_size = max_batch_size
    with open(onnx_path, 'rb') as model_fh:
        model = model_fh.read()
    model_onnx = onnx.load_model_from_string(model)
    input_feats = model_onnx.graph.input[0].type.tensor_type.shape.dim[1].dim_value
    input_name = model_onnx.graph.input[0].name
    if trt_fp16:
        builder.fp16_mode = True
        print("Optimizing for FP16")
        config_flags = 1 << int(trt.BuilderFlag.FP16)  # | 1 << int(trt.BuilderFlag.STRICT_TYPES)
    else:
        config_flags = 0
    builder.max_workspace_size = max_workspace_size if max_workspace_size else (4 * 1024 * 1024 * 1024)
    config = builder.create_builder_config()
    config.flags = config_flags
    profile = builder.create_optimization_profile()
    profile.set_shape(
        input_name,
        min=(1, input_feats, seq_len),
        opt=(batch_size, input_feats, seq_len),
        max=(max_batch_size, input_feats, max_seq_len),
    )
    config.add_optimization_profile(profile)
    explicit_batch = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(explicit_batch)
    with trt.OnnxParser(network, TRT_LOGGER) as parser:
        parsed = parser.parse(model)
        print("Parsing returned ", parsed)
        return builder.build_engine(network, config=config)

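A hedged usage sketch of Example 15; the file names are placeholders, and serializing the returned engine follows the same pattern as the earlier examples:

engine = build_engine('encoder.onnx', seq_len=192, batch_size=8, trt_fp16=True)
if engine is not None:
    with open('encoder.plan', 'wb') as f:
        f.write(engine.serialize())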