This article collects typical usage examples of the Python method onnx.save. If you have been wondering what exactly onnx.save does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the onnx module, where this method is defined.
The following presents 8 code examples of the onnx.save method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
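Before the examples, here is a minimal sketch of the pattern they all share: obtain a ModelProto in memory, then serialize it with onnx.save (the file names are placeholders):

import onnx

# Load an existing model into an in-memory ModelProto...
model = onnx.load('model.onnx')
# ...optionally sanity-check it...
onnx.checker.check_model(model)
# ...and serialize it back to disk.
onnx.save(model, 'model_copy.onnx')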
Example 1: rewrite_onnx_file
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def rewrite_onnx_file(model_filename, out_filename, new_input_types):
    xmodel = onnx.load(model_filename)
    xmodel = rewrite_onnx_model(xmodel, new_input_types)
    onnx.save(xmodel, out_filename)
    return xmodel
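rewrite_onnx_model is defined elsewhere in the same project, and the exact structure of new_input_types depends on it; purely as a hypothetical illustration, a call could look like this:

import onnx

# Hypothetical: pin the first input to float32 with a fixed NCHW shape.
new_input_types = [(onnx.TensorProto.FLOAT, (1, 3, 224, 224))]
rewrite_onnx_file('model.onnx', 'model_rewritten.onnx', new_input_types)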
Example 2: main
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def main():
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    num_original_nodes, original_graph_str = traverse_graph(onnx_model.graph)
    # Optimizer passes to perform
    passes = [
        # 'eliminate_deadend',
        'eliminate_identity',
        'eliminate_nop_dropout',
        'eliminate_nop_pad',
        'eliminate_nop_transpose',
        'eliminate_unused_initializer',
        'extract_constant_to_initializer',
        'fuse_add_bias_into_conv',
        'fuse_bn_into_conv',
        'fuse_consecutive_concats',
        'fuse_consecutive_reduce_unsqueeze',
        'fuse_consecutive_squeezes',
        'fuse_consecutive_transposes',
        # 'fuse_matmul_add_bias_into_gemm',
        'fuse_pad_into_conv',
        # 'fuse_transpose_into_gemm',
        # 'lift_lexical_references',
    ]
    # Apply the optimization passes to the original serialized model
    optimized_model = optimizer.optimize(onnx_model, passes)
    num_optimized_nodes, optimized_graph_str = traverse_graph(optimized_model.graph)
    print('==> The model after optimization:\n{}\n'.format(optimized_graph_str))
    print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes))
    # Save the ONNX model
    onnx.save(optimized_model, args.output)
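Note that this snippet assumes the legacy onnx.optimizer module. The optimizer was removed from the main onnx package in version 1.9 and now lives in the separate onnxoptimizer project, so a version-tolerant import could look like this:

try:
    from onnx import optimizer  # onnx <= 1.8
except ImportError:
    import onnxoptimizer as optimizer  # pip install onnxoptimizer

Both variants expose the same optimize(model, passes) entry point used above.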
Example 3: save_model
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def save_model(MainModel, network_filepath, weight_filepath, dump_filepath):
    model = MainModel.KitModel(weight_filepath)
    onnx.save(model, dump_filepath)
    print('ONNX model file is saved as [{}], generated by [{}.py] and [{}].'.format(
        dump_filepath, network_filepath, weight_filepath))
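This pattern matches code generated by model converters such as MMdnn, where KitModel rebuilds the model from a weights file. As a hypothetical usage sketch (the file paths are placeholders, not from the original source):

import imp  # deprecated in Python 3, but matches the era of this snippet

# Load the generated network definition as a module, then rebuild the
# model from its weights and dump it as ONNX.
MainModel = imp.load_source('MainModel', 'converted_model.py')
save_model(MainModel, 'converted_model', 'converted_weights.npy', 'converted_model.onnx')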
Example 4: save
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def save(self, filename):
    """Saves the wrapped ONNX ModelProto into a file with the given name."""
    onnx.save(self._model_proto, filename)
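For context, a minimal sketch of the kind of wrapper class this method would belong to (the class name and constructor are assumptions, not from the original source):

import onnx

class OnnxModelWrapper:
    """Hypothetical wrapper that holds a ModelProto in memory."""

    def __init__(self, model_proto):
        self._model_proto = model_proto

    def save(self, filename):
        """Saves the wrapped ONNX ModelProto into a file with the given name."""
        onnx.save(self._model_proto, filename)

# Usage: OnnxModelWrapper(onnx.load('model.onnx')).save('copy.onnx')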
Example 5: graph_to_onnx
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def graph_to_onnx(graph, onnx_model_path):
    import onnx
    # TODO: do this in the future using the ONNX IR
    onnx_out = graph.produce_onnx_model()
    onnx.save(onnx_out, onnx_model_path)
    return onnx_out
Example 6: main
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def main():
    args = parse_args()
    if not args.out.endswith('.onnx'):
        raise ValueError('The output file must be an onnx file.')
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    # Build the model and load the checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Only CPU mode is supported for now
    model.cpu().eval()
    # Customized ops are not supported; use torchvision ops instead.
    for m in model.modules():
        if isinstance(m, (RoIPool, RoIAlign)):
            # Set use_torchvision on the fly
            m.use_torchvision = True
    # TODO: a better way to override the forward function
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'ONNX conversion is currently not supported with '
            f'{model.__class__.__name__}')
    input_data = torch.empty((1, *input_shape),
                             dtype=next(model.parameters()).dtype,
                             device=next(model.parameters()).device)
    onnx_model = export_onnx_model(model, (input_data, ), args.passes)
    # Print a human-readable representation of the graph
    # (printable_graph returns a string, so it must be printed explicitly)
    print(onnx.helper.printable_graph(onnx_model.graph))
    print(f'saving model in {args.out}')
    onnx.save(onnx_model, args.out)
Example 7: main
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def main():
    """Run the DarkNet-to-ONNX conversion for YOLOv3-608."""
    # Have to use Python 2 due to hashlib compatibility
    if sys.version_info[0] > 2:
        raise Exception("This script is only compatible with python2, please re-run this "
                        "script with python2. The rest of this sample can be run with "
                        "either version of python.")
    # Download the config for YOLOv3 if not present yet, and verify the checksum:
    cfg_file_path = download_file(
        'yolov3.cfg',
        'https://raw.githubusercontent.com/pjreddie/darknet/f86901f6177dfc6116360a13cc06ab680e0c86b0/cfg/yolov3.cfg',
        'b969a43a848bbf26901643b833cfb96c')
    # These are the only layers DarkNetParser will extract parameters from. The three layers of
    # type 'yolo' are not parsed in detail because they are handled in the post-processing later:
    supported_layers = ['net', 'convolutional', 'shortcut',
                        'route', 'upsample']
    # Create a DarkNetParser object, and then use it to generate an OrderedDict with all
    # layers' configs from the cfg file:
    parser = DarkNetParser(supported_layers)
    layer_configs = parser.parse_cfg_file(cfg_file_path)
    # We do not need the parser anymore once we have layer_configs:
    del parser
    # Among the layer configs above, there are three outputs whose shapes we need to know
    # (in CHW format):
    output_tensor_dims = OrderedDict()
    output_tensor_dims['082_convolutional'] = [255, 19, 19]
    output_tensor_dims['094_convolutional'] = [255, 38, 38]
    output_tensor_dims['106_convolutional'] = [255, 76, 76]
    # Create a GraphBuilderONNX object with the known output tensor dimensions:
    builder = GraphBuilderONNX(output_tensor_dims)
    # We want to populate our network with weights later, so we download them from
    # the official mirror (and verify the checksum):
    weights_file_path = download_file(
        'yolov3.weights',
        'https://pjreddie.com/media/files/yolov3.weights',
        'c84e5b99d0e52cd466ae710cadf6d84c')
    # Now generate an ONNX graph with weights from the previously parsed layer configurations
    # and the weights file:
    yolov3_model_def = builder.build_onnx_graph(
        layer_configs=layer_configs,
        weights_file_path=weights_file_path,
        verbose=True)
    # Once we have the model definition, we do not need the builder anymore:
    del builder
    # Perform a sanity check on the ONNX model definition:
    onnx.checker.check_model(yolov3_model_def)
    # Serialize the generated ONNX graph to this file:
    output_file_path = 'yolov3.onnx'
    onnx.save(yolov3_model_def, output_file_path)
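download_file is a helper defined elsewhere in this sample; as a sketch of what it plausibly does (download once, then verify the MD5 checksum with hashlib; the exact signature is an assumption):

import hashlib
import os

try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2

def download_file(local_path, url, md5_checksum):
    """Hypothetical helper: fetch a file if it is missing, then verify its MD5 hash."""
    if not os.path.exists(local_path):
        urlretrieve(url, local_path)
    md5 = hashlib.md5()
    with open(local_path, 'rb') as f:
        # Read in 1 MiB chunks to avoid loading large weight files into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b''):
            md5.update(chunk)
    if md5.hexdigest() != md5_checksum:
        raise ValueError('Checksum mismatch for {}'.format(local_path))
    return local_path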
Example 8: main
# Required import: import onnx [as alias]
# Or: from onnx import save [as alias]
def main():
    parser = argparse.ArgumentParser(
        description='Quantize model with specified parameters')
    parser.add_argument('--no_per_channel', '-t',
                        action='store_true', default=False)
    parser.add_argument('--nbits', type=int, default=8)
    parser.add_argument('--quantization_mode', default='Integer',
                        choices=('Integer', 'QLinear'))
    parser.add_argument('--static', '-s', action='store_true', default=False)
    parser.add_argument('--asymmetric_input_types',
                        action='store_true', default=False)
    parser.add_argument('--input_quantization_params', default='')
    parser.add_argument('--output_quantization_params', default='')
    parser.add_argument('model')
    parser.add_argument('output')
    args = parser.parse_args()
    args.per_channel = not args.no_per_channel
    del args.no_per_channel
    if args.quantization_mode == 'QLinear':
        args.quantization_mode = quantize.QuantizationMode.QLinearOps
    else:
        args.quantization_mode = quantize.QuantizationMode.IntegerOps
    if len(args.input_quantization_params) != 0:
        args.input_quantization_params = json.loads(
            args.input_quantization_params)
    else:
        args.input_quantization_params = None
    if len(args.output_quantization_params) != 0:
        args.output_quantization_params = json.loads(
            args.output_quantization_params)
    else:
        args.output_quantization_params = None
    # Load the ONNX model
    model_file = args.model
    model = onnx.load(model_file)
    del args.model
    output_file = args.output
    del args.output
    # Quantize, passing the remaining parsed arguments as keyword arguments
    print('Quantize config: {}'.format(vars(args)))
    quantized_model = quantize.quantize(model, **vars(args))
    print('Saving "{}" to "{}"'.format(model_file, output_file))
    # Save the quantized model
    onnx.save(quantized_model, output_file)
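The argument names here mirror the legacy quantization API that shipped with early onnxruntime releases. As a sketch, a direct call without the argument parser might look like this (the import path and defaults are assumptions tied to those old versions):

import onnx
# Legacy quantization module; the exact import path depends on the
# onnxruntime version and is an assumption here.
from onnxruntime.quantization import quantize

model = onnx.load('model.onnx')
quantized_model = quantize.quantize(
    model,
    per_channel=True,
    nbits=8,
    quantization_mode=quantize.QuantizationMode.IntegerOps)
onnx.save(quantized_model, 'model_quantized.onnx')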