This article collects typical usage examples of the Python onnx.ModelProto class. If you are unsure how onnx.ModelProto is used in practice, the curated code examples below may help. You can also explore further usage examples from the onnx module where this class is defined.
The following shows 15 code examples of onnx.ModelProto, ordered by popularity by default.
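Before the examples, here is a minimal sketch of the most common ways to obtain an onnx.ModelProto: loading it from a file with onnx.load, or parsing serialized bytes directly. The file name "model.onnx" is a placeholder, not a file referenced by any example below.

import onnx

# Load a serialized model from disk; onnx.load returns an onnx.ModelProto.
model = onnx.load("model.onnx")  # placeholder path

# Validate the proto and inspect a few of its fields.
onnx.checker.check_model(model)
print(model.producer_name, model.ir_version)
print(len(model.graph.node), "nodes in the graph")

# The same proto can also be built from raw bytes.
with open("model.onnx", "rb") as f:
    model2 = onnx.ModelProto()
    model2.ParseFromString(f.read())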
Example 1: optimize_onnx_model
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def optimize_onnx_model(origin_model, nchw_inputs=None, stop_initializers=None):
    # type: (onnx.ModelProto, list, list) -> onnx.ModelProto
    """
    The origin model is updated in place by the optimization.
    :param origin_model:
    :param nchw_inputs:
    :param stop_initializers:
    :return:
    """
    graph = origin_model.graph
    nodelist = list(graph.node)

    opt_graph = optimize_onnx_graph(nodelist,
                                    nchw_inputs=nchw_inputs,
                                    inputs=graph.input,
                                    outputs=graph.output,
                                    initializers=list(graph.initializer),
                                    stop_initializers=stop_initializers,
                                    model_value_info=graph.value_info,
                                    model_name=graph.name,
                                    target_opset=next(opset_.version for opset_ in origin_model.opset_import
                                                      if opset_.domain == '' or opset_.domain == 'ai.onnx'))
    origin_model.graph.CopyFrom(opt_graph)
    return origin_model
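The target_opset argument above is read from the model's opset_import field, which lists one OperatorSetIdProto per operator domain. A small standalone sketch of that lookup, assuming model is an already-loaded onnx.ModelProto:

def get_ai_onnx_opset(model):
    # The default ai.onnx domain is stored either as '' or as 'ai.onnx'.
    for opset in model.opset_import:
        if opset.domain in ('', 'ai.onnx'):
            return opset.version
    return None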
Example 2: run_model
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def run_model(onnx_model, data_inputs):
    # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX model to an ngraph model and perform computation on input data.

    :param onnx_model: ONNX ModelProto describing an ONNX model
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = BACKEND_NAME
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        ng_model_function = import_onnx_model(onnx_model)
        runtime = get_runtime()
        computation = runtime.computation(ng_model_function)
        return computation(*data_inputs)
    else:
        raise RuntimeError('The requested nGraph backend <'
                           + NgraphBackend.backend_name + '> is not supported!')
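The nGraph backend above is only one way to execute a ModelProto. As a point of comparison, here is a hedged sketch of running the same kind of model through onnxruntime, assuming the onnxruntime package is installed and the inputs are given in graph-input order:

import onnxruntime as ort

def run_with_onnxruntime(onnx_model, data_inputs):
    # An InferenceSession can be created directly from the serialized ModelProto bytes.
    session = ort.InferenceSession(onnx_model.SerializeToString(),
                                   providers=["CPUExecutionProvider"])
    feed = {inp.name: arr for inp, arr in zip(session.get_inputs(), data_inputs)}
    # Passing None requests all model outputs.
    return session.run(None, feed)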
Example 3: generate_model
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def generate_model(self, inputs, outputs, graph, model) -> 'ModelProto':
    # assign param names
    self.param2name = {id(p): 'param' + n.replace('/', '_')
                       for n, p in model.namedparams()}
    for p, n in self.param2name.items():
        assigned_names.append(n)

    # assign onnx name
    assign_onnx_name(graph)

    graph_ = self.generate_graph(inputs, outputs, graph, None, True)
    onnx_model = oh.make_model(
        graph_, producer_name="elichika", producer_version="0.1")
    return onnx_model
Example 4: _get_onnx_outputs_info
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, str):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict
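The helper above delegates per-output parsing to _input_from_onnx_input, which is not shown here. For reference, entries in graph.output are ValueInfoProto messages, so the name, element type, and shape can also be read directly from the proto; a minimal sketch (not the author's helper):

from onnx import TensorProto

def describe_outputs(onnx_model):
    info = {}
    for o in onnx_model.graph.output:
        tensor_type = o.type.tensor_type
        elem_type = TensorProto.DataType.Name(tensor_type.elem_type)
        # Dimensions are either fixed (dim_value) or symbolic (dim_param).
        shape = [d.dim_value if d.HasField('dim_value') else d.dim_param
                 for d in tensor_type.shape.dim]
        info[o.name] = (o.name, elem_type, shape)
    return info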
Example 5: prepare
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(cls,
            model,  # type: ModelProto
            device='CPU',  # type: Text
            minimum_ios_deployment_target='12',  # type: str
            **kwargs  # type: Any
            ):
    # type: (...) -> CoreMLRep
    super(CoreMLBackend, cls).prepare(model, device, **kwargs)
    if DEBUG:
        with open('/tmp/node_model.onnx', 'wb') as f:
            s = model.SerializeToString()
            f.write(s)
    coreml_model = convert(model, minimum_ios_deployment_target=minimum_ios_deployment_target)
    if DEBUG:
        coreml_model.save('/tmp/node_model.mlmodel')
    onnx_outputs_info = _get_onnx_outputs_info(model)
    return CoreMLRep(coreml_model, onnx_outputs_info, device == 'CPU',
                     minimum_ios_deployment_target=minimum_ios_deployment_target)
Example 6: is_compatible
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def is_compatible(cls,
                  model,  # type: ModelProto
                  device='CPU',  # type: Text
                  **kwargs  # type: Any
                  ):  # type: (...) -> bool
    # Return whether the model is compatible with CoreML.
    '''
    This function will gradually grow to cover more cases.
    Need to be careful of false negatives. There are some cases that seemingly
    are not supported on CoreML, which the graph transformer optimizes and converts to
    a graph that can be converted to CoreML.

    2. Unsupported ops: If graph has one of unsupported op, exit
    '''
    ## TODO: Add un-supported ops
    unsupported_ops = []
    graph = model.graph
    for node in graph.node:
        if node.op_type in unsupported_ops:
            return False
    return True
Example 7: __init__
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def __init__(self, model):
    """
    This constructor takes a reference to an ONNX model, checks the model, infers
    intermediate shapes, and sets up maps from name to type and to node or constant value.

    In the resulting graph there should only be operations left that have one of the
    following types: [Const, MatMul, Add, BiasAdd, Conv2D, Reshape, MaxPool, AveragePool,
    Placeholder, Relu, Sigmoid, Tanh].
    If the input is a Keras model, operations with type Pack, Shape, StridedSlice, and Prod
    are ignored so that the Flatten layer can be used.

    Arguments
    ---------
    model : onnx.ModelProto
    """
    if issubclass(model.__class__, onnx.ModelProto):
        onnx.checker.check_model(model)
        self.model = model
        self.nodes = self.model.graph.node
        self.shape_map, self.constants_map, self.output_node_map, self.input_node_map, self.placeholdernames = prepare_model(model)
    else:
        assert 0, 'not onnx model'
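The constructor above relies on the project's own prepare_model helper to infer intermediate shapes. onnx itself ships a generic shape-inference pass that can be applied to any ModelProto; a short sketch of that standard API:

from onnx import shape_inference

def infer_intermediate_shapes(model):
    # Returns a new ModelProto whose graph.value_info is populated with inferred
    # types/shapes for intermediate tensors (where inference succeeds).
    inferred = shape_inference.infer_shapes(model)
    return {vi.name: vi for vi in inferred.graph.value_info}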
Example 8: _get_onnx_outputs_info
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def _get_onnx_outputs_info(model):  # type: (...) -> Dict[Text, EdgeInfo]
    """
    Takes in an onnx model and returns a dictionary
    of onnx output names mapped to a tuple that is (output_name, type, shape)
    """
    if isinstance(model, _string_types):
        onnx_model = onnx.load(model)
    elif isinstance(model, onnx.ModelProto):
        onnx_model = model

    graph = onnx_model.graph
    onnx_output_dict = {}
    for o in graph.output:
        out = _input_from_onnx_input(o)
        onnx_output_dict[out[0]] = out
    return onnx_output_dict
Example 9: prepare
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(
    cls,
    model,  # type: ModelProto
    device="CPU",  # type: Text
    minimum_ios_deployment_target="12",  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> CoreMLRep
    super(CoreMLBackend, cls).prepare(model, device, **kwargs)
    if DEBUG:
        with open("/tmp/node_model.onnx", "wb") as f:
            s = model.SerializeToString()
            f.write(s)
    coreml_model = convert(
        model, minimum_ios_deployment_target=minimum_ios_deployment_target
    )
    if DEBUG:
        coreml_model.save("/tmp/node_model.mlmodel")
    onnx_outputs_info = _get_onnx_outputs_info(model)
    return CoreMLRep(
        coreml_model,
        onnx_outputs_info,
        device == "CPU",
        minimum_ios_deployment_target=minimum_ios_deployment_target,
    )
Example 10: is_compatible
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def is_compatible(
    cls,
    model,  # type: ModelProto
    device="CPU",  # type: Text
    **kwargs  # type: Any
):  # type: (...) -> bool
    # Return whether the model is compatible with CoreML.
    """
    This function will gradually grow to cover more cases.
    Need to be careful of false negatives. There are some cases that seemingly
    are not supported on CoreML, which the graph transformer optimizes and converts to
    a graph that can be converted to CoreML.

    2. Unsupported ops: If graph has one of unsupported op, exit
    """
    ## TODO: Add un-supported ops
    unsupported_ops = []
    graph = model.graph
    for node in graph.node:
        if node.op_type in unsupported_ops:
            return False
    return True
Example 11: save
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def save(self, dst):
    if self._onnx_model_path:
        shutil.copyfile(
            self._onnx_model_path, self.spec._saved_model_file_path(dst)
        )
    elif self._model_proto:
        try:
            import onnx
        except ImportError:
            raise MissingDependencyException(
                '"onnx" package is required for packing with OnnxModelArtifact'
            )

        onnx.save_model(self._model_proto, self.spec._saved_model_file_path(dst))
    else:
        raise InvalidArgument(
            'onnx.ModelProto or a model file path is required to pack an '
            'OnnxModelArtifact'
        )
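The artifact's save path relies on onnx.save_model, which serializes a ModelProto to disk; loading it back with onnx.load yields an equivalent proto. A small round-trip sketch using a placeholder path:

import onnx

def roundtrip(model_proto, path="/tmp/artifact.onnx"):  # placeholder path
    onnx.save_model(model_proto, path)   # write the serialized protobuf
    reloaded = onnx.load(path)           # returns an onnx.ModelProto
    assert reloaded.graph.name == model_proto.graph.name
    return reloaded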
Example 12: prepare
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def prepare(cls, model, device='CUDA:0', **kwargs):
    """Build a TensorRT engine from the onnx model.

    Parameters
    ----------
    model : onnx.ModelProto
        The onnx model.
    device : str, optional
        The executing device.

    Returns
    -------
    dragon.vm.tensorrt.ONNXBackendRep
        The backend rep.

    """
    if not isinstance(device, Device):
        device = Device(device)
    return ONNXBackendRep(model, device, **kwargs)
Example 13: run_model
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def run_model(cls, model, inputs, device='CUDA:0', **kwargs):
    """Build and run a TensorRT engine from the onnx model.

    Parameters
    ----------
    model : onnx.ModelProto
        The onnx model.
    inputs : Union[Sequence, Dict]
        The input arrays.
    device : str, optional
        The executing device.

    Returns
    -------
    namedtuple
        The model outputs.

    """
    return cls.prepare(model, device, **kwargs).run(inputs)
Example 14: optimize_model
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def optimize_model(input,
                   model_type,
                   num_heads,
                   hidden_size,
                   opt_level=99,
                   optimization_options=None):
    (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]

    input_model_path = input

    if run_onnxruntime and opt_level > 0:
        input_model_path = optimize_by_onnxruntime(input_model_path, use_gpu=False, opt_level=opt_level)
        logger.info("Use OnnxRuntime to optimize and save the optimized model to {}".format(input_model_path))

    model = ModelProto()
    with open(input_model_path, "rb") as f:
        model.ParseFromString(f.read())

    if model.producer_name and producer != model.producer_name:
        logger.warning(
            f"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter."
        )

    if optimization_options is None:
        optimization_options = BertOptimizationOptions(model_type)

    bert_model = optimizer_class(model, num_heads, hidden_size)
    bert_model.optimize(optimization_options)

    return bert_model
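optimize_model reads the (possibly onnxruntime-optimized) file back into a ModelProto using the plain protobuf API; that pattern is symmetric with SerializeToString. A minimal standalone sketch:

from onnx import ModelProto

def load_model_proto(path):
    model = ModelProto()
    with open(path, "rb") as f:
        model.ParseFromString(f.read())  # inverse of model.SerializeToString()
    return model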
Example 15: save_model_to_file
# Required module: import onnx [as alias]
# Or: from onnx import ModelProto [as alias]
def save_model_to_file(self, output_path):
    logger.info(f"Output model to {output_path}")

    if output_path.endswith(".json"):
        assert isinstance(self.model, ModelProto)
        with open(output_path, "w") as out:
            out.write(str(self.model))
    else:
        with open(output_path, "wb") as out:
            out.write(self.model.SerializeToString())
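Note that str(self.model) in the ".json" branch produces the protobuf text format rather than strict JSON. If real JSON output is wanted, protobuf's json_format module can serialize a ModelProto instead; a hedged alternative sketch:

from google.protobuf import json_format

def model_to_json(model_proto):
    # MessageToJson serializes any protobuf message, including onnx.ModelProto,
    # to a JSON string (large models will produce very large output).
    return json_format.MessageToJson(model_proto)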