This article collects and summarizes typical usage examples of onnx.NodeProto in Python. If you have been wondering how exactly onnx.NodeProto is used, or are looking for concrete examples of it in real code, the curated samples below may help. You can also explore further usage examples of the onnx module in which NodeProto is defined.
The following presents 15 code examples of onnx.NodeProto, ordered by popularity by default.
Example 1: __init__
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def __init__(self,
             node=None,
             name=None,
             inputs=None,
             outputs=None,
             attr=None,
             domain=None,
             op_type=None):
    # storing a reference to the original protobuf object
    if node is None:
        self.node = None
        self.name = name or ""
        self.inputs = inputs or []
        self.attr = attr or {}
        self.domain = domain or ""
        self.op_type = op_type or ""
        self.outputs = outputs or self.get_outputs_names()
    elif isinstance(node, (OnnxNode, NodeProto)):
        self._load_onnx_node(node)
    elif isinstance(node, NodeDef):
        self._load_tf_node(node)
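A minimal usage sketch (not from the original project; the wrapper class name `Node` and the `Relu` proto are illustrative) showing the NodeProto branch of this constructor:

from onnx import helper

proto = helper.make_node("Relu", inputs=["x"], outputs=["y"], name="relu_0")
wrapped = Node(proto)   # assumed class name; dispatches to _load_onnx_node
print(wrapped.op_type)  # -> "Relu"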
Example 2: GetOpNodeProducer
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def GetOpNodeProducer(embed_docstring=False, **kwargs):  # type: (bool, **Any) -> _NodeProducer
    def ReallyGetOpNode(op, op_id):  # type: (NodeProto, int) -> pydot.Node
        if op.name:
            node_name = '%s/%s (op#%d)' % (op.name, op.op_type, op_id)
        else:
            node_name = '%s (op#%d)' % (op.op_type, op_id)
        for i, input in enumerate(op.input):
            node_name += '\n input' + str(i) + ' ' + input
        for i, output in enumerate(op.output):
            node_name += '\n output' + str(i) + ' ' + output
        node = pydot.Node(node_name, **kwargs)
        if embed_docstring:
            url = _form_and_sanitize_docstring(op.doc_string)
            node.set_URL(url)
        return node
    return ReallyGetOpNode
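A hedged sketch of how such a producer is typically consumed to render a model's ops with pydot (the model path and output file are hypothetical):

import onnx
import pydot

model = onnx.load("model.onnx")  # hypothetical path
producer = GetOpNodeProducer(shape="box")  # kwargs are forwarded to pydot.Node
dot = pydot.Dot(graph_type="digraph")
for op_id, op in enumerate(model.graph.node):
    dot.add_node(producer(op, op_id))
dot.write_png("model.png")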
Example 3: _make_fake_if_op
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def _make_fake_if_op(self,
                     true_nodes,  # type: Sequence[NodeProto]
                     false_nodes,  # type: Sequence[NodeProto]
                     output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                     ):  # type: (...) -> List[NodeProto]
    true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
    true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
    false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
    if_inputs = ["condition"]
    if_outputs = [name for _, _, name in output_types]
    retval_nodes = [
        helper.make_node("Constant", [], ["condition"], value=true),
        helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                         else_branch=false_graph)
    ]
    return retval_nodes

# fn is a function that takes a single node as argument
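A sketch of how this test helper might be invoked (the branch nodes, names, and shapes are illustrative, not from the source; the resulting subgraphs deliberately skip output declarations, so they are for optimizer tests only):

then_nodes = [helper.make_node("Identity", ["X"], ["_out"])]
else_nodes = [helper.make_node("Neg", ["X"], ["_out"])]
if_nodes = self._make_fake_if_op(
    then_nodes, else_nodes,
    [(TensorProto.FLOAT, (2, 3), "out")])
# if_nodes is [Constant(condition), If(...)], ready to splice into a test graph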
Example 4: test_nop_transpose
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def test_nop_transpose(self):  # type: () -> None
    nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
    nodes.extend(self._make_fake_loop_op(
        [helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
        [(TensorProto.FLOAT, (2, 3), "Y")],
        [(TensorProto.FLOAT, (2, 3), "Y2")]))
    graph = helper.make_graph(
        nodes,
        "test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
        [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
         helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
    optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])

    def check_transpose(node):  # type: (NodeProto) -> None
        assert node.op_type != "Transpose"
    self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
    # Use of the output from the Transpose node in the main graph should
    # have been replaced with the input to the identity node
    assert len(optimized_model.graph.output) == 2
    assert optimized_model.graph.output[0].name == "X"
    # Use of the output from the Transpose node in the loop graph should
    # have been replaced with the input to that identity node
    assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
    assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
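The `_optimized` helper is not shown in this snippet; in the upstream onnx test suite it is roughly a thin wrapper over the optimizer API of older onnx releases (an assumption worth verifying; the optimizer later moved to the separate onnxoptimizer package):

from onnx import helper, optimizer

def _optimized(self, graph, opts):  # sketch, assuming an older onnx with onnx.optimizer
    model = helper.make_model(graph)
    return optimizer.optimize(model, opts)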
Example 5: __init__
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def __init__(self, node):
    '''
    Create OnnxNode from NodeProto

    Parameters
    ----------
    node : NodeProto

    Returns
    -------
    :class:`OnnxNode` object
    '''
    self.name = str(node.name)
    self.op_type = str(node.op_type)
    self.domain = str(node.domain)
    self.attrs = dict([(attr.name,
                        _convert_onnx_attribute_proto(attr))
                       for attr in node.attribute])
    self.input = list(node.input)
    self.output = list(node.output)
    self.node_proto = node
    self.parents = []
    self.children = []
    self.tensors = {}
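A short usage sketch (the Gemm node and alpha attribute are illustrative, not from the source):

from onnx import helper

proto = helper.make_node("Gemm", ["A", "B", "C"], ["Y"], alpha=0.5)
onnx_node = OnnxNode(proto)
print(onnx_node.attrs["alpha"])  # -> 0.5, converted by _convert_onnx_attribute_proto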
Example 6: run_node
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def run_node(cls,
             node,  # type: onnx.NodeProto
             inputs,  # type: List[np.ndarray]
             device='CPU',  # type: Text
             outputs_info=None,  # type: Optional[Sequence[Tuple[np.dtype, Tuple[int, ...]]]]
             **kwargs  # type: Any
             ):  # type: (...) -> List[Any]
    """Prepare and run a computation on an ONNX node."""
    # default values for input/output tensors
    input_tensor_types = [np_dtype_to_tensor_type(node_input.dtype) for node_input in inputs]
    output_tensor_types = [onnx.TensorProto.FLOAT for idx in range(len(node.output))]
    output_tensor_shapes = [()]  # type: List[Tuple[int, ...]]
    if outputs_info is not None:
        output_tensor_types = [np_dtype_to_tensor_type(dtype) for (dtype, shape) in
                               outputs_info]
        output_tensor_shapes = [shape for (dtype, shape) in outputs_info]
    input_tensors = [make_tensor_value_info(name, tensor_type, value.shape)
                     for name, value, tensor_type in zip(node.input, inputs,
                                                         input_tensor_types)]
    output_tensors = [make_tensor_value_info(name, tensor_type, shape)
                      for name, shape, tensor_type in zip(node.output, output_tensor_shapes,
                                                          output_tensor_types)]
    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    if 'opset_version' in kwargs:
        model.opset_import[0].version = kwargs['opset_version']
    return cls.prepare(model, device).run(inputs)
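`np_dtype_to_tensor_type` is imported from elsewhere in this backend. A rough equivalent, assuming onnx's built-in dtype mapping table, would be:

import numpy as np
import onnx.mapping

def np_dtype_to_tensor_type(dtype):
    # maps e.g. np.float32 -> onnx.TensorProto.FLOAT
    return onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]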
Example 7: run_node
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def run_node(onnx_node, data_inputs, **kwargs):
    # type: (onnx.NodeProto, List[np.ndarray], Dict[Text, Any]) -> List[np.ndarray]
    """
    Convert ONNX node to ngraph node and perform computation on input data.

    :param onnx_node: ONNX NodeProto describing a computation node
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = BACKEND_NAME
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        return NgraphBackend.run_node(onnx_node, data_inputs, **kwargs)
    else:
        raise RuntimeError('The requested nGraph backend <'
                           + NgraphBackend.backend_name + '> is not supported!')
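A typical call, assuming float32 inputs and a simple Add node:

import numpy as np
from onnx import helper

node = helper.make_node("Add", ["A", "B"], ["C"])
a = np.array([1.0, 2.0], dtype=np.float32)
b = np.array([3.0, 4.0], dtype=np.float32)
result = run_node(node, [a, b])  # -> [array([4., 6.], dtype=float32)]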
Example 8: from_onnx
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def from_onnx(node):  # type: (NodeProto) -> Node
    attrs = Attributes.from_onnx(node.attribute)
    name = Text(node.name)
    if len(name) == 0:
        name = "_".join(node.output)
    return Node(
        name, node.op_type, attrs, list(node.input), list(node.output)
    )
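A sketch of the unnamed-node fallback (the Split proto is illustrative; `Node` and `Attributes` are assumed to come from the same module as `from_onnx`):

from onnx import helper

proto = helper.make_node("Split", ["x"], ["c", "d"], axis=0)  # no explicit name
node = Node.from_onnx(proto)
print(node.name)  # -> "c_d", joined from the output names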
Example 9: _onnx_create_model
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def _onnx_create_model(nodes,  # type: Sequence[NodeProto]
                       inputs,  # type: Sequence[Tuple[Text, Tuple[int, ...]]]
                       outputs,  # type: Sequence[Tuple[Text, Tuple[int, ...], int]]
                       initializer=[],  # type: Sequence[TensorProto]
                       ):
    # type: (...) -> ModelProto
    initializer_inputs = [
        helper.make_tensor_value_info(
            t.name,
            TensorProto.FLOAT,
            t.dims
        ) for t in initializer
    ]

    graph = helper.make_graph(
        nodes=nodes,
        name="test",
        inputs=initializer_inputs + [
            helper.make_tensor_value_info(
                input_[0],
                TensorProto.FLOAT,
                input_[1]
            ) for input_ in inputs
        ],
        outputs=[
            helper.make_tensor_value_info(
                output_[0],
                output_[2],
                output_[1]
            ) for output_ in outputs
        ],
        initializer=initializer
    )
    onnx_model = helper.make_model(graph)
    return onnx_model
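An illustrative call building a one-node model (names and shapes are made up; note the output tuples are (name, shape, tensor type)):

nodes = [helper.make_node("Relu", ["input"], ["output"])]
model = _onnx_create_model(
    nodes,
    inputs=[("input", (1, 3))],
    outputs=[("output", (1, 3), TensorProto.FLOAT)])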
Example 10: _load_onnx_node
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def _load_onnx_node(self, node):
    if isinstance(node, NodeProto):
        node = OnnxNode(node)
    self.name = node.name
    self.inputs = node.inputs
    self.outputs = node.outputs
    self.attr = node.attrs
    self.domain = node.domain
    self.op_type = node.op_type
Example 11: outputs_proto
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def outputs_proto(self):
    return self._outputs_proto

# This list holds the protobuf objects of type NodeProto
# representing the ops in the converted ONNX graph.
Example 12: __init__
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def __init__(self, onnx_proto_instance, graph):  # type: (onnx.NodeProto, GraphWrapper) -> None
    super(NodeWrapper, self).__init__(onnx_proto_instance, graph)
    self.input = [self._graph.get_input(input_name) for input_name in self._proto.input]
    self.output = [self._graph.get_input(output_name) for output_name in self._proto.output]
    self.attribute = [AttributeWrapper(attr, self._graph) for attr in self._proto.attribute]
Example 13: run_node
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def run_node(cls, onnx_node, inputs, device='CPU'):
    # type: (onnx.NodeProto, List[numpy.ndarray], str) -> List[numpy.ndarray]
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                     for name, value in zip(onnx_node.input, inputs)]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                      for name, value in zip(onnx_node.output, ())]
    graph = make_graph([onnx_node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    return cls.prepare(model).run(inputs)
Example 14: check_node
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def check_node():  # type: () -> None
    parser = argparse.ArgumentParser('check-node')
    parser.add_argument('node_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()
    node = NodeProto()
    node.ParseFromString(args.node_pb.read())
    checker.check_node(node)
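To produce a `node_pb` file this entry point can consume (the file name is hypothetical):

from onnx import helper

node = helper.make_node("Relu", ["x"], ["y"])
with open("node.pb", "wb") as f:  # hypothetical file name
    f.write(node.SerializeToString())
# then run: check-node node.pb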
Example 15: _make_fake_loop_op
# Required import: import onnx [as alias]
# Or: from onnx import NodeProto [as alias]
def _make_fake_loop_op(self,
                       body_nodes,  # type: Sequence[NodeProto]
                       input_types,  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                       output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                       ):  # type: (...) -> List[NodeProto]
    zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
    true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
    # lcd is a dummy loop-carried dependency that only exists because
    # right now the schema checker is broken and assumes a variadic
    # input needs at least one value.
    graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
                    helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
    for type, shape, name in input_types:
        graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
    graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
    for type, shape, name in output_types:
        graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
    body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                   graph_outputs)
    loop_inputs = ["trip_count", "condition"]
    loop_inputs.extend([name for _, _, name in input_types])
    # TODO: fix checker to accept 0-input variadic inputs
    if len(loop_inputs) == 2:
        loop_inputs.append("")
    loop_outputs = [name for _, _, name in output_types]
    retval_nodes = [
        helper.make_node("Constant", [], ["trip_count"], value=zero),
        helper.make_node("Constant", [], ["condition"], value=true),
        helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
    ]
    return retval_nodes
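The returned node list is meant to be spliced into a larger test graph, exactly as Example 4 above does:

nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
nodes.extend(self._make_fake_loop_op(
    [helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
    [(TensorProto.FLOAT, (2, 3), "Y")],
    [(TensorProto.FLOAT, (2, 3), "Y2")]))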