This article collects typical usage examples of the Python method onnx.helper.make_node. If you are wondering what helper.make_node does and how to call it, the curated code examples below should help. You can also browse further usage examples of the containing module, onnx.helper.
The sections below present 15 code examples of helper.make_node, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
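Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the "x"/"y" names and the Relu op are chosen purely for illustration) showing how helper.make_node is normally combined with helper.make_graph and helper.make_model:

import onnx
from onnx import TensorProto, helper

# A single Relu node: op type, input names, output names.
node = helper.make_node("Relu", inputs=["x"], outputs=["y"])

# Wrap the node in a graph and a model, then validate the result.
graph = helper.make_graph(
    [node],
    "relu_graph",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])],
)
model = helper.make_model(graph, producer_name="make_node_demo")
onnx.checker.check_model(model)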
Example 1: test_broadcast
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_broadcast():
    """Test for broadcasting in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Add", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes,
                              "bcast_test",
                              inputs,
                              outputs)
    bcast_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(bcast_model)
    numpy_op = input1 + input2
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 2: test_greater
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_greater():
    """Test for logical greater in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes,
                              "greater_test",
                              inputs,
                              outputs)
    greater_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(greater_model)
    numpy_op = np.greater(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 3: test_lesser
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_lesser():
    """Test for logical lesser in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes,
                              "lesser_test",
                              inputs,
                              outputs)
    lesser_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(lesser_model)
    numpy_op = np.less(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 4: test_equal
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_equal():
    """Test for logical equal in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]
    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]
    graph = helper.make_graph(nodes,
                              "equal_test",
                              inputs,
                              outputs)
    equal_model = helper.make_model(graph)
    bkd_rep = mxnet_backend.prepare(equal_model)
    numpy_op = np.equal(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 5: _make_upsample_node
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def _make_upsample_node(self, layer_name, layer_dict):
    """Create an ONNX Upsample node with the properties from
    the DarkNet-based graph.

    Keyword arguments:
    layer_name -- the layer's name (also the corresponding key in layer_configs)
    layer_dict -- a layer parameter dictionary (one element of layer_configs)
    """
    upsample_factor = float(layer_dict['stride'])
    previous_node_specs = self._get_previous_node_specs()
    inputs = [previous_node_specs.name]
    channels = previous_node_specs.channels
    assert channels > 0
    upsample_node = helper.make_node(
        'Upsample',
        mode='nearest',
        # For ONNX versions <0.7.0, Upsample nodes accept different parameters than 'scales':
        scales=[1.0, 1.0, upsample_factor, upsample_factor],
        inputs=inputs,
        outputs=[layer_name],
        name=layer_name,
    )
    self._nodes.append(upsample_node)
    return layer_name, channels
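Note that the attribute-style scales shown above only applies to older ONNX opsets: from opset 9 onwards, Upsample expects the scales as a second tensor input (and opset 10 deprecates Upsample in favour of Resize). A rough sketch of the newer form, reusing the variables from the function above and assuming the surrounding builder keeps a list of graph initializers, might look like this:

# Sketch only: for opset >= 9, 'scales' is an input tensor, not an attribute.
scales_name = layer_name + '_scales'  # hypothetical tensor name
scales_tensor = helper.make_tensor(
    scales_name, TensorProto.FLOAT, [4],
    [1.0, 1.0, upsample_factor, upsample_factor])
upsample_node = helper.make_node(
    'Upsample',
    mode='nearest',
    inputs=inputs + [scales_name],
    outputs=[layer_name],
    name=layer_name,
)
# scales_tensor would then be appended to the graph initializers.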
Example 6: test_unsqueeze
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_unsqueeze():
    data = np.random.randn(3, 4, 5).astype(np.float32)
    expected_output = np.expand_dims(data, axis=0)
    node = onnx.helper.make_node('Unsqueeze', inputs=['x'], outputs=['y'], axes=[0])
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])

    expected_output = np.reshape(data, [1, 3, 4, 5, 1])
    node = onnx.helper.make_node('Unsqueeze', inputs=['x'], outputs=['y'], axes=[0, 4])
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])

    expected_output = np.reshape(data, [1, 3, 1, 4, 5])
    node = onnx.helper.make_node('Unsqueeze', inputs=['x'], outputs=['y'], axes=[0, 2])
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
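The axes attribute used above is the pre-opset-13 form of Unsqueeze; from opset 13 onwards the axes are passed as a second int64 tensor input. A minimal sketch of the newer call (the tensor name 'axes' is an arbitrary choice):

# Sketch only: for opset >= 13, Unsqueeze takes axes as an input tensor.
axes_tensor = onnx.helper.make_tensor('axes', onnx.TensorProto.INT64, [1], [0])
node = onnx.helper.make_node('Unsqueeze', inputs=['x', 'axes'], outputs=['y'])
# axes_tensor is then supplied as a graph initializer (or as a runtime input).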
Example 7: test_identity
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_identity():
    np.random.seed(133391)
    shape = [2, 4]
    input_data = np.random.randn(*shape).astype(np.float32)

    identity_node = make_node('Identity', inputs=['x'], outputs=['y'])
    ng_results = run_node(identity_node, [input_data])
    assert np.array_equal(ng_results, [input_data])

    node1 = make_node('Add', inputs=['A', 'B'], outputs=['add1'], name='add_node1')
    node2 = make_node('Identity', inputs=['add1'], outputs=['identity1'], name='identity_node1')
    node3 = make_node('Abs', inputs=['identity1'], outputs=['Y'], name='abs_node1')
    graph = make_graph([node1, node2, node3], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, shape)])
    model = make_model(graph, producer_name='ngraph ONNX Importer')

    ng_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    ng_results = computation(input_data, input_data)
    expected_result = np.abs(input_data + input_data)
    assert np.array_equal(ng_results[0], expected_result)
Example 8: test_constant
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_constant(value_type):
    values = np.random.randn(5, 5).astype(value_type)
    node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['values'],
        value=onnx.helper.make_tensor(
            name='const_tensor',
            data_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(value_type)],
            dims=values.shape,
            vals=values.flatten()))
    ng_results = run_node(node, [])
    assert np.allclose(ng_results, [values])

# See https://github.com/onnx/onnx/issues/1190
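The Constant tensor built above with make_tensor and the NP_TYPE_TO_TENSOR_TYPE mapping can also be produced directly from the NumPy array via onnx.numpy_helper; this is not what the test does, just an equivalent alternative:

from onnx import numpy_helper

# Build the same TensorProto straight from the NumPy array.
const_tensor = numpy_helper.from_array(values, name='const_tensor')
node = onnx.helper.make_node('Constant', inputs=[], outputs=['values'],
                             value=const_tensor)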
Example 9: make_onnx_model_for_gemm_op
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs):
    input_a_for_output = input_a
    input_b_for_output = input_b
    if kwargs.get('transA'):
        input_a_for_output = input_a.T
    if kwargs.get('transB'):
        input_b_for_output = input_b.T
    output_shape = np.dot(input_a_for_output, input_b_for_output).shape

    node = make_node('Gemm', ['A', 'B', 'C'], ['Y'], name='test_node', **kwargs)
    graph = make_graph([node], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, input_a.shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, input_b.shape),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, input_c.shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)])
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    return model
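Because the helper forwards **kwargs straight to make_node, Gemm attributes such as transA, transB, alpha and beta can be attached at the call site; a hypothetical usage sketch:

# Hypothetical call: the keyword arguments become attributes of the Gemm node.
a = np.random.rand(4, 3).astype(np.float32)
b = np.random.rand(4, 5).astype(np.float32)
c = np.random.rand(3, 5).astype(np.float32)
model = make_onnx_model_for_gemm_op(a, b, c, transA=1, alpha=0.5, beta=2.0)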
Example 10: test_pool_average
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_pool_average(ndarray_1x1x4x4):
    x = ndarray_1x1x4x4

    node = onnx.helper.make_node('AveragePool', inputs=['x'], outputs=['y'],
                                 kernel_shape=(2, 2), strides=(2, 2))
    y = np.array([[13.5, 15.5],
                  [21.5, 23.5]], dtype=np.float32).reshape(1, 1, 2, 2)
    ng_results = run_node(node, [x])
    assert np.array_equal(ng_results, [y])

    node = onnx.helper.make_node('AveragePool', inputs=['x'], outputs=['y'],
                                 kernel_shape=(2, 2), strides=(2, 2), pads=(1, 1, 1, 1))
    y = np.array([[11, 12.5, 14],
                  [17, 18.5, 20],
                  [23, 24.5, 26]], dtype=np.float32).reshape(1, 1, 3, 3)
    ng_results = run_node(node, [x])
    assert np.array_equal(ng_results, [y])
Example 11: make_graph
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def make_graph(nodes, graph_name, inputs, outputs):
    input_dict = {}
    for input in inputs:
        input_dict[input.name] = (input, None)

    outputs_fixed = []
    for output in outputs:
        if output.name in input_dict:
            input, new_output = input_dict[output.name]
            if new_output is None:
                new_output = new_tensor(name=graph_name + '_out')
                nodes.append(helper.make_node('Identity',
                                              inputs=[input.name],
                                              outputs=[new_output.name]))
                input_dict[output.name] = (input, new_output)
        else:
            new_output = output
        outputs_fixed.append(new_output)

    graph_name = gen_graph_name(graph_name)
    return helper.make_graph(nodes, graph_name, inputs, outputs_fixed)
Example 12: _onnx_create_single_node_model
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def _onnx_create_single_node_model(op_type,  # type: Text
                                   input_shapes,  # type: Sequence[Tuple[int, ...]]
                                   output_shapes,  # type: Sequence[Tuple[int, ...]]
                                   initializer=[],  # type: Sequence[TensorProto]
                                   **kwargs  # type: Any
                                   ):
    # type: (...) -> ModelProto
    inputs = [
        ("input{}".format(i,), input_shapes[i])
        for i in range(len(input_shapes))
    ]
    outputs = [
        ("output{}".format(i,), output_shapes[i], TensorProto.FLOAT)
        for i in range(len(output_shapes))
    ]

    node = helper.make_node(
        op_type,
        inputs=[i[0] for i in inputs] + [t.name for t in initializer],
        outputs=[o[0] for o in outputs],
        **kwargs
    )
    return _onnx_create_model([node], inputs, outputs, initializer)
Example 13: _make_model_acos_exp_topk
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def _make_model_acos_exp_topk():  # type: (...) -> ModelProto
    '''
    make a very simple model for testing: input->acos->exp->topk->2 outputs
    '''
    inputs = [('input0', (10,), TensorProto.FLOAT), ('K', (1,), TensorProto.INT64)]
    outputs = [('output_values', (3,), TensorProto.FLOAT), ('output_indices', (3,), TensorProto.INT64)]
    acos = helper.make_node("Acos",
                            inputs=[inputs[0][0]],
                            outputs=['acos_out'])
    exp = helper.make_node("Exp",
                           inputs=[acos.output[0]],
                           outputs=['exp_out'])
    topk = helper.make_node("TopK",
                            inputs=[exp.output[0], inputs[1][0]],
                            outputs=[outputs[0][0], outputs[1][0]],
                            axis=0)
    return _onnx_create_model([acos, exp, topk], inputs, outputs)
Example 14: test_add
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_add(self):
    node_def = helper.make_node("Add", ["X", "Y"], ["Z"])
    x = self._get_rnd_float32(shape=[5, 10, 5, 5])
    y = self._get_rnd_float32(shape=[10, 1, 1])
    output = run_node(node_def, [x, y])
    np.testing.assert_almost_equal(output["Z"],
                                   np.add(x, y.reshape([1, 10, 1, 1])))
    # node_def = helper.make_node("Add", ["A", "B"], ["C"], broadcast=1)
    # a = self._get_rnd([10, 10])
    # b = self._get_rnd([10, 10])
    # output = run_node(node_def, [a, b])
    # np.testing.assert_almost_equal(output["C"], np.add(a, b))
    # node_def = helper.make_node("Add", ["A", "B"], ["C"], broadcast=1)
    # a = self._get_rnd([10, 10])
    # b = self._get_rnd([10,])
    # output = run_node(node_def, [a, b])
    # np.testing.assert_almost_equal(output["C"], np.add(a, b))
Example 15: test_batch_normalization
# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_node [as alias]
def test_batch_normalization(self):
    if legacy_opset_pre_ver(6):
        raise unittest.SkipTest("Backend doesn't support consumed flag")
    node_def = helper.make_node("BatchNormalization",
                                ["X", "scale", "bias", "mean", "var"], ["Y"],
                                epsilon=0.001)
    x_shape = [3, 5, 4, 2]
    param_shape = [5]
    _param_shape = [1, 5, 1, 1]
    x = self._get_rnd_float32(0, 1, shape=x_shape)
    m = self._get_rnd_float32(0, 1, shape=param_shape)
    _m = m.reshape(_param_shape)
    v = self._get_rnd_float32(0, 1, shape=param_shape)
    _v = v.reshape(_param_shape)
    scale = self._get_rnd_float32(0, 1, shape=param_shape)
    _scale = scale.reshape(_param_shape)
    bias = self._get_rnd_float32(0, 1, shape=param_shape)
    _bias = bias.reshape(_param_shape)
    golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
    output = run_node(node_def, [x, scale, bias, m, v])
    np.testing.assert_almost_equal(output["Y"], golden, decimal=5)