This article collects typical usage examples of the Python method onnx.helper.make_graph. If you are unsure what helper.make_graph does, how to call it, or what it looks like in practice, the curated code samples below should help. You can also explore the other helpers in the onnx.helper module.
The following shows 15 code examples of helper.make_graph, sorted by popularity by default.
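Before the examples, here is a minimal, self-contained sketch of the usual make_graph workflow: build nodes with helper.make_node, describe inputs and outputs with helper.make_tensor_value_info, assemble them with helper.make_graph, wrap the graph with helper.make_model, and validate the result. The tensor and graph names below are illustrative placeholders, not taken from any of the examples that follow.

import onnx
from onnx import TensorProto, helper

# One Relu node: reads graph input "x", writes graph output "y".
node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
graph = helper.make_graph(
    [node],                 # nodes, in topological order
    "minimal_graph",        # graph name
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],   # graph inputs
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])])   # graph outputs
model = helper.make_model(graph, producer_name="make_graph-demo")
onnx.checker.check_model(model)   # raises if the graph is malformed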
Example 1: test_broadcast

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_broadcast():
    """Test for broadcasting in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Add", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "bcast_test",
                              inputs,
                              outputs)

    bcast_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(bcast_model)
    numpy_op = input1 + input2
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 2: test_greater

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_greater():
    """Test for logical greater in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "greater_test",
                              inputs,
                              outputs)

    greater_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(greater_model)
    numpy_op = np.greater(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 3: test_lesser

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_lesser():
    """Test for logical lesser in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "lesser_test",
                              inputs,
                              outputs)

    lesser_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(lesser_model)
    numpy_op = np.less(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 4: test_equal

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_equal():
    """Test for logical equal in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "equal_test",
                              inputs,
                              outputs)

    equal_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(equal_model)
    numpy_op = np.equal(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op)
Example 5: test_simple_graph

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_simple_graph():
    node1 = make_node('Add', ['A', 'B'], ['X'], name='add_node1')
    node2 = make_node('Add', ['X', 'C'], ['Y'], name='add_node2')
    graph = make_graph([node1, node2], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, [1])],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32))
    assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32))
Example 6: test_identity

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_identity():
    np.random.seed(133391)
    shape = [2, 4]
    input_data = np.random.randn(*shape).astype(np.float32)

    identity_node = make_node('Identity', inputs=['x'], outputs=['y'])
    ng_results = run_node(identity_node, [input_data])
    assert np.array_equal(ng_results, [input_data])

    node1 = make_node('Add', inputs=['A', 'B'], outputs=['add1'], name='add_node1')
    node2 = make_node('Identity', inputs=['add1'], outputs=['identity1'], name='identity_node1')
    node3 = make_node('Abs', inputs=['identity1'], outputs=['Y'], name='abs_node1')
    graph = make_graph([node1, node2, node3], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, shape)])
    model = make_model(graph, producer_name='ngraph ONNX Importer')

    ng_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    ng_results = computation(input_data, input_data)
    expected_result = np.abs(input_data + input_data)
    assert np.array_equal(ng_results[0], expected_result)
Example 7: make_onnx_model_for_gemm_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs):
    input_a_for_output = input_a
    input_b_for_output = input_b
    if kwargs.get('transA'):
        input_a_for_output = input_a.T
    if kwargs.get('transB'):
        input_b_for_output = input_b.T
    output_shape = np.dot(input_a_for_output, input_b_for_output).shape

    node = make_node('Gemm', ['A', 'B', 'C'], ['Y'], name='test_node', **kwargs)
    graph = make_graph([node], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, input_a.shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, input_b.shape),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, input_c.shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)])
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    return model
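A possible way to exercise this helper (not part of the original example; the arrays, shapes, and the checker call below are assumptions added for illustration) is to request a transposed A input and confirm that the declared output shape follows the transposed matmul:

import numpy as np
import onnx

# Assumes make_onnx_model_for_gemm_op from the example above is in scope.
# With transA=1, Gemm computes Y = A.T @ B + C, so A is declared with its
# untransposed shape (3, 2) while the output 'Y' is declared as (2, 4).
a = np.random.rand(3, 2).astype(np.float32)
b = np.random.rand(3, 4).astype(np.float32)
c = np.random.rand(2, 4).astype(np.float32)
model = make_onnx_model_for_gemm_op(a, b, c, transA=1)
onnx.checker.check_model(model)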
Example 8: make_graph

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def make_graph(nodes, graph_name, inputs, outputs):
    input_dict = {}
    for input in inputs:
        input_dict[input.name] = (input, None)

    outputs_fixed = []
    for output in outputs:
        if output.name in input_dict:
            input, new_output = input_dict[output.name]
            if new_output is None:
                new_output = new_tensor(name=graph_name + '_out')
                nodes.append(helper.make_node('Identity',
                                              inputs=[input.name],
                                              outputs=[new_output.name]))
                input_dict[output.name] = (input, new_output)
        else:
            new_output = output
        outputs_fixed.append(new_output)

    graph_name = gen_graph_name(graph_name)
    return helper.make_graph(nodes, graph_name, inputs, outputs_fixed)
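The point of this wrapper is the Identity passthrough: when a requested graph output is also a graph input, it routes the value through an Identity node so the output tensor gets its own name (new_tensor and gen_graph_name are project-specific helpers not shown here, presumably because some consumers do not accept an output that is literally an input). A standalone sketch of the same pattern using only onnx.helper, with illustrative names, looks like this:

import onnx
from onnx import TensorProto, helper

x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, [2])
out_info = helper.make_tensor_value_info("x_out", TensorProto.FLOAT, [2])
# Forward the input "x" to a distinct output tensor "x_out".
passthrough = helper.make_node("Identity", inputs=["x"], outputs=["x_out"])
graph = helper.make_graph([passthrough], "identity_passthrough", [x_info], [out_info])
onnx.checker.check_model(helper.make_model(graph))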
Example 9: generate_graph

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def generate_graph(self, name: 'str', isMain=False):
    input_tensor_and_initializer = self.input_tensor.copy()

    # TODO(take-cheeze): Remove this workaround
    for i in input_tensor_and_initializer:
        t = i.type.tensor_type
        if t is not None and t.elem_type is TensorProto.UNDEFINED:
            t.elem_type = TensorProto.FLOAT

    initializers = []

    # add initializers
    if isMain:
        for v in self.generator.initializers.values():
            initializers.append(v.tensor)

            if v.tensor_value in self.input_tensor:
                continue

            input_tensor_and_initializer.append(v.tensor_value)

    return oh.make_graph(self.nodes, name, input_tensor_and_initializer, self.output_tensor, initializer=initializers)
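For reference, the initializer= argument used above expects TensorProto objects such as those produced by helper.make_tensor. A minimal sketch, with names and shapes chosen here purely for illustration:

import numpy as np
import onnx
from onnx import TensorProto, helper

# Constant weight baked into the graph as an initializer.
weight = np.ones((4, 4), dtype=np.float32)
weight_init = helper.make_tensor("W", TensorProto.FLOAT, weight.shape, weight.flatten().tolist())
node = helper.make_node("MatMul", inputs=["x", "W"], outputs=["y"])
graph = helper.make_graph(
    [node], "with_initializer",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])],
    initializer=[weight_init])
onnx.checker.check_model(helper.make_model(graph))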
Example 10: make_graph_proto

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def make_graph_proto(self):
    self._clean_graph()
    self._fix_data_type()

    if IS_PYTHON3:
        params = list(inspect.signature(make_graph).parameters.keys())
    else:
        params = inspect.getargspec(make_graph).args

    kwargs = {
        "initializer": self.consts_proto,
        "value_info": self.value_info_proto
    }
    return make_graph(self.nodes_proto, self._name, self.inputs_proto,
                      self.outputs_proto,
                      **dict([(k, kwargs[k]) for k in kwargs if k in params]))
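The inspect-based filtering above exists so the call keeps working across onnx versions whose make_graph signatures may differ (for example, releases that may not accept a value_info keyword). The core idiom, isolated for Python 3 and with placeholder values, is:

import inspect
from onnx.helper import make_graph

# Only keep keyword arguments that the installed make_graph actually accepts.
accepted = set(inspect.signature(make_graph).parameters)
extra_kwargs = {"initializer": [], "value_info": []}
filtered = {k: v for k, v in extra_kwargs.items() if k in accepted}
# make_graph(nodes, name, inputs, outputs, **filtered) would then receive only supported kwargs.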
Example 11: test_relu_node_inplace

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_relu_node_inplace(self):
    X = np.random.randn(3, 2).astype(np.float32)
    Y_ref = np.clip(X, 0, np.inf)

    node_def = helper.make_node("Relu", ["X"], ["X1"])
    graph_def = helper.make_graph(
        [node_def],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[
            helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])
        ])
    tf_rep = prepare(helper.make_model(graph_def))
    output = tf_rep.run({"X": X})
    np.testing.assert_almost_equal(output.X1, Y_ref)
Example 12: test_eye_like

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_eye_like(self):
    if legacy_opset_pre_ver(9):
        raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
            defs.onnx_opset_version()))
    shape = [6, 10]
    off_diagonal_offset = -3
    x = self._get_rnd_int(0, 100, shape=shape)
    y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)

    node_def = helper.make_node("EyeLike", ["x"], ["y"],
                                dtype=TensorProto.FLOAT,
                                k=off_diagonal_offset)
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("y", TensorProto.FLOAT, [None, None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"x": x})
    np.testing.assert_equal(output["y"], y)
Example 13: test_flatten

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_flatten(self):
    shape = [2, 3, 4]
    x = self._get_rnd_float32(shape=shape)
    axis = 1

    node_def = helper.make_node("Flatten", ["X"], ["Y"], axis=axis)
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                          [None, None, None])
        ],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None])])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"X": x})
    new_shape = (np.prod(shape[0:axis]).astype(int), -1)
    np.testing.assert_almost_equal(output["Y"], np.reshape(x, new_shape))
Example 14: test_gather_nd

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_gather_nd(self):
    if legacy_opset_pre_ver(11):
        raise unittest.SkipTest(
            "ONNX version {} doesn't support GatherND.".format(
                defs.onnx_opset_version()))
    # valid positive and negative indices for elements
    data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    indices = np.array([[0, 0], [1, -3]], dtype=np.int64)
    ref_output = np.array([1, 4], dtype=np.int32)
    node_def = helper.make_node("GatherND", ["data", "indices"], ["outputs"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.INT32,
                                          [None, None]),
            helper.make_tensor_value_info("indices", TensorProto.INT64,
                                          [None, None])
        ],
        outputs=[
            helper.make_tensor_value_info("outputs", TensorProto.INT32, [None])
        ])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"data": data, "indices": indices})
    np.testing.assert_almost_equal(output["outputs"], ref_output)
Example 15: test_is_inf

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_graph [as alias]
def test_is_inf(self):
    if legacy_opset_pre_ver(10):
        raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
            defs.onnx_opset_version()))
    inp = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf],
                   dtype=np.float32)
    expected_output = np.isinf(inp)
    node_def = helper.make_node("IsInf", ["X"], ["Y"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT, [None]),
        ],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.BOOL, [None])])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"X": inp})
    np.testing.assert_equal(output["Y"], expected_output)