

Python helper.make_model Method Code Examples

This article collects typical usage examples of the helper.make_model method from the Python onnx.helper module. If you are wondering what helper.make_model does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the onnx.helper module to which this method belongs.


The following presents 15 code examples of the helper.make_model method, sorted by popularity by default.
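Before the collected examples, here is a minimal, self-contained sketch of the typical helper.make_model workflow: declare input and output value infos, build a node and a graph, wrap the graph in a ModelProto, and validate it. The sketch is illustrative only; the tensor names "X" and "Y", the graph name "identity_graph", and the producer name are placeholders and do not come from the examples below.

import onnx
from onnx import helper, TensorProto

# Declare the graph's input and output value infos.
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3])
Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 3])

# A single Identity node that copies X to Y.
node = helper.make_node("Identity", inputs=["X"], outputs=["Y"])

# Assemble the graph and wrap it in a ModelProto.
graph = helper.make_graph([node], "identity_graph", [X], [Y])
model = helper.make_model(graph, producer_name="make_model_demo")

# Check that the resulting model is structurally valid.
onnx.checker.check_model(model)
print(model.graph.name)  # -> identity_graph

As the examples below show, make_model is usually the final assembly step before handing the ModelProto to a backend (MXNet, TensorFlow, nGraph, TVM) for execution.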

Example 1: test_broadcast

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_broadcast():
    """Test for broadcasting in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Add", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "bcast_test",
                              inputs,
                              outputs)

    bcast_model = helper.make_model(graph)
    
    bkd_rep = mxnet_backend.prepare(bcast_model)
    numpy_op = input1 + input2
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op) 
Contributor: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source file: onnx_import_test.py

Example 2: test_greater

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_greater():
    """Test for logical greater in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "greater_test",
                              inputs,
                              outputs)

    greater_model = helper.make_model(graph)
    
    bkd_rep = mxnet_backend.prepare(greater_model)
    numpy_op = np.greater(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op) 
Contributor: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source file: onnx_import_test.py

Example 3: test_lesser

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_lesser():
    """Test for logical greater in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "lesser_test",
                              inputs,
                              outputs)

    lesser_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(lesser_model)
    numpy_op = np.less(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op) 
Contributor: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source file: onnx_import_test.py

Example 4: test_equal

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_equal():
    """Test for logical greater in onnx operators."""
    input1 = np.random.rand(1, 3, 4, 5).astype("float32")
    input2 = np.random.rand(1, 5).astype("float32")
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
              helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

    nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]

    graph = helper.make_graph(nodes,
                              "equal_test",
                              inputs,
                              outputs)

    equal_model = helper.make_model(graph)

    bkd_rep = mxnet_backend.prepare(equal_model)
    numpy_op = np.equal(input1, input2).astype(np.float32)
    output = bkd_rep.run([input1, input2])
    npt.assert_almost_equal(output[0], numpy_op) 
Contributor: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source file: onnx_import_test.py

Example 5: test_simple_graph

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_simple_graph():
    node1 = make_node('Add', ['A', 'B'], ['X'], name='add_node1')
    node2 = make_node('Add', ['X', 'C'], ['Y'], name='add_node2')
    graph = make_graph([node1, node2], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, [1])],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model_function = import_onnx_model(model)

    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32))
    assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32)) 
Contributor: NervanaSystems, Project: ngraph-onnx, Lines: 18, Source file: test_graph_import.py

Example 6: test_identity

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_identity():
    np.random.seed(133391)
    shape = [2, 4]
    input_data = np.random.randn(*shape).astype(np.float32)

    identity_node = make_node('Identity', inputs=['x'], outputs=['y'])
    ng_results = run_node(identity_node, [input_data])
    assert np.array_equal(ng_results, [input_data])

    node1 = make_node('Add', inputs=['A', 'B'], outputs=['add1'], name='add_node1')
    node2 = make_node('Identity', inputs=['add1'], outputs=['identity1'], name='identity_node1')
    node3 = make_node('Abs', inputs=['identity1'], outputs=['Y'], name='abs_node1')

    graph = make_graph([node1, node2, node3], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, shape)])
    model = make_model(graph, producer_name='ngraph ONNX Importer')
    ng_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    ng_results = computation(input_data, input_data)
    expected_result = np.abs(input_data + input_data)

    assert np.array_equal(ng_results[0], expected_result) 
Contributor: NervanaSystems, Project: ngraph-onnx, Lines: 27, Source file: test_ops_unary.py

Example 7: make_onnx_model_for_gemm_op

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs):
    input_a_for_output = input_a
    input_b_for_output = input_b
    if kwargs.get('transA'):
        input_a_for_output = input_a.T
    if kwargs.get('transB'):
        input_b_for_output = input_b.T

    output_shape = np.dot(input_a_for_output, input_b_for_output).shape
    node = make_node('Gemm', ['A', 'B', 'C'], ['Y'], name='test_node', **kwargs)
    graph = make_graph([node], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, input_a.shape),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, input_b.shape),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, input_c.shape)],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)])
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    return model 
Contributor: NervanaSystems, Project: ngraph-onnx, Lines: 19, Source file: test_ops_matmul.py

Example 8: generate_model

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def generate_model(self, inputs, outputs, graph, model) -> 'ModelProto':

    # assign param names
    self.param2name = {id(p): 'param' + n.replace('/', '_')
                       for n, p in model.namedparams()}

    for p, n in self.param2name.items():
        assigned_names.append(n)

    # assign onnx name
    assign_onnx_name(graph)

    graph_ = self.generate_graph(inputs, outputs, graph, None, True)
    onnx_model = oh.make_model(
        graph_, producer_name="elichika", producer_version="0.1")
    return onnx_model
Contributor: pfnet-research, Project: chainer-compiler, Lines: 18, Source file: onnx_converters.py

Example 9: test_relu_node_inplace

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_relu_node_inplace(self):
    X = np.random.randn(3, 2).astype(np.float32)
    Y_ref = np.clip(X, 0, np.inf)

    node_def = helper.make_node("Relu", ["X"], ["X1"])

    graph_def = helper.make_graph(
        [node_def],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[
            helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])
        ])
    tf_rep = prepare(helper.make_model(graph_def))
    output = tf_rep.run({"X": X})
    np.testing.assert_almost_equal(output.X1, Y_ref) 
Contributor: onnx, Project: onnx-tensorflow, Lines: 18, Source file: test_model.py

Example 10: test_attribute_wrapper

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_attribute_wrapper():
    def attribute_value_test(attribute_value):
        node = make_node('Abs', ['X'], [], name='test_node', test_attribute=attribute_value)
        model = make_model(make_graph([node], 'test_graph', [
            make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1, 2]),
        ], []), producer_name='ngraph')
        wrapped_attribute = ModelWrapper(model).graph.node[0].get_attribute('test_attribute')
        return wrapped_attribute.get_value()

    tensor = make_tensor('test_tensor', onnx.TensorProto.FLOAT, [1], [1])

    assert attribute_value_test(1) == 1
    assert type(attribute_value_test(1)) == np.long
    assert attribute_value_test(1.0) == 1.0
    assert type(attribute_value_test(1.0)) == np.float
    assert attribute_value_test('test') == 'test'
    assert attribute_value_test(tensor)._proto == tensor

    assert attribute_value_test([1, 2, 3]) == [1, 2, 3]
    assert attribute_value_test([1.0, 2.0, 3.0]) == [1.0, 2.0, 3.0]
    assert attribute_value_test(['test1', 'test2']) == ['test1', 'test2']
    assert attribute_value_test([tensor, tensor])[1]._proto == tensor 
Contributor: NervanaSystems, Project: ngraph-python, Lines: 24, Source file: test_model_wrappers.py

Example 11: convert_and_calculate

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def convert_and_calculate(onnx_node, data_inputs, data_outputs):
    # type: (NodeProto, List[np.ndarray], List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX node to ngraph node and perform computation on input data.

    :param onnx_node: ONNX NodeProto describing a computation node
    :param data_inputs: list of numpy ndarrays with input data
    :param data_outputs: list of numpy ndarrays with expected output data
    :return: list of numpy ndarrays with computed output
    """
    transformer = get_transformer()
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                     for name, value in zip(onnx_node.input, data_inputs)]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                      for name, value in zip(onnx_node.output, data_outputs)]

    graph = make_graph([onnx_node], 'test_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_results = []
    for ng_model in import_onnx_model(model):
        computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
        ng_results.append(computation(*data_inputs))

    return ng_results 
Contributor: NervanaSystems, Project: ngraph-python, Lines: 27, Source file: utils.py

Example 12: _test_power_iteration

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def _test_power_iteration(x_shape, y_shape):
    if isinstance(y_shape, int):
        y_shape = [y_shape]

    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)

    np_res = np.power(x, y).astype(np.float32)

    res = helper.make_node("Pow", ['x', 'y'], ['out'])

    graph = helper.make_graph([res],
                              'power_test',
                              inputs = [helper.make_tensor_value_info("x",
                                            TensorProto.FLOAT, list(x_shape)),
                                        helper.make_tensor_value_info("y",
                                            TensorProto.FLOAT, list(y_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(np_res.shape))])

    model = helper.make_model(graph, producer_name='power_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        np.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5) 
Contributor: mlperf, Project: training_results_v0.6, Lines: 27, Source file: test_forward.py

Example 13: test_squeeze

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_squeeze():
    in_shape = (1, 3, 1, 3, 1, 1)
    out_shape = (3, 3)
    y = helper.make_node("Squeeze", ['in'], ['out'], axes=[0, 2, 4, 5])

    graph = helper.make_graph([y],
                              'squeeze_test',
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='squeeze_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')

    np.testing.assert_allclose(out_shape, tvm_out.shape) 
Contributor: mlperf, Project: training_results_v0.6, Lines: 21, Source file: test_forward.py

Example 14: test_unsqueeze

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def test_unsqueeze():
    in_shape = (3, 3)
    axis = (0, 3, 4)
    out_shape = (1, 3, 3, 1, 1)
    y = helper.make_node("Unsqueeze", ['in'], ['out'], axes=list(axis))

    graph = helper.make_graph([y],
                              'squeeze_test',
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='squeeze_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')

    np.testing.assert_allclose(out_shape, tvm_out.shape) 
Contributor: mlperf, Project: training_results_v0.6, Lines: 22, Source file: test_forward.py

Example 15: _test_slice_iteration

# Required import: from onnx import helper [as alias]
# Or: from onnx.helper import make_model [as alias]
def _test_slice_iteration(indata, outdata, starts, ends, axes=None):
    if axes:
        y = helper.make_node("Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends)
    else:
        y = helper.make_node("Slice", ['in'], ['out'], starts=starts, ends=ends)

    graph = helper.make_graph([y],
                              'slice_test',
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(outdata.shape))])

    model = helper.make_model(graph, producer_name='slice_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')

    np.testing.assert_allclose(outdata, tvm_out) 
Contributor: mlperf, Project: training_results_v0.6, Lines: 21, Source file: test_forward.py


Note: The onnx.helper.make_model method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please follow the license of the corresponding project when using or redistributing the code; do not reproduce this article without permission.