

Python relay.Function Code Examples

This article collects and summarizes typical usage examples of the tvm.relay.Function method in Python. If you have been wondering what relay.Function does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the enclosing tvm.relay module.


The following sections present 15 code examples of the relay.Function method, sorted by popularity by default.
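Before diving into the collected examples, here is a minimal, self-contained sketch of the typical relay.Function workflow: build an expression out of relay.var nodes, wrap it into a Function, and evaluate it. The variable names and the "llvm" target are illustrative assumptions, not taken from any of the examples below.

# A minimal sketch of the relay.Function workflow (the variable names and
# the "llvm" target are illustrative assumptions).
import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(4,), dtype="float32")
y = relay.add(x, relay.const(1.0))
func = relay.Function([x], y)        # the listed vars become the parameters
mod = tvm.IRModule.from_expr(func)   # wraps the function as the module's "main"

exe = relay.create_executor("graph", mod=mod, target="llvm")
out = exe.evaluate()(np.ones(4, dtype="float32"))
print(out.asnumpy())                 # -> [2. 2. 2. 2.]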

Example 1: get_LeNet

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def get_LeNet(batch_size=batch_size, img_shape=(1, 28, 28), dtype="float32"):
    data_shape = (batch_size,) + img_shape
    data = relay.var("data", shape=data_shape, dtype=dtype)
    conv1_bias = relay.var("conv1_bias")
    conv1 = layers.conv2d(data, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), channels=6, name="conv1")
    conv1 = relay.nn.bias_add(conv1, conv1_bias)
    maxpool1 = relay.nn.max_pool2d(conv1, (2, 2), (2, 2))
    conv2_bias = relay.var("conv2_bias")
    conv2 = layers.conv2d(maxpool1, kernel_size=(5, 5), strides=(1, 1), padding=(0, 0), channels=16, name="conv2")
    conv2 = relay.nn.bias_add(conv2, conv2_bias)
    maxpool2 = relay.nn.max_pool2d(conv2, (2, 2), (2, 2))
    bf1 = relay.nn.batch_flatten(maxpool2)
    dense1 = layers.dense_without_bias(bf1, units=120, name="dense1")
    dense2 = layers.dense_without_bias(dense1, units=84, name="dense2")
    dense3 = layers.dense_without_bias(dense2, units=10, name="dense3")
    softmax = relay.nn.softmax(dense3)
    # the label comes in as a second input
    label = relay.var("data2", shape=(batch_size, 10), dtype=dtype)
    loss = relay.nn.cross_entropy(softmax, label)
    args = relay.analysis.free_vars(loss)
    return relay.Function(args, loss) 
Developer: KnowingNothing, Project: FlexTensor, Lines: 23, Source: relay-lenet.py
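The returned Function still contains free weight and bias variables without explicit shapes; Relay type inference can fill them in. Below is a short sketch of how one might type-check the network. It assumes a module-level batch_size and a layers helper module (both defined in the original source file, the latter similar to tvm.relay.testing.layers).

# Sketch: wrap the LeNet loss function in a module and type-check it
# (assumes batch_size and the `layers` helper from the original file).
import tvm
from tvm import relay

func = get_LeNet(batch_size=1)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)  # infers the weight and bias shapes
print(mod["main"].checked_type)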

Example 2: test_recursion

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_recursion():
    """
    Program:
       let f(n: i32, data: f32) -> f32 = {
          if (n == 0) {
              return data;
          } else {
              return f(n - 1, log(data));
          }
       }
       f(2, 10000);
    """
    f = relay.Var("f")
    n = relay.Var("n")
    np = relay.Param(n, e.int32)
    data = relay.Var("data")
    datap = relay.Param(data, e.float32)
    funcbody = relay.If(equal(n, convert(0)), data, f(subtract(n, convert(1.0)), log(data)))
    value = relay.Function([np, datap], e.float32, funcbody, [])
    orig = relay.Let(f, value, f(convert(2.0), convert(10000.0)), e.float32)
    assert alpha_equal(dead_code_elimination(orig), orig)
    assert alpha_equal(dead_code_elimination(relay.Let(f, value, e.three, e.float32)), e.three) 
Developer: mlperf, Project: training_results_v0.6, Lines: 24, Source: test_pass_dead_code_elimination.py
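This test targets an old Relay API: relay.Param, the explicit return-type argument to relay.Function, alpha_equal, and the free-standing dead_code_elimination helper were all removed in later TVM releases. A rough sketch of the same recursive program against a newer API follows; TVM >= 0.7 and the exact pass invocation are assumptions here.

# Sketch of the same program in the newer Relay API (assumption: TVM >= 0.7,
# where parameter types ride on the vars and DCE runs as a module pass).
import tvm
from tvm import relay

f = relay.var("f")
n = relay.var("n", shape=(), dtype="int32")
data = relay.var("data", shape=(), dtype="float32")
body = relay.If(
    relay.equal(n, relay.const(0, "int32")),
    data,
    f(relay.subtract(n, relay.const(1, "int32")), relay.log(data)),
)
value = relay.Function([n, data], body)
prog = relay.Let(f, value, f(relay.const(2, "int32"), relay.const(10000.0)))

mod = tvm.IRModule.from_expr(relay.Function([], prog))
mod = relay.transform.DeadCodeElimination()(mod)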

Example 3: example

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def example():
    shape = (1, 64, 54, 54)
    c_data = np.empty(shape).astype("float32")
    c = relay.const(c_data)
    weight = relay.var('weight', shape=(64, 64, 3, 3))
    x = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
    conv = relay.nn.conv2d(x, weight)
    y = relay.add(c, c)
    y = relay.multiply(y, relay.const(2, "float32"))
    y = relay.add(conv, y)
    z = relay.add(y, c)
    z1 = relay.add(y, c)
    z2 = relay.add(z, z1)
    return relay.Function([x], z2)

###############################################################################
# Let us register a layout alteration for the conv2d op so that we can apply
# the layout-alteration pass to the example. How the alter-layout pass works
# is outside the scope of this tutorial. 
Developer: apache, Project: incubator-tvm, Lines: 21, Source: relay_pass_infra.py
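For reference, a registration along the lines the tutorial comment above describes could look like the sketch below. The NCHW16c data layout is an illustrative choice, and level=101 merely overrides any registration a backend may already have installed.

# Sketch: an alter-op-layout hook for nn.conv2d (NCHW16c is illustrative).
from tvm import relay

@relay.op.register_alter_op_layout("nn.conv2d", level=101)
def alter_conv2d(attrs, inputs, tinfos, out_type):
    data, weight = inputs
    new_attrs = dict(attrs)
    new_attrs["data_layout"] = "NCHW16c"
    return relay.nn.conv2d(data, weight, **new_attrs)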

Example 4: to_cps

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def to_cps(func, mod=None):
    """
    Turn expression into CPS expression.

    Every intermediate computation will be passed to a continuation.

    Parameters
    ----------
    func: tvm.relay.Function
        The input function.

    mod: Optional[tvm.IRModule]
        The global module.

    Returns
    -------
    result: tvm.relay.Function
        The output function.
    """
    use_mod = mod if mod is not None else tvm.ir.IRModule()
    return _ffi_api.to_cps(func, use_mod) 
Developer: apache, Project: incubator-tvm, Lines: 23, Source: transform.py

Example 5: un_cps

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def un_cps(func):
    """
    Turn a CPS function into a Function without the continuation argument.

    Note that this will not give exactly the same interface as before CPS:
      if the inputs/outputs are higher-order, they will still be in CPS form.

    Parameters
    ----------
    func: tvm.relay.Function
        The input function

    Returns
    -------
    result: tvm.relay.Function
        The output function
    """
    return _ffi_api.un_cps(func) 
Developer: apache, Project: incubator-tvm, Lines: 20, Source: transform.py
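Together, to_cps and un_cps form a roundtrip. A short usage sketch (the shape is illustrative; CPS conversion expects a type-checked function, hence the InferType step):

# Sketch: to_cps / un_cps roundtrip on a trivial function.
import tvm
from tvm import relay
from tvm.relay.transform import to_cps, un_cps

x = relay.var("x", shape=(1,), dtype="float32")
func = relay.Function([x], relay.add(x, x))
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)  # to_cps expects a typed function

cps_func = to_cps(mod["main"], mod)  # adds a continuation parameter
plain_func = un_cps(cps_func)        # drops the continuation again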

Example 6: test_graph_runtime

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_graph_runtime():
    """Test a program which uses the graph runtime."""
    if not tvm.runtime.enabled("micro_dev"):
        return
    shape = (1024,)
    dtype = "float32"

    # Construct Relay program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(1.0))
    func = relay.Function([x], z)

    with micro.Session(DEV_CONFIG_A):
        mod = relay_micro_build(func, DEV_CONFIG_A)

        x_in = np.random.uniform(size=shape[0]).astype(dtype)
        mod.run(x=x_in)
        result = mod.get_output(0).asnumpy()

        tvm.testing.assert_allclose(
                mod.get_input(0).asnumpy(), x_in)
        tvm.testing.assert_allclose(
                result, x_in * x_in + 1.0) 
Developer: apache, Project: incubator-tvm, Lines: 26, Source: test_runtime_micro_on_arm.py
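When no micro_dev runtime is built in, the same program can still be sanity-checked on the host with the executor API used elsewhere on this page. A sketch, with the "llvm" target as an assumption:

# Sketch: host-side check of the same program, no microcontroller required.
import numpy as np
import tvm
import tvm.testing
from tvm import relay

x = relay.var("x", relay.TensorType(shape=(1024,), dtype="float32"))
func = relay.Function([x], relay.add(relay.multiply(x, x), relay.const(1.0)))

intrp = relay.create_executor("graph", ctx=tvm.cpu(), target="llvm")
x_in = np.random.uniform(size=1024).astype("float32")
result = intrp.evaluate(func)(x_in).asnumpy()
tvm.testing.assert_allclose(result, x_in * x_in + 1.0)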

Example 7: test_nested_sessions

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_nested_sessions():
    """Test entering and exiting nested session contexts."""
    if not tvm.runtime.enabled("micro_dev"):
        return
    shape = (1024,)
    dtype = "float32"

    # Construct Relay add program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    ret = relay.add(x, relay.const(1.0))
    add_const_func = relay.Function([x], ret)

    sess_a = micro.Session(DEV_CONFIG_A)
    sess_b = micro.Session(DEV_CONFIG_B)
    with sess_a:
        np_tensor_a = np.random.uniform(size=shape).astype(dtype)
        micro_tensor_a = tvm.nd.array(np_tensor_a, tvm.micro_dev(0))
        with sess_b:
            np_tensor_b = np.random.uniform(size=shape).astype(dtype)
            micro_tensor_b = tvm.nd.array(np_tensor_b, tvm.micro_dev(0))
        add_const_mod = relay_micro_build(add_const_func, DEV_CONFIG_A)
        add_const_mod.run(x=micro_tensor_a)
        add_result = add_const_mod.get_output(0).asnumpy()
        tvm.testing.assert_allclose(
                add_result, np_tensor_a + 1.0) 
Developer: apache, Project: incubator-tvm, Lines: 27, Source: test_runtime_micro_on_arm.py

Example 8: test_has_multiple_inputs

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_has_multiple_inputs():
    data = relay.var("data")
    out1 = data * relay.expr.const(3.0)
    w0 = relay.var("w0")
    out2 = relay.nn.conv2d(data, w0)
    out = relay.add(out1, out2)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    input_names = ["data"]
    verify_has_multiple_inputs(node_list, 2, input_names, False)
    verify_has_multiple_inputs(node_list, 4, input_names, False)
    verify_has_multiple_inputs(node_list, 5, input_names, True) 
Developer: apache, Project: incubator-tvm, Lines: 18, Source: test_autotvm_graph_tuner_utils.py

Example 9: test_get_direct_ancestor

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_get_direct_ancestor():
    data = relay.var("data")
    w0 = relay.var("w0")
    out1 = relay.nn.conv2d(data, w0)
    out2 = relay.add(out1, data * relay.expr.const(5.0))
    out3 = out2 + relay.expr.const(2.5)
    w1 = relay.var("w1")
    out = relay.nn.conv2d(out3, w1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    visited_dict = {}
    input_names = ["data"]
    out = get_direct_ancestor(node_list, visited_dict, target_ops, 5, input_names)
    assert out == [0], "Output mismatch: expecting [0] but got %s." % str(out) 
Developer: apache, Project: incubator-tvm, Lines: 20, Source: test_autotvm_graph_tuner_utils.py

Example 10: test_get_in_nodes

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_get_in_nodes():
    data = relay.var("data")
    w0 = relay.var("w0")
    out1 = relay.nn.conv2d(data, w0)
    out2 = relay.add(out1, data)
    out3 = out2 + relay.expr.const(2.5)
    w1 = relay.var("w1")
    out = relay.nn.conv2d(out3, w1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    input_names = ["data"]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list)
    out = get_in_nodes(node_list, target_ops, input_names)
    expected_out = {3: [0], 4: [3, 0], 7: [4]}
    diff_set = set(out) ^ set(expected_out)
    if len(diff_set) != 0:
        raise RuntimeError("Output mismatch: expecting %s but got %s." % (str(expected_out), str(out))) 
Developer: apache, Project: incubator-tvm, Lines: 22, Source: test_autotvm_graph_tuner_utils.py

Example 11: test_tuple_object

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_tuple_object():
    x = relay.var(
        'x',
        type_annotation=relay.ty.TupleType([
            relay.ty.TensorType((), 'int32'),
            relay.ty.TensorType((), 'int32')
        ]))

    fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
    mod = tvm.IRModule.from_expr(fn)

    exe = relay.create_executor(
        kind="vm", mod=mod, ctx=nd.cpu(), target="llvm")
    f = exe.evaluate()
    value_tuple = _container.tuple_object(
        [nd.array(np.array(11)),
         nd.array(np.array(12))])
    # pass an ADT object to evaluate
    out = f(value_tuple)
    tvm.testing.assert_allclose(out.asnumpy(), np.array(11)) 
Developer: apache, Project: incubator-tvm, Lines: 22, Source: test_runtime_container.py

Example 12: test_multi_outputs

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_multi_outputs():
    xshape = (10, 27)
    yshape = (10, 9)

    def mx_compose(F, **kwargs):
        x = F.sym.Variable("x")
        y = F.sym.Variable("y")
        z = F.sym.split(x, **kwargs)
        return F.sym.broadcast_sub(F.sym.broadcast_add(z[0], z[2]), y)

    def relay_compose(F, **kwargs):
        x = F.var("x", shape=xshape)
        y = F.var("y", shape=yshape)
        z = F.split(x, **kwargs)
        z = F.subtract(F.add(z[0], z[2]), y)
        func = relay.Function(relay.analysis.free_vars(z), z)
        return tvm.IRModule.from_expr(func)

    mx_sym = mx_compose(mx, num_outputs=3, axis=1)
    mod, _ = relay.frontend.from_mxnet(
        mx_sym, shape={"x":xshape, "y":yshape})
    relay_mod = relay_compose(relay, indices_or_sections=3, axis=1)
    compare_graph(mod, relay_mod) 
Developer: apache, Project: incubator-tvm, Lines: 25, Source: test_graph.py

Example 13: test_conv3d_transpose_ncdhw_run

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_conv3d_transpose_ncdhw_run():
    dshape = (1, 3, 24, 24, 24)
    kshape = (3, 4, 2, 2, 2)

    x = relay.var("x", shape=dshape)
    w = relay.var("w")
    y = relay.nn.conv3d_transpose(x, w,
                                  channels=4, kernel_size=(2, 2, 2), strides=(1, 1, 1),
                                  padding=(1, 1, 1))
    func = relay.Function([x, w], y)
    dtype = "float32"

    data = np.random.uniform(size=dshape).astype(dtype)
    kernel = np.random.uniform(size=kshape).astype(dtype)

    ref_res = topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) 
Developer: apache, Project: incubator-tvm, Lines: 23, Source: test_op_level2.py

Example 14: test_conv2d_transpose_nchw_run

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_conv2d_transpose_nchw_run():
    dshape = (1, 3, 18, 18)
    kshape = (3, 10, 3, 3)
    oshape = (1, 10, 36, 36)
    x = relay.var("x", shape=dshape)
    w = relay.var("w")
    y = relay.nn.conv2d_transpose(x, w,
                                  channels=10, kernel_size=(3,3), strides=(2,2),
                                  padding=(1,1), output_padding=(1, 1))
    func = relay.Function([x, w], y)
    dtype = "float32"
    data = np.random.uniform(size=dshape).astype(dtype)
    kernel = np.random.uniform(size=kshape).astype(dtype)
    ref_res = topi.testing.conv2d_transpose_nchw_python(
        data, kernel, 2, 1, (1, 1))

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) 
Developer: apache, Project: incubator-tvm, Lines: 22, Source: test_op_level2.py

Example 15: test_conv2d_transpose_nhwc_run

# Required import: from tvm import relay [as alias]
# Or: from tvm.relay import Function [as alias]
def test_conv2d_transpose_nhwc_run():
    dshape_nhwc = (1, 18, 18, 3)
    kshape_hwoi = (3, 3, 10, 3)
    oshape_nhwc = (1, 36, 36, 10)
    x = relay.var("x", shape=dshape_nhwc)
    w = relay.var("w")
    # kshape and kernel_layout should have swapped IO.
    # kshape is HWOI and kernel_layout is HWIO
    y = relay.nn.conv2d_transpose(x, w,
                                  channels=10, kernel_size=(3, 3), strides=(2, 2),
                                  padding=(1, 1), output_padding=(1, 1),
                                  data_layout="NHWC", kernel_layout="HWIO")
    func = relay.Function([x, w], y)
    dtype = "float32"
    data = np.random.uniform(size=dshape_nhwc).astype(dtype)
    kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
    # use true kshape layout here - HWOI

    ref_res = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
                                                        2, 1, output_padding=(1, 1))

    for target, ctx in ctx_list():
        intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
        op_res1 = intrp1.evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) 
Developer: apache, Project: incubator-tvm, Lines: 27, Source: test_op_level2.py


Note: The tvm.relay.Function examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use must follow each project's license. Do not repost without permission.