

Python tvm.cpu Method Code Examples

This article collects typical usage examples of the tvm.cpu method in Python. If you are wondering how exactly to call tvm.cpu, how to use it in practice, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the tvm module that provides this method.


A total of 15 code examples of the tvm.cpu method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
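Before diving into the examples, here is a minimal sketch of what tvm.cpu does (this sketch is not taken from any of the projects below; the array shape and values are arbitrary placeholders): it returns a CPU device/context object, which is then passed to routines such as tvm.nd.array so that data is allocated, and compiled functions are run, on that device.

import numpy as np
import tvm

ctx = tvm.cpu(0)  # device/context object for CPU device 0
a = tvm.nd.array(np.zeros((2, 3), dtype="float32"), ctx)  # allocate an NDArray on that device
print(a.shape, a.dtype)  # (2, 3) float32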

Example 1: tvm_unpool1d_cpu

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def tvm_unpool1d_cpu(B, C, L, kernel_size, stride, padding, number=10, dev=0):
    Input = torch.rand([B, C, L], dtype=torch.float32).cuda("cuda:" + str(dev))
    maxpool = torch.nn.MaxPool1d(kernel_size, stride=stride, padding=padding, return_indices=True).cuda("cuda:" + str(dev))
    Input, indices = maxpool(Input)
    Input = Input.cpu()
    indices = indices.cpu()

    ops, bufs = maxunpooling1d(B, C, Input.shape[2], kernel_size, stride, padding)
    s = tvm.te.create_schedule(ops)
    ctx = tvm.cpu(dev)
    f = tvm.build(s, bufs, 'llvm')

    im = tvm.nd.array(Input.numpy().astype(np.float32), ctx)
    fi = tvm.nd.array(indices.numpy().astype(np.float32), ctx)

    in_length = Input.shape[2]
    out_length = (in_length - 1) * stride - 2 * padding + kernel_size
    output_shape = (B, C, out_length)
    un = tvm.nd.array(np.zeros(output_shape).astype(np.float32), ctx)

    start_time = time.time()
    for i in range(number):
        f(im, fi, un)
    end_time = time.time()
    return (end_time - start_time) * 1e3 / number 
Developer: KnowingNothing, Project: FlexTensor, Lines: 27, Source: unpooling1d_baseline.py

Example 2: tvm_unpool1d_cuda

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def tvm_unpool1d_cuda(B, C, L, kernel_size, stride, padding, number=10, dev=0):
    Input = torch.rand([B, C, L], dtype=torch.float32).cuda("cuda:" + str(dev))
    maxpool = torch.nn.MaxPool1d(kernel_size, stride=stride, padding=padding, return_indices=True).cuda("cuda:" + str(dev))
    Input, indices = maxpool(Input)
    Input = Input.cpu()
    indices = indices.cpu()

    ops, bufs = maxunpooling1d(B, C, Input.shape[2], kernel_size, stride, padding)
    s = tvm.te.create_schedule(ops)
    f = tvm.build(s, bufs, "cuda")
    ctx = tvm.context("cuda", dev_id=dev)

    im = tvm.nd.array(Input.numpy().astype(np.float32), ctx)
    fi = tvm.nd.array(indices.numpy().astype(np.float32), ctx)

    in_length = Input.shape[2]
    out_length = (in_length - 1) * stride - 2 * padding + kernel_size
    output_shape = (B, C, out_length)
    un = tvm.nd.array(np.zeros(output_shape).astype(np.float32), ctx)

    start_time = time.time()
    for i in range(number):
        f(im, fi, un)
    end_time = time.time()
    return (end_time - start_time) * 1e3 / number 
Developer: KnowingNothing, Project: FlexTensor, Lines: 27, Source: unpooling1d_baseline.py

Example 3: tvm_PixelCNN_cpu

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def tvm_PixelCNN_cpu(B, H, W, C, out_C, kernel_height, kernel_width, mask_type, bias, dilation, stride, padding, number=10, dev=0):
    Input = torch.rand([B, H, W, C], dtype=torch.float32)
    Kernel = torch.zeros([out_C, C, kernel_height, kernel_width], dtype=torch.float32)

    ops, bufs = pixelcnn(B, H, W, C, out_C, kernel_height, kernel_width, mask_type, bias, dilation=dilation, stride=stride, padding=padding)
    ctx = tvm.cpu(dev_id=dev)
    s = tvm.te.create_schedule(ops)
    f = tvm.build(s, bufs, "llvm")

    im = tvm.nd.array(Input.numpy().astype(np.float32), ctx)
    fi = tvm.nd.array(Kernel.numpy().astype(np.float32), ctx)

    in_height = H
    in_width = W
    out_height = (H + 2 * padding - dilation * (kernel_height - 1) - 1) // stride + 1
    out_width = (W + 2 * padding - dilation * (kernel_width - 1) - 1) // stride + 1
    output_shape = (B, out_height, out_width, out_C)
    un = tvm.nd.array(np.zeros(output_shape).astype(np.float32), ctx)

    start_time = time.time()
    for i in range(number):
        f(im, fi, un)
    end_time = time.time()
    return (end_time - start_time) * 1e3 / number 
Developer: KnowingNothing, Project: FlexTensor, Lines: 26, Source: pixelCNN_baseline.py

Example 4: _get_tvm_output

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def _get_tvm_output(net, data):
    '''Compute TVM output'''
    dtype = 'float32'
    sym, params = frontend.darknet.from_darknet(net, dtype)

    target = 'llvm'
    shape_dict = {'data': data.shape}
    graph, library, params = nnvm.compiler.build(sym, target, shape_dict, dtype, params=params)
    # Execute on TVM
    ctx = tvm.cpu(0)
    m = graph_runtime.create(graph, library, ctx)
    # set inputs
    m.set_input('data', tvm.nd.array(data.astype(dtype)))
    m.set_input(**params)
    m.run()
    # get outputs
    out_shape = (net.outputs,)
    tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
    return tvm_out 
Developer: mlperf, Project: training_results_v0.6, Lines: 21, Source: test_forward.py

Example 5: test_nhwc

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_nhwc():
    data_shape = (1, 3, 224, 224)
    out_channel = 8
    nchw_sym = get_sym("NCHW", "OIHW", out_channel)
    nhwc_sym = get_sym("NHWC", "HWIO", out_channel)
    conv_weight = np.random.uniform(-1, 1, (out_channel, 3, 3, 3)).astype(np.float32)
    conv_bias = np.random.uniform(-1, 1, (out_channel)).astype(np.float32)
    nchw_params = {
        "conv2d0_weight" : tvm.nd.array(conv_weight, ctx=tvm.cpu(0)),
        "conv2d0_bias" : tvm.nd.array(conv_bias, ctx=tvm.cpu(0))
    }
    nhwc_params = {
        "conv2d1_weight" : tvm.nd.array(conv_weight.transpose(2, 3, 1, 0), ctx=tvm.cpu(0)),
        "conv2d1_bias" : tvm.nd.array(conv_bias, ctx=tvm.cpu(0))
    }

    data = np.random.uniform(-1, 1, data_shape).astype(np.float32)
    oshape = (1, out_channel, 224, 224)
    oshape_nhwc = (1, 224, 224, out_channel)
    nchw_output = build_and_run(nchw_sym, nchw_params, data, oshape)
    nhwc_output = build_and_run(nhwc_sym, nhwc_params, data.transpose(0, 2, 3, 1), oshape_nhwc)
    np.testing.assert_allclose(nchw_output, nhwc_output.transpose(0, 3, 1, 2), rtol=1e-5, atol=1e-5) 
Developer: mlperf, Project: training_results_v0.6, Lines: 24, Source: test_nhwc_layout.py

Example 6: test_precompute_prune

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_precompute_prune():
    x = sym.Variable("x") + 1
    a = sym.Variable("a")
    y = sym.Variable("y")
    z = y + x + a
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    params = {"x": nx, "a": na}
    graph, lib, params = nnvm.compiler.build(
        z, "llvm", shape={"y": ny.shape}, params=params)
    assert graph.index.num_nodes == 4
    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    params["y"] = ny
    res = tvm.nd.empty(shape)
    m["load_params"](nnvm.compiler.save_param_dict(params))
    m.run()
    out = m.get_output(0, out=res)
    np.testing.assert_allclose(
        res.asnumpy(), nx.asnumpy() + 1 + ny.asnumpy() + na.asnumpy()) 
Developer: mlperf, Project: training_results_v0.6, Lines: 24, Source: test_build.py

Example 7: test_ndarray_output

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_ndarray_output():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = x + y
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    params = {"x": nx, "ny": ny}
    graph, lib, params = nnvm.compiler.build(
        z, "llvm", shape={"y": ny.shape, "x": nx.shape}, params=params)
    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    m.set_input("x", nx)
    m.set_input("y", ny)
    m.run()
    out = m.get_output(0)
    np.testing.assert_allclose(
        out.asnumpy(), nx.asnumpy() + ny.asnumpy()) 
Developer: mlperf, Project: training_results_v0.6, Lines: 20, Source: test_build.py

Example 8: test_ndarray_input

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_ndarray_input():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = x + y
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    params = {"x": nx, "ny": ny}
    graph, lib, params = nnvm.compiler.build(
        z, "llvm", shape={"y": ny.shape, "x": nx.shape}, params=params)
    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    m.set_input("x", nx)
    m.set_input("y", ny)
    in_x = tvm.nd.empty(shape, dtype)
    in_y = tvm.nd.empty(shape, dtype)
    m.get_input("x", in_x)
    m.get_input("y", in_y)
    np.testing.assert_allclose(nx.asnumpy(), in_x.asnumpy())
    np.testing.assert_allclose(ny.asnumpy(), in_y.asnumpy())
    in_nx = m.get_input("x")
    in_ny = m.get_input("y")
    np.testing.assert_allclose(nx.asnumpy(), in_nx.asnumpy())
    np.testing.assert_allclose(ny.asnumpy(), in_ny.asnumpy()) 
Developer: mlperf, Project: training_results_v0.6, Lines: 26, Source: test_build.py

Example 9: _convert_to_value

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def _convert_to_value(arg, ctxt=tvm.cpu(0)):
    # type: (Any, tvm.Context) -> tvm.nd.NDArray
    """Convert Python values into the appropriate types
       for the Relay evaluator.
    """
    if isinstance(arg, bool): # bool is subclass of int
        return tvm.nd.array(np.array(arg, dtype='uint8'), ctxt)
    elif isinstance(arg, int):
        return tvm.nd.array(np.array(arg, dtype='int32'), ctxt)
    elif isinstance(arg, float):
        return tvm.nd.array(arg, ctxt)
    elif isinstance(arg, np.ndarray):
        return tvm.nd.array(arg, ctxt)
    elif isinstance(arg, tvm.ndarray.NDArray):
        return arg
    else:
        # raise Exception(f"can't convert {type(arg)} to a Relay AST")
        raise Exception("unsupported argument type {0}".format(type(arg))) 
Developer: mlperf, Project: training_results_v0.6, Lines: 20, Source: ir_builder.py

Example 10: test_combination

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_combination():
    k = 3
    n = 5
    m = 10
    x = tvm.var('x')
    A = tvm.placeholder((n, m), name='A')
    B = tvm.placeholder((n, m), name='B')
    C = tvm.placeholder((n, m), name='C')
    D = k + A - B * C / x
    s = tvm.create_schedule(D.op)
    foo = tvm.build(s, [x, A, B, C, D], "llvm")
    ctx = tvm.cpu(0)
    x = 2
    a = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), ctx)
    b = tvm.nd.array(np.random.uniform(size=(n, m)).astype(B.dtype), ctx)
    c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), ctx)
    d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), ctx)
    foo(x, a, b, c, d)
    np.testing.assert_allclose(d.asnumpy(), k + a.asnumpy() - b.asnumpy() * c.asnumpy() / x) 
Developer: mlperf, Project: training_results_v0.6, Lines: 21, Source: test_lang_tensor_overload_op.py

Example 11: test_llvm_flip_pipeline

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_llvm_flip_pipeline():
    def check_llvm(nn, base):
        if not tvm.module.enabled("llvm"):
            return
        n = tvm.convert(nn)
        A = tvm.placeholder((n + base), name='A')
        C = tvm.compute((n,), lambda i: A(nn + base - i - 1), name='C')
        s = tvm.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], factor=4)
        s[C].parallel(xo)
        s[C].vectorize(xi)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        ctx = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=(n + base)).astype(A.dtype), ctx)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), ctx)
        f(a, c)
        np.testing.assert_allclose(
            c.asnumpy(), a.asnumpy()[::-1][:n])
    check_llvm(4, 0)
    check_llvm(128, 8)
    check_llvm(3, 0)
    check_llvm(128, 1) 
Developer: mlperf, Project: training_results_v0.6, Lines: 27, Source: test_codegen_llvm.py

Example 12: test_llvm_madd_pipeline

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_llvm_madd_pipeline():
    def check_llvm(nn, base, stride):
        if not tvm.module.enabled("llvm"):
            return
        n = tvm.convert(nn)
        A = tvm.placeholder((n + base, stride), name='A')
        C = tvm.compute((n, stride), lambda i, j: A(base + i, j) + 1, name='C')
        s = tvm.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], factor=4)
        s[C].parallel(xo)
        s[C].vectorize(xi)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        ctx = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=(n + base, stride)).astype(A.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, stride), dtype=C.dtype), ctx)
        f(a, c)
        np.testing.assert_allclose(
            c.asnumpy(), a.asnumpy()[base:] + 1)
    check_llvm(64, 0, 2)
    check_llvm(4, 0, 1)
    with tvm.build_config(restricted_func=False):
        check_llvm(4, 0, 3) 
Developer: mlperf, Project: training_results_v0.6, Lines: 27, Source: test_codegen_llvm.py

Example 13: test_llvm_temp_space

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_llvm_temp_space():
    nn = 1024
    n = tvm.convert(nn)
    A = tvm.placeholder((n,), name='A')
    B = tvm.compute(A.shape, lambda i: A(i) + 1, name='B')
    C = tvm.compute(A.shape, lambda i: B(i) + 1, name='C')
    s = tvm.create_schedule(C.op)

    def check_llvm():
        if not tvm.module.enabled("llvm"):
            return
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        ctx = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), ctx)
        f(a, c)
        np.testing.assert_allclose(
            c.asnumpy(), a.asnumpy() + 1 + 1)
    check_llvm() 
Developer: mlperf, Project: training_results_v0.6, Lines: 24, Source: test_codegen_llvm.py

Example 14: test_llvm_select

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_llvm_select():
    def check_llvm(n, offset):
        if not tvm.module.enabled("llvm"):
            return
        A = tvm.placeholder((n, ), name='A')
        C = tvm.compute((n,), lambda i: tvm.select(i >= offset, A[i], 0.0), name='C')
        s = tvm.create_schedule(C.op)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        ctx = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)
        c = tvm.nd.empty((n,), A.dtype, ctx)
        f(a, c)
        c_np = a.asnumpy()
        c_np[:offset] = 0
        np.testing.assert_allclose(c.asnumpy(), c_np)
    check_llvm(64, 8) 
Developer: mlperf, Project: training_results_v0.6, Lines: 20, Source: test_codegen_llvm.py

Example 15: test_llvm_bool

# Required module: import tvm [as alias]
# Or: from tvm import cpu [as alias]
def test_llvm_bool():
    def check_llvm(n):
        if not tvm.module.enabled("llvm"):
            return
        A = tvm.placeholder((n, ), name='A', dtype="int32")
        C = tvm.compute((n,), lambda i: A[i].equal(1).astype("float"), name='C')
        s = tvm.create_schedule(C.op)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        ctx = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), ctx)
        c = tvm.nd.empty((n,), C.dtype, ctx)
        f(a, c)
        c_np = a.asnumpy() == 1
        np.testing.assert_allclose(c.asnumpy(), c_np)
    check_llvm(64) 
Developer: mlperf, Project: training_results_v0.6, Lines: 19, Source: test_codegen_llvm.py


Note: The tvm.cpu method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please follow the corresponding project's License when distributing or using the code, and do not reproduce this article without permission.