Python tvm.contrib Method Code Examples

This page collects typical usage examples of the Python method tvm.contrib. If you are wondering what tvm.contrib does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples from the containing tvm package.


Fifteen code examples of the tvm.contrib method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: test_cuda_lib

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_cuda_lib():
    ctx = tvm.gpu(0)
    for device in ["llvm", "cuda"]:
        if not tvm.runtime.enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name='A')
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
    s = te.create_schedule(B.op)
    bx, tx = s[B].split(B.op.axis[0], factor=4)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))

    from tvm.contrib import util
    temp = util.tempdir()
    fn_add = tvm.build(s, [A, B], target="cuda", target_host="llvm", name="add")
    path_lib = temp.relpath("deploy_lib.so")
    fn_add.export_library(path_lib)
    m = tvm.runtime.load_module(path_lib)
    a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx)
    m['add'](a, b)
    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) 
Developer: apache, Project: incubator-tvm, Lines of code: 27, Source: test_target_codegen_blob.py

Example 2: gemv_impl

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def gemv_impl():
    cc_code = """
      extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) {
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < l; ++j) {
                cc[i] += aa[j] * bb[i * stride + j];
            }
        }
        return 0;
      }
    """
    from tvm.contrib import util, clang
    temp = util.tempdir()
    ll_path = temp.relpath("temp.ll")
    # Create LLVM ir from c source code
    ll_code = clang.create_llvm(cc_code, output=ll_path)
    return ll_code

######################################################################
# Now we leverage the pragma attribute :code:`import_llvm` to import llvm asm inline.
# The import needs to happen before the tensorized GEMV is executed.
# 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 24, Source: tensorize.py
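
To make the comment above concrete, here is a hedged, minimal sketch of where that pragma would be attached. The shapes, compute, split factor, and variable names are illustrative assumptions, not the tutorial's actual tensorized GEMV schedule, and it is written against the older TVM API used in this example.

# Hedged sketch: attach the LLVM IR returned by gemv_impl() to a schedule via the
# "import_llvm" pragma, so the external gemv_update symbol is linked in before any
# tensorized intrinsic that calls it. Shapes and names are illustrative assumptions.
import tvm

N, L = 64, 64
A = tvm.placeholder((N, L), name='A')
B = tvm.placeholder((N, L), name='B')
C = tvm.compute((N, L), lambda i, j: A[i, j] + B[i, j], name='C')

s = tvm.create_schedule(C.op)
yo, yi = s[C].split(C.op.axis[0], factor=16)
# The import must precede the (tensorized) body that uses gemv_update.
s[C].pragma(yo, "import_llvm", gemv_impl())
func = tvm.build(s, [A, B, C], target="llvm")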

Example 3: test_uniform

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_uniform():
    m = 1024
    n = 1024
    A = random.uniform(0, 1, size=(m, n))
    s = tvm.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.uniform", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 0.5) < 1e-2
        assert abs(np.min(na) - 0.0) < 1e-3
        assert abs(np.max(na) - 1.0) < 1e-3
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 24, Source: test_random.py

Example 4: test_normal

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_normal():
    m = 1024
    n = 1024
    A = random.normal(3, 4, size=(m, n))
    s = tvm.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.normal", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 3) < 1e-2
        assert abs(np.std(na) - 4) < 1e-2
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 23, Source: test_random.py

Example 5: test_matmul_add

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_matmul_add():
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((n, l), name='A')
    B = tvm.placeholder((l, m), name='B')
    C = cublas.matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="cuda"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.gpu(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        np.testing.assert_allclose(
            c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 27, Source: test_cublas.py

Example 6: test_matmul_add

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_matmul_add():
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((n, l), name='A')
    B = tvm.placeholder((l, m), name='B')
    C = rocblas.matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="rocm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.rocm(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        np.testing.assert_allclose(
            c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 27, Source: test_rocblas.py

Example 7: run_tvm

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def run_tvm(graph, lib, params):
    from tvm.contrib import graph_runtime
    rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
    rt_mod.set_input(**params)
    rt_mod.set_input('input', data)
    rt_mod.run()
    tvm_res = rt_mod.get_output(0).asnumpy()
    tvm_pred = np.squeeze(tvm_res).argsort()[-5:][::-1]
    return tvm_pred, rt_mod


###############################################################################
# TFLite inference
# ----------------

###############################################################################
# Run TFLite inference on the quantized model. 
Developer: apache, Project: incubator-tvm, Lines of code: 19, Source: deploy_prequantized_tflite.py
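
The TFLite reference path that the comments describe is not part of this snippet. As a hedged sketch (an assumption, not the tutorial's exact code), it might look like the following, with the top-5 post-processing mirroring run_tvm above; the function name run_tflite and its parameters are illustrative.

import numpy as np
import tensorflow as tf

def run_tflite(model_path, data):
    # Load the quantized .tflite model and run a single inference.
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]["index"], data)
    interpreter.invoke()
    tflite_res = interpreter.get_tensor(output_details[0]["index"])
    # Top-5 class indices, matching the post-processing in run_tvm.
    return np.squeeze(tflite_res).argsort()[-5:][::-1]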

Example 8: get_relay_op

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def get_relay_op(op_name):
    """Get the callable function from Relay based on operator name.
    Parameters
    ----------
    op_name : str
        The Relay operator name.
    """
    if '.' in op_name:
        # explicit hierachical modules
        op = _op
        try:
            for opn in op_name.split('.'):
                op = getattr(op, opn)
        except AttributeError:
            op = None
    else:
        # try search op in various modules
        for candidate in (_op, _op.nn, _op.image, _op.vision, _op.contrib):
            op = getattr(candidate, op_name, None)
            if op is not None:
                break
    if not op:
        raise tvm.error.OpNotImplemented("Unable to map op_name {} to relay".format(op_name))
    return op 
Developer: apache, Project: incubator-tvm, Lines of code: 26, Source: common.py
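
A hedged usage sketch (assuming get_relay_op is importable from the converter's common module named in the attribution above):

from tvm.relay.frontend.common import get_relay_op

conv2d = get_relay_op("nn.conv2d")  # hierarchical name: resolved attribute by attribute
relu = get_relay_op("relu")         # flat name: searched across _op, _op.nn, _op.image, ...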

Example 9: test_forward_cond

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_forward_cond():
    def verify(a_np, b_np):
        a_nd, b_nd = mx.nd.array(a_np), mx.nd.array(b_np)
        pred = a_nd * b_nd < 5
        then_func = lambda: (a_nd + 5) * (b_nd + 5)
        else_func = lambda: (a_nd - 5) * (b_nd - 5)
        ref_res = mx.nd.contrib.cond(pred, then_func, else_func)

        a_sym, b_sym = mx.sym.var("a"), mx.sym.var("b")
        pred = a_sym * b_sym < 5
        then_func = lambda: (a_sym + 5) * (b_sym + 5)
        else_func = lambda: (a_sym - 5) * (b_sym - 5)
        mx_sym = mx.sym.contrib.cond(pred, then_func, else_func)

        shape_dict = {"a": a_np.shape, "b": b_np.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, ctx in ctx_list():
            for kind in ["debug", "vm"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(a_np, b_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3)

    verify(np.asarray([1.0], 'float32'), np.asarray([2.0],'float32'))
    verify(np.asarray([4.0], 'float32'), np.asarray([3.0],'float32')) 
Developer: apache, Project: incubator-tvm, Lines of code: 26, Source: test_forward.py

Example 10: test_forward_arange_like

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_forward_arange_like():
    def verify(data_shape, start=None, step=None, axis=None):
        attrs = {}
        if start is not None:
            attrs['start'] = start
        if step is not None:
            attrs['step'] = step
        if axis is not None:
            attrs['axis'] = axis
        data = mx.sym.var('data')
        data_np = np.random.uniform(size=data_shape).astype("float32")
        ref_res = mx.nd.contrib.arange_like(mx.nd.array(data_np), **attrs)
        
        mx_sym = mx.sym.contrib.arange_like(data, **attrs)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape})
        for target, ctx in ctx_list():
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()()
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())

    verify(data_shape=(3,), start=0., step=1.)
    verify(data_shape=(3, 4, 5), start=0., step=1.)
    verify(data_shape=(3, 4, 5), start=0., step=1., axis=-1)
    verify(data_shape=(3, 4, 5), start=2., step=3., axis=1) 
Developer: apache, Project: incubator-tvm, Lines of code: 27, Source: test_forward.py

Example 11: test_forward_interleaved_matmul_selfatt_valatt

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_forward_interleaved_matmul_selfatt_valatt():
    def verify(batch, seq_length, num_heads, head_dim):
        data_shape = (seq_length, batch, num_heads * head_dim * 3)
        weight_shape = (batch * num_heads, seq_length, seq_length)
        data = mx.sym.var('data')
        weight = mx.sym.var('weight')
        data_np = np.random.uniform(size=data_shape).astype('float32')
        weight_np = np.random.uniform(size=weight_shape).astype('float32')
        ref_res = mx.nd.contrib.interleaved_matmul_selfatt_valatt(
            mx.nd.array(data_np), mx.nd.array(weight_np), heads=num_heads)

        mx_sym = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
            data, weight, heads=num_heads)
        mod, _ = relay.frontend.from_mxnet(
            mx_sym, {"data": data_shape, "weight": weight_shape})
        for target, ctx in ctx_list():
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(data=data_np, weight=weight_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5)

    verify(1, 10, 4, 16)
    verify(3, 10, 6, 8) 
Developer: apache, Project: incubator-tvm, Lines of code: 25, Source: test_forward.py

Example 12: test_forward_box_decode

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_forward_box_decode():
    def verify(data_shape, anchor_shape, stds=[1, 1, 1, 1], clip=-1, in_format="corner"):
        dtype = "float32"
        data = np.random.uniform(low=-2, high=2, size=data_shape).astype(dtype)
        anchors = np.random.uniform(low=-2, high=2, size=anchor_shape).astype(dtype)
        ref_res = mx.nd.contrib.box_decode(mx.nd.array(data), mx.nd.array(anchors), stds[0], stds[1], stds[2], stds[3], clip, in_format)
        mx_sym = mx.sym.contrib.box_decode(mx.sym.var("data"), mx.sym.var("anchors"), stds[0], stds[1], stds[2], stds[3], clip, in_format)
        shape_dict = {"data": data_shape, "anchors": anchor_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(data, anchors)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((1, 10, 4), (1, 10, 4))
    verify((4, 10, 4), (1, 10, 4))
    verify((1, 10, 4), (1, 10, 4), stds=[2, 3, 0.5, 1.5])
    verify((1, 10, 4), (1, 10, 4), clip=1)
    verify((1, 10, 4), (1, 10, 4), in_format="center") 
Developer: apache, Project: incubator-tvm, Lines of code: 22, Source: test_forward.py

Example 13: run_func

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def run_func(func, params, x):
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, new_params = relay.build(func, "llvm", params=params)

    from tvm.contrib import graph_runtime
    ctx = tvm.cpu(0)
    dtype = 'float32'
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    m.set_input('data', tvm.nd.array(x.astype(dtype)))
    m.set_input(**new_params)
    # execute
    m.run()
    # get outputs
    tvm_output = m.get_output(0)
    return tvm_output.asnumpy() 
Developer: apache, Project: incubator-tvm, Lines of code: 18, Source: test_sparse_dense_convert.py

Example 14: test_randint

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_randint():
    m = 1024
    n = 1024
    A = random.randint(-127, 128, size=(m, n), dtype='int32')
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.randint", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na)) < 0.2
        assert np.min(na) == -127
        assert np.max(na) == 127
    verify() 
Developer: apache, Project: incubator-tvm, Lines of code: 24, Source: test_random.py

Example 15: test_normal

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def test_normal():
    m = 1024
    n = 1024
    A = random.normal(3, 4, size=(m, n))
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.normal", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 3) < 1e-2
        assert abs(np.std(na) - 4) < 1e-2
    verify() 
Developer: apache, Project: incubator-tvm, Lines of code: 23, Source: test_random.py


Note: The tvm.contrib examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.