

Python tvm.contrib Method Code Examples

This article collects typical usage examples of the Python tvm.contrib namespace. If you are wondering what tvm.contrib is for or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the tvm package that this namespace belongs to.


The following shows 15 code examples involving tvm.contrib, sorted by popularity by default.
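Before the individual examples, note the common pattern they all share: tvm.contrib is a namespace of helper submodules (util, clang, random, cublas, rocblas, graph_runtime, ...) that are imported individually as needed. The minimal sketch below only illustrates that import pattern with names that appear in the examples that follow; it assumes the same TVM version as the examples (newer releases renamed tvm.contrib.util to tvm.contrib.utils).

# Minimal sketch (not one of the collected examples): import a tvm.contrib
# submodule and use it, here util.tempdir() as seen in Example 1 below.
from tvm.contrib import util

temp = util.tempdir()                      # scratch directory for build artifacts
lib_path = temp.relpath("deploy_lib.so")   # path a compiled module could be exported to
print(lib_path)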

Example 1: test_cuda_lib

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm import te; import numpy as np
def test_cuda_lib():
    ctx = tvm.gpu(0)
    for device in ["llvm", "cuda"]:
        if not tvm.runtime.enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name='A')
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name='B')
    s = te.create_schedule(B.op)
    bx, tx = s[B].split(B.op.axis[0], factor=4)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))

    from tvm.contrib import util
    temp = util.tempdir()
    fn_add = tvm.build(s, [A, B], target="cuda", target_host="llvm", name="add")
    path_lib = temp.relpath("deploy_lib.so")
    fn_add.export_library(path_lib)
    m = tvm.runtime.load_module(path_lib)
    a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx)
    m['add'](a, b)
    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) 
Developer: apache, Project: incubator-tvm, Lines of code: 27, Source: test_target_codegen_blob.py

Example 2: gemv_impl

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
def gemv_impl():
    cc_code = """
      extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) {
        for (int i = 0; i < m; ++i) {
            for (int j = 0; j < l; ++j) {
                cc[i] += aa[j] * bb[i * stride + j];
            }
        }
        return 0;
      }
    """
    from tvm.contrib import util, clang
    temp = util.tempdir()
    ll_path = temp.relpath("temp.ll")
    # Create LLVM IR from the C source code
    ll_code = clang.create_llvm(cc_code, output=ll_path)
    return ll_code

######################################################################
# Now we leverage the pragma attribute :code:`import_llvm` to import the LLVM IR inline.
# The import needs to happen before the tensorized GEMV is executed.
#
Developer: mlperf, Project: training_results_v0.6, Lines of code: 24, Source: tensorize.py
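To make the :code:`import_llvm` note above concrete, here is a hedged sketch (not part of the original tutorial snippet) of attaching the LLVM IR returned by gemv_impl() to a schedule via the pragma. It uses the te API from Example 1 (on the older API the same calls live directly under tvm), assumes clang is available for gemv_impl(), and the tensor and axis names are placeholders chosen for illustration.

# Hedged sketch: attach the inline LLVM IR produced by gemv_impl() to a
# schedule stage with the `import_llvm` pragma, so it is linked into the
# generated module before the (tensorized) GEMV runs.
import tvm
from tvm import te

N, M, L = 1024, 512, 64
A = te.placeholder((N, L), name="A")
B = te.placeholder((M, L), name="B")
k = te.reduce_axis((0, L), name="k")
C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k), name="C")

s = te.create_schedule(C.op)
x, y = C.op.axis
s[C].pragma(x, "import_llvm", gemv_impl())   # import the IR before this loop nest
print(tvm.lower(s, [A, B, C], simple_mode=True))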

Example 3: test_uniform

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm.contrib import random; import numpy as np
def test_uniform():
    m = 1024
    n = 1024
    A = random.uniform(0, 1, size=(m, n))
    s = tvm.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.uniform", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 0.5) < 1e-2
        assert abs(np.min(na) - 0.0) < 1e-3
        assert abs(np.max(na) - 1.0) < 1e-3
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 24, Source: test_random.py

Example 4: test_normal

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm.contrib import random; import numpy as np
def test_normal():
    m = 1024
    n = 1024
    A = random.normal(3, 4, size=(m, n))
    s = tvm.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.normal", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 3) < 1e-2
        assert abs(np.std(na) - 4) < 1e-2
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 23, Source: test_random.py

Example 5: test_matmul_add

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm.contrib import cublas; import numpy as np
def test_matmul_add():
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((n, l), name='A')
    B = tvm.placeholder((l, m), name='B')
    C = cublas.matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="cuda"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.gpu(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        np.testing.assert_allclose(
            c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 27, Source: test_cublas.py

Example 6: test_matmul_add

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm.contrib import rocblas; import numpy as np
def test_matmul_add():
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((n, l), name='A')
    B = tvm.placeholder((l, m), name='B')
    C = rocblas.matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="rocm"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.rocm(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        np.testing.assert_allclose(
            c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify() 
Developer: mlperf, Project: training_results_v0.6, Lines of code: 27, Source: test_rocblas.py

Example 7: run_tvm

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: import numpy as np; `data` is an input array defined earlier in the source script
def run_tvm(graph, lib, params):
    from tvm.contrib import graph_runtime
    rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0))
    rt_mod.set_input(**params)
    rt_mod.set_input('input', data)
    rt_mod.run()
    tvm_res = rt_mod.get_output(0).asnumpy()
    tvm_pred = np.squeeze(tvm_res).argsort()[-5:][::-1]
    return tvm_pred, rt_mod


###############################################################################
# TFLite inference
# ----------------

###############################################################################
# Run TFLite inference on the quantized model. 
Developer: apache, Project: incubator-tvm, Lines of code: 19, Source: deploy_prequantized_tflite.py
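For comparison with run_tvm above, here is a hedged sketch of the TFLite side using the standard tf.lite.Interpreter API; `tflite_model_buf` (the quantized .tflite model bytes) and `data` (the preprocessed input) are placeholder names assumed to be defined earlier in the tutorial script.

# Hedged sketch: run the quantized model with the TFLite interpreter and
# extract the same top-5 prediction as the TVM path above.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_content=tflite_model_buf)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

interpreter.set_tensor(input_details[0]["index"], data)
interpreter.invoke()

tflite_res = interpreter.get_tensor(output_details[0]["index"])
tflite_pred = np.squeeze(tflite_res).argsort()[-5:][::-1]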

Example 8: get_relay_op

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also assumes a module-level alias `_op` for tvm.relay.op
def get_relay_op(op_name):
    """Get the callable function from Relay based on operator name.
    Parameters
    ----------
    op_name : str
        The Relay operator name.
    """
    if '.' in op_name:
        # explicit hierarchical modules
        op = _op
        try:
            for opn in op_name.split('.'):
                op = getattr(op, opn)
        except AttributeError:
            op = None
    else:
        # try search op in various modules
        for candidate in (_op, _op.nn, _op.image, _op.vision, _op.contrib):
            op = getattr(candidate, op_name, None)
            if op is not None:
                break
    if not op:
        raise tvm.error.OpNotImplemented("Unable to map op_name {} to relay".format(op_name))
    return op 
Developer: apache, Project: incubator-tvm, Lines of code: 26, Source: common.py
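As a usage note, here is a minimal hedged sketch of calling this helper; the import path is an assumption based on the source file listed above (common.py in apache/incubator-tvm).

# Hedged usage sketch: resolve Relay operators by name.
from tvm.relay.frontend.common import get_relay_op

conv2d = get_relay_op("nn.conv2d")   # explicit hierarchical name
relu = get_relay_op("relu")          # found by searching _op, _op.nn, _op.image, ...
print(conv2d, relu)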

Example 9: test_forward_cond

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: import mxnet as mx; import numpy as np; from tvm import relay; `ctx_list` is a helper defined in the test file
def test_forward_cond():
    def verify(a_np, b_np):
        a_nd, b_nd = mx.nd.array(a_np), mx.nd.array(b_np)
        pred = a_nd * b_nd < 5
        then_func = lambda: (a_nd + 5) * (b_nd + 5)
        else_func = lambda: (a_nd - 5) * (b_nd - 5)
        ref_res = mx.nd.contrib.cond(pred, then_func, else_func)

        a_sym, b_sym = mx.sym.var("a"), mx.sym.var("b")
        pred = a_sym * b_sym < 5
        then_func = lambda: (a_sym + 5) * (b_sym + 5)
        else_func = lambda: (a_sym - 5) * (b_sym - 5)
        mx_sym = mx.sym.contrib.cond(pred, then_func, else_func)

        shape_dict = {"a": a_np.shape, "b": b_np.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, ctx in ctx_list():
            for kind in ["debug", "vm"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(a_np, b_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3)

    verify(np.asarray([1.0], 'float32'), np.asarray([2.0],'float32'))
    verify(np.asarray([4.0], 'float32'), np.asarray([3.0],'float32')) 
Developer: apache, Project: incubator-tvm, Lines of code: 26, Source: test_forward.py

Example 10: test_forward_arange_like

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: import mxnet as mx; import numpy as np; from tvm import relay; `ctx_list` is a helper defined in the test file
def test_forward_arange_like():
    def verify(data_shape, start=None, step=None, axis=None):
        attrs = {}
        if start is not None:
            attrs['start'] = start
        if step is not None:
            attrs['step'] = step
        if axis is not None:
            attrs['axis'] = axis
        data = mx.sym.var('data')
        data_np = np.random.uniform(size=data_shape).astype("float32")
        ref_res = mx.nd.contrib.arange_like(mx.nd.array(data_np), **attrs)
        
        mx_sym = mx.sym.contrib.arange_like(data, **attrs)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape})
        for target, ctx in ctx_list():
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()()
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())

    verify(data_shape=(3,), start=0., step=1.)
    verify(data_shape=(3, 4, 5), start=0., step=1.)
    verify(data_shape=(3, 4, 5), start=0., step=1., axis=-1)
    verify(data_shape=(3, 4, 5), start=2., step=3., axis=1) 
Developer: apache, Project: incubator-tvm, Lines of code: 27, Source: test_forward.py

Example 11: test_forward_interleaved_matmul_selfatt_valatt

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: import mxnet as mx; import numpy as np; from tvm import relay; `ctx_list` is a helper defined in the test file
def test_forward_interleaved_matmul_selfatt_valatt():
    def verify(batch, seq_length, num_heads, head_dim):
        data_shape = (seq_length, batch, num_heads * head_dim * 3)
        weight_shape = (batch * num_heads, seq_length, seq_length)
        data = mx.sym.var('data')
        weight = mx.sym.var('weight')
        data_np = np.random.uniform(size=data_shape).astype('float32')
        weight_np = np.random.uniform(size=weight_shape).astype('float32')
        ref_res = mx.nd.contrib.interleaved_matmul_selfatt_valatt(
            mx.nd.array(data_np), mx.nd.array(weight_np), heads=num_heads)

        mx_sym = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
            data, weight, heads=num_heads)
        mod, _ = relay.frontend.from_mxnet(
            mx_sym, {"data": data_shape, "weight": weight_shape})
        for target, ctx in ctx_list():
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(data=data_np, weight=weight_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5)

    verify(1, 10, 4, 16)
    verify(3, 10, 6, 8) 
Developer: apache, Project: incubator-tvm, Lines of code: 25, Source: test_forward.py

Example 12: test_forward_box_decode

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: import mxnet as mx; import numpy as np; from tvm import relay; `ctx_list` is a helper defined in the test file
def test_forward_box_decode():
    def verify(data_shape, anchor_shape, stds=[1, 1, 1, 1], clip=-1, in_format="corner"):
        dtype = "float32"
        data = np.random.uniform(low=-2, high=2, size=data_shape).astype(dtype)
        anchors = np.random.uniform(low=-2, high=2, size=anchor_shape).astype(dtype)
        ref_res = mx.nd.contrib.box_decode(
            mx.nd.array(data), mx.nd.array(anchors),
            stds[0], stds[1], stds[2], stds[3], clip, in_format)
        mx_sym = mx.sym.contrib.box_decode(
            mx.sym.var("data"), mx.sym.var("anchors"),
            stds[0], stds[1], stds[2], stds[3], clip, in_format)
        shape_dict = {"data": data_shape, "anchors": anchor_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(data, anchors)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((1, 10, 4), (1, 10, 4))
    verify((4, 10, 4), (1, 10, 4))
    verify((1, 10, 4), (1, 10, 4), stds=[2, 3, 0.5, 1.5])
    verify((1, 10, 4), (1, 10, 4), clip=1)
    verify((1, 10, 4), (1, 10, 4), in_format="center") 
Developer: apache, Project: incubator-tvm, Lines of code: 22, Source: test_forward.py

Example 13: run_func

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm import relay
def run_func(func, params, x):
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, new_params = relay.build(func, "llvm", params=params)

    from tvm.contrib import graph_runtime
    ctx = tvm.cpu(0)
    dtype = 'float32'
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    m.set_input('data', tvm.nd.array(x.astype(dtype)))
    m.set_input(**new_params)
    # execute
    m.run()
    # get outputs
    tvm_output = m.get_output(0)
    return tvm_output.asnumpy() 
Developer: apache, Project: incubator-tvm, Lines of code: 18, Source: test_sparse_dense_convert.py

Example 14: test_randint

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm import te; from tvm.contrib import random; import numpy as np
def test_randint():
    m = 1024
    n = 1024
    A = random.randint(-127, 128, size=(m, n), dtype='int32')
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.randint", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na)) < 0.2
        assert np.min(na) == -127
        assert np.max(na) == 127
    verify() 
Developer: apache, Project: incubator-tvm, Lines of code: 24, Source: test_random.py

Example 15: test_normal

# Required import: import tvm [as alias]
# Or: from tvm import contrib [as alias]
# The snippet below also uses: from tvm import te; from tvm.contrib import random; import numpy as np
def test_normal():
    m = 1024
    n = 1024
    A = random.normal(3, 4, size=(m, n))
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.normal", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        assert abs(np.mean(na) - 3) < 1e-2
        assert abs(np.std(na) - 4) < 1e-2
    verify() 
Developer: apache, Project: incubator-tvm, Lines of code: 23, Source: test_random.py


Note: The tvm.contrib examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.