

Python tvm.context Method Code Examples

This article collects typical usage examples of the tvm.context method in Python. If you have been wondering how to use tvm.context, what it is for, or what real calls look like, the curated code examples below may help. You can also explore further usage examples from the tvm module, where this method lives.


The following presents 15 code examples of the tvm.context method, ordered by popularity by default.
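Before the examples, here is a minimal sketch of what tvm.context does, assuming a pre-0.8 TVM (the method was later renamed tvm.device); the "llvm" target and device id 0 are illustrative choices:

import numpy as np
import tvm

# Create a TVMContext handle for CPU device 0.
ctx = tvm.context("llvm", 0)
print(ctx.exist)  # True only if the device is actually present

# Arrays allocated with this context live on that device.
arr = tvm.nd.array(np.zeros((2, 3), dtype="float32"), ctx)
print(arr.ctx)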

Example 1: _evaluate

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def _evaluate(s, bufs, target, dev_id, number=1, q=None):
    ctx = tvm.context(target, dev_id)
    tvm_arys = []
    for arg in bufs:
        shape = utils.to_tuple(arg.shape)
        tmp = np.random.uniform(-10, 10, size=shape).astype(arg.dtype)
        tmp = tvm.nd.array(tmp, ctx)
        tvm_arys.append(tmp)
    func, evaluator = None, None
    try:
        func = tvm.build(s, bufs, target)
        evaluator = func.time_evaluator(func.entry_name, ctx, number=number)
        time_cost = evaluator(*tvm_arys).mean * 1e3
        if q:
            q.put(time_cost)
        return time_cost
    except Exception as e:
        for item in tvm_arys:
            del item
        if func is not None:
            del func
        if evaluator is not None:
            del evaluator
        raise e 
Developer: KnowingNothing, Project: FlexTensor, Lines: 26, Source: train.py
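A hypothetical driver for the _evaluate helper above; this is a sketch, not part of the original project. It assumes a TVM 0.7-style tvm.te API, and the vector-add compute and "llvm" target are purely illustrative:

import numpy as np
import tvm
from tvm import te

# Illustrative compute: B[i] = A[i] + 1 over 1024 elements.
n = 1024
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
s = te.create_schedule(B.op)

# Mean runtime in milliseconds on CPU device 0, averaged over 10 runs.
cost = _evaluate(s, [A, B], "llvm", dev_id=0, number=10)
print("mean time: %.3f ms" % cost)

The optional q parameter accepts a multiprocessing queue, so the measurement can be reported back when _evaluate runs in a child process.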

Example 2: verify_code

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def verify_code(stmt, target, dev_id):
    if target == "cuda":
        ctx = tvm.nd.context(target, dev_id)  # same as tvm.context(target, dev_id)
        if not ctx.exist:
            # print("Fail to get device %s devid=%d"%(target, dev_id))
            return False
        max_dims = ctx.max_thread_dimensions
        check_gpu = {
            "max_shared_memory_per_block": ctx.max_shared_memory_per_block,
            "max_threads_per_block": ctx.max_threads_per_block,
            "max_thread_x": max_dims[0],
            "max_thread_y": max_dims[1],
            "max_thread_z": max_dims[2]
        }
        valid = tvm.tir.ir_pass.VerifyGPUCode(stmt, check_gpu)
        return valid
    else:
        # no device-limit check for other targets
        return True 
Developer: KnowingNothing, Project: FlexTensor, Lines: 21, Source: scheduler.py
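A hedged sketch of driving verify_code (not from the original project): it assumes the TVM 0.7-style tvm.te API and that tvm.lower(..., simple_mode=True) yields a statement VerifyGPUCode accepts, matching the project's own usage elsewhere; the kernel and split factor are illustrative:

import tvm
from tvm import te

# Illustrative elementwise kernel bound to CUDA threads.
n = 4096
A = te.placeholder((n,), name="A", dtype="float32")
B = te.compute((n,), lambda i: A[i] * 2.0, name="B")
s = te.create_schedule(B.op)
bx, tx = s[B].split(B.op.axis[0], factor=256)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))

# Lower to a statement and check it against the limits of CUDA device 0.
stmt = tvm.lower(s, [A, B], simple_mode=True)
print(verify_code(stmt, "cuda", 0))  # True if within the device's limits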

Example 3: _evaluate

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def _evaluate(s, bufs, target, dev_id, number=1, q=None):
    ctx = tvm.context(target, dev_id)
    tvm_arys = []
    for arg in bufs:
        shape = to_tuple(arg.shape)
        tmp = np.random.uniform(-10, 10, size=shape).astype(arg.dtype)
        tmp = tvm.nd.array(tmp, ctx)
        tvm_arys.append(tmp)
    func, evaluator = None, None
    try:
        func = tvm.build(s, bufs, target)
        evaluator = func.time_evaluator(func.entry_name, ctx, number=number)
        time_cost = evaluator(*tvm_arys).mean * 1e3
        if q:
            q.put(time_cost)
        return time_cost
    except Exception as e:
        for item in tvm_arys:
            del item
        if func is not None:
            del func
        if evaluator is not None:
            del evaluator
        raise e 
Developer: KnowingNothing, Project: FlexTensor, Lines: 26, Source: measure.py

Example 4: tvm_unpool1d_cuda

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def tvm_unpool1d_cuda(B, C, L, kernel_size, stride, padding, number=10, dev=0):
    Input = torch.rand([B, C, L], dtype=torch.float32).cuda("cuda:" + str(dev))
    maxpool = torch.nn.MaxPool1d(kernel_size, stride=stride, padding=padding, return_indices=True).cuda("cuda:" + str(dev))
    Input, indices = maxpool(Input)
    Input = Input.cpu()
    indices = indices.cpu()

    ops, bufs = maxunpooling1d(B, C, Input.shape[2], kernel_size, stride, padding)
    s = tvm.te.create_schedule(ops)
    f = tvm.build(s, bufs, "cuda")
    ctx = tvm.context("cuda", dev_id=dev)

    im = tvm.nd.array(Input.numpy().astype(np.float32), ctx)
    fi = tvm.nd.array(indices.numpy().astype(np.float32), ctx)

    in_length = Input.shape[2]
    out_length = (in_length - 1) * stride - 2 * padding + kernel_size
    output_shape = (B, C, out_length)
    un = tvm.nd.array(np.zeros(output_shape).astype(np.float32), ctx)

    start_time = time.time()
    for i in range(number):
        f(im, fi, un)
    end_time = time.time()
    return (end_time - start_time) * 1e3 / number 
Developer: KnowingNothing, Project: FlexTensor, Lines: 27, Source: unpooling1d_baseline.py

Example 5: check_result

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def check_result(configs, shape, target="cuda", dev_id=0):
    ctx = tvm.context(target, dev_id)
    name, configs = configs
    batch, in_channel, H, W, out_channel, k, _, stride, padding, dilation, groups = shape
    A_np = np.random.uniform(-10, 10, size=[batch, in_channel, H, W]).astype("float32")
    A_tvm = tvm.nd.array(A_np, ctx)
    A_torch = torch.tensor(A_np)    # .cuda("cuda:" + str(dev_id))
    W_np = np.random.uniform(-10, 10, size=[out_channel, in_channel//groups, k, k]).astype("float32")
    W_tvm = tvm.nd.array(W_np, ctx)
    W_torch = torch.tensor(W_np)    # .cuda("cuda:" + str(dev_id))
    Output_torch = torch.nn.functional.conv2d(A_torch, W_torch, stride=stride, padding=padding, dilation=dilation, groups=groups)
    Output_np = np.zeros(Output_torch.shape).astype(np.float32)
    Output_tvm = tvm.nd.array(Output_np, ctx)
    s, bufs = schedule_with_config(name, configs)
    func = tvm.build(s, bufs, target)
    func(A_tvm, W_tvm, Output_tvm)
    passed = test_allclose(Output_tvm.asnumpy(), Output_torch.cpu().numpy(), rtol=1e-5, print_diff=True)
    if passed == 1:
        print("Passed!")
    else:
        print("Failed!") 
Developer: KnowingNothing, Project: FlexTensor, Lines: 23, Source: check_grouped_results.py

Example 6: __init__

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def __init__(self, model, train_loader, num_classes, criterion,
                 lr: Union[float, Callable[[int], float]],  # lr(epoch) -> lr
                 debug_mode=False, print_freq=1000, target='llvm', dtype='float64'):
        self.model = model
        self.train_loader = train_loader
        self.num_classes = num_classes
        self.criterion = criterion
        self.lr = lr if isinstance(lr, float) else lr(0)
        self._lr_func = lr if not isinstance(lr, float) else lambda epoch: lr

        self.debug_mode = debug_mode
        self.print_freq = print_freq
        self.target = target
        self.dtype = dtype
        self.ctx = tvm.context(target)

        self._build_func()
        self._allocate_buffers_for_endpoints()
        self._initialize_weights() 
Developer: KnowingNothing, Project: FlexTensor, Lines: 21, Source: lenet-CEloss-new-api.py

Example 7: test_mean

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_mean():
    #################################
    # test basic case
    inputs_np = np.random.random([2, 3, 27, 3, 17]).astype(np.float32) * 100
    
    inputs_torch = torch.tensor(inputs_np)
    output_torch = torch.mean(inputs_torch, dim=2)

    tvm_ctx = tvm.context("llvm", 0)
    inputs_tvm = tvm.nd.array(inputs_np, tvm_ctx)
    output_tvm = tvm.nd.array(np.zeros(output_torch.shape).astype(np.float32), tvm_ctx)
    inputs_t = tvm.te.placeholder(inputs_np.shape, dtype="float32")
    output_t = mean(inputs_t, dim=2)
    s = tvm.te.create_schedule(output_t.op)
    func = tvm.build(s, [inputs_t, output_t], "llvm")
    func(inputs_tvm, output_tvm)

    passed = test_allclose(output_tvm.asnumpy(), output_torch.numpy(), rtol=1e-5, print_diff=True)
    if passed == 1:
        print("Mean basic case passed!")
    else:
        print("Mean basic case failed!") 
Developer: KnowingNothing, Project: FlexTensor, Lines: 24, Source: test_ops.py

Example 8: test_variance

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_variance():
    #################################
    # test basic case
    inputs_np = np.random.random([2, 3, 27, 3, 17]).astype(np.float32) * 100
    
    inputs_torch = torch.tensor(inputs_np)
    output_torch = inputs_torch.var(dim=2)

    tvm_ctx = tvm.context("llvm", 0)
    inputs_tvm = tvm.nd.array(inputs_np, tvm_ctx)
    output_tvm = tvm.nd.array(np.zeros(output_torch.shape).astype(np.float32), tvm_ctx)
    inputs_t = tvm.te.placeholder(inputs_np.shape, dtype="float32")
    output_t = variance(inputs_t, dim=2)
    s = tvm.te.create_schedule(output_t.op)
    func = tvm.build(s, [inputs_t, output_t], "llvm")
    func(inputs_tvm, output_tvm)

    passed = test_allclose(output_tvm.asnumpy(), output_torch.numpy(), rtol=1e-5, print_diff=True)
    if passed == 1:
        print("Variance basic case passed!")
    else:
        print("Variance basic case failed!") 
Developer: KnowingNothing, Project: FlexTensor, Lines: 24, Source: test_ops.py

Example 9: test_batch_norm

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_batch_norm():
    #################################
    # test basic case
    inputs_np = np.random.random([100, 200]).astype(np.float32) * 100
    
    inputs_torch = torch.tensor(inputs_np)
    running_mean = torch.mean(inputs_torch, dim=0)
    running_var = inputs_torch.var(dim=0)
    output_torch = torch.nn.functional.batch_norm(inputs_torch, running_mean, running_var)

    tvm_ctx = tvm.context("llvm", 0)
    inputs_tvm = tvm.nd.array(inputs_np, tvm_ctx)
    output_tvm = tvm.nd.array(np.zeros(output_torch.shape).astype(np.float32), tvm_ctx)
    inputs_t = tvm.te.placeholder(inputs_np.shape, dtype="float32")
    output_t = batch_normalization2d(inputs_t)
    s = tvm.te.create_schedule(output_t.op)
    func = tvm.build(s, [inputs_t, output_t], "llvm")
    func(inputs_tvm, output_tvm)

    passed = test_allclose(output_tvm.asnumpy(), output_torch.numpy(), rtol=1e-2, print_diff=True)
    if passed == 1:
        print("Batch_norm basic case passed!")
    else:
        print("Batch_norm basic case failed!") 
Developer: KnowingNothing, Project: FlexTensor, Lines: 26, Source: test_ops.py

Example 10: test_array_mul

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_array_mul(extent=1024, target="llvm", dev_id=0, number=10, verbose=False):
    time_cost_lst = []
    for N in range(1, extent+1):
        ctx = tvm.context(target, dev_id)
        ary_ops, ary_bufs = array_mul(N)
        ary_inputs = [tvm.nd.array(np.random.uniform(size=to_tuple(buf.shape)).astype(buf.dtype), ctx) for buf in ary_bufs[:-1]]
        ary_inputs += [tvm.nd.array(np.zeros(shape=to_tuple(buf.shape), dtype=buf.dtype), ctx) for buf in ary_bufs[-1:]]

        s = tvm.te.create_schedule(ary_ops)
        func = tvm.build(s, ary_bufs, target)
        evaluator = func.time_evaluator(func.entry_name, ctx, number=number)

        cost = evaluator(*ary_inputs).mean * 1e3
        # print("N=", N, "cost=", "%f(ms)"%cost, "(target=%s, dev_id=%d, number=%d)"%(target, dev_id, number))
        time_cost_lst.append(cost)
    
    res_lst = [x / time_cost_lst[0] for x in time_cost_lst]
    print("array_mul |(target=%s, dev_id=%d, number=%d)"%(target, dev_id, number))
    if verbose:
        for i, res in enumerate(res_lst):
            print("time_cost: ext=%d / ext=1 = %f"%(i + 1, res))
    else:
        print("time_cost: ext=%d / ext=1 = %f"%(extent, res_lst[-1])) 
Developer: KnowingNothing, Project: FlexTensor, Lines: 25, Source: array_mul.py

Example 11: evaluate

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def evaluate(s, bufs, target, dev_id, number=1):
    ctx = tvm.context(target, dev_id)
    tvm_arys = []
    for arg in bufs:
        shape = to_tuple(arg.shape)
        tmp = np.random.uniform(-10, 10, size=shape).astype(arg.dtype)
        tmp = tvm.nd.array(tmp, ctx)
        tvm_arys.append(tmp)
    stmt = tvm.lower(s, bufs, simple_mode=True)
    from flextensor.test.test_ir_visit_print import visit
    # visit(stmt, 0)
    func = tvm.build(s, bufs, target)
    print(func.imported_modules[0].get_source())
    evaluator = func.time_evaluator(func.entry_name, ctx, number=number)
    time_cost = evaluator(*tvm_arys).mean * 1e3
    return time_cost 
Developer: KnowingNothing, Project: FlexTensor, Lines: 18, Source: schedule_conv2d_1x1.py

Example 12: evaluate

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def evaluate(s, bufs, target, dev_id, number=10):
    ctx = tvm.context(target, dev_id)
    tvm_arys = []
    for arg in bufs:
        shape = to_tuple(arg.shape)
        tmp = np.random.uniform(-10, 10, size=shape).astype(arg.dtype)
        tmp = tvm.nd.array(tmp, ctx)
        tvm_arys.append(tmp)
    func, evaluator = None, None
    try:
        func = tvm.build(s, bufs, target)
        # evaluator = func.time_evaluator(func.entry_name, ctx, number=number)
        # time_cost = evaluator(*tvm_arys).mean * 1e3
        beg = time.time()
        for i in range(number):
            func(*tvm_arys)
        end = time.time()
        time_cost = (end - beg) * 1e3 / number
        return time_cost
    except Exception as e:
        print(e)
        return float("inf") 
Developer: KnowingNothing, Project: FlexTensor, Lines: 24, Source: compare_conv_cpu.py

Example 13: test_wrong_bind

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_wrong_bind():
    N = 1024

    A = tvm.placeholder((N, N-1), name='A')
    B = tvm.compute((N, N-1), lambda i, j: A[i, j])

    s = tvm.create_schedule([B.op])

    # bind a thread axis to two loop axes with different lengths
    s[B].bind(s[B].op.axis[0], tvm.thread_axis("threadIdx.x"))
    s[B].bind(s[B].op.axis[1], tvm.thread_axis("threadIdx.x"))

    for target in ['opencl', 'cuda']:
        if not tvm.context(target).exist:
            continue

        valid = [None]
        with tvm.build_config(**{"add_lower_pass": [
                (2, get_verify_pass(valid, max_threads_per_block=N*N))]}):
            tvm.build(s, [A, B], target)
        assert not valid[0] 
Developer: mlperf, Project: training_results_v0.6, Lines: 23, Source: test_pass_verify_gpu_code.py

Example 14: test_tuning

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_tuning():
    def check(target, target_host):
        ctx = tvm.context(target, 0)
        if not ctx.exist:
            logging.info("Skip test because %s is not available" % target)
            return

        # init task
        task, target = get_sample_task(target, target_host)
        logging.info("%s", task.config_space)

        measure_option = autotvm.measure_option(
            autotvm.LocalBuilder(),
            autotvm.LocalRunner())

        tuner = RandomTuner(task)
        tuner.tune(n_trial=20, measure_option=measure_option)

    check("cuda", None)
    check("opencl", None) 
Developer: mlperf, Project: training_results_v0.6, Lines: 22, Source: test_tuning.py

Example 15: test_static_tensor

# Required imports: import tvm [as alias]
# Or: from tvm import context [as alias]
def test_static_tensor():
    dtype = 'float32'
    stype = 'csr'
    target = 'llvm'
    ctx = tvm.context(target, 0)
    m = tvm.var('m')
    n = tvm.var('n')
    A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype)
    assert(A.stype == 'csr')
    n = 3
    a = np.maximum(np.random.uniform(size=(n,n)).astype(dtype)-.6, 0.)
    a = tvmsp.array(a, ctx)
    A.data = tvm.placeholder(a.data.shape, dtype, name='A_data')
    Ab = tvm.decl_buffer(a.data.shape, dtype, name='A_data')
    binds = {A.data: Ab}
    C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter')
    s = tvm.create_schedule(C.op)
    f = tvm.build(s, [A.data, C], target, binds=binds)
    c = tvmsp.array(np.zeros((n,n), dtype), ctx)
    c.data = tvm.nd.empty(a.data.shape, dtype)
    c.indices = a.indices
    c.indptr = a.indptr
    f(a.data, c.data)
    np.testing.assert_allclose(c.asnumpy(), a.asnumpy() * 2., rtol=1e-5) 
Developer: mlperf, Project: training_results_v0.6, Lines: 26, Source: test_sparse.py


Note: The tvm.context method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce without permission.