This page collects typical usage examples of the Python function topi.util.get_const_tuple. If you have been wondering what get_const_tuple does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
15 code examples of get_const_tuple are shown below, ordered by popularity by default.
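As a quick orientation before the examples: get_const_tuple folds a shape made of TVM constant expressions (for example A.shape, which is an array of IntImm nodes) into a plain Python tuple of ints, which is what NumPy and memoize keys expect. The following minimal sketch is illustrative only, assuming the same pre-0.7 tvm/topi API used throughout the examples on this page:

import tvm
import numpy as np
from topi.util import get_const_tuple

A = tvm.placeholder((2, 3), name='A')   # A.shape holds tvm expressions, not Python ints
shape = get_const_tuple(A.shape)        # -> (2, 3), a plain Python tuple
a_np = np.zeros(shape, dtype=A.dtype)   # now directly usable with NumPy

Nearly every example below follows this pattern: build placeholders, convert their symbolic shapes with get_const_tuple, generate NumPy reference data of that shape, and compare the result against the compiled TVM kernel.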
Example 1: verify_bitserial_dense
def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits, unipolar):
    input_dtype = 'uint32'
    out_dtype = 'int16'

    with tvm.target.create('llvm'):
        A = tvm.placeholder((batch, in_dim), dtype=input_dtype, name='A')
        B = tvm.placeholder((out_dim, in_dim), dtype=input_dtype, name='B')
        C = topi.nn.bitserial_dense(A, B, activation_bits, weight_bits, out_dtype=out_dtype,
                                    unipolar=unipolar)
        s = topi.generic.schedule_bitserial_dense([C])

    a_shape = get_const_tuple(A.shape)
    b_shape = get_const_tuple(B.shape)

    @memoize("topi.tests.test_topi_bitseral_dense")
    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
        if unipolar:
            b_ = np.copy(b_np).astype(out_dtype)
            for x in np.nditer(b_, op_flags=['readwrite']):
                x[...] = 1 if x == 1 else -1
            c_np = np.dot(a_np, b_.T)
        else:
            c_np = np.dot(a_np, b_np.T)
        return a_np, b_np, c_np
    a_np, b_np, c_np = get_ref_data()

    ctx = tvm.cpu(0)
    a = tvm.nd.array(a_np, ctx)
    b = tvm.nd.array(b_np, ctx)
    c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
    func = tvm.build(s, [A, B, C], "llvm")
    func(a, b, c)
    tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
Example 2: verify_conv2d
def verify_conv2d(batch, in_size, in_channel, num_filter, kernel, stride, padding):
    in_height = in_width = in_size

    with tvm.target.rasp():
        A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')
        W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')
        B = topi.nn.conv2d(A, W, stride, padding)
        s = topi.generic.schedule_conv2d_nchw([B])

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d.verify_conv2d")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data()

    ctx = tvm.cpu(0)
    a = tvm.nd.array(a_np, ctx)
    w = tvm.nd.array(w_np, ctx)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
    func = tvm.build(s, [A, W, B], "llvm")
    func(a, w, b)
    np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
Example 3: verify_conv2d_nchw
def verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, add_bias=False, add_relu=False):
    print("Workload: (%d, %d, %d, %d, %d, %d, %d, %d)" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))

    in_height = in_width = in_size

    A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')
    W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')
    bias = tvm.placeholder((num_filter, 1, 1), name='bias')

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
        if add_bias:
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            C = topi.nn.conv2d(A, W, (stride, stride), (padding, padding),
                               (dilation, dilation), layout='NCHW', out_dtype=dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = topi.generic.schedule_conv2d_nchw([C])

        a = tvm.nd.array(a_np, ctx)
        w = tvm.nd.array(w_np, ctx)
        b = tvm.nd.array(b_np, ctx)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
        if add_bias:
            func = tvm.build(s, [A, W, bias, C], device, name="relu_%d_%d_%d_%d_%d_%d_%d_%d" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))
            func(a, w, b, c)
        else:
            func = tvm.build(s, [A, W, C], device, name="relu_%d_%d_%d_%d_%d_%d_%d_%d" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))
            func(a, w, c)
        tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-4)

    for device in get_all_backend():
        with autotvm.tophub.context(device):  # load tophub pre-tuned parameters
            check_device(device)
Example 4: compute_contrib_conv2d_NCHWc
def compute_contrib_conv2d_NCHWc(attrs, inputs, _):
    """Compute definition of conv2d NCHWc"""
    padding = attrs.get_int_tuple("padding")
    strides = attrs.get_int_tuple("strides")
    dilation = attrs.get_int_tuple("dilation")
    out_channel = attrs.get_int("channels")
    groups = attrs.get_int("groups")
    layout = attrs.get_str("layout")
    out_layout = attrs.get_str("out_layout")
    out_dtype = attrs.get_str("out_dtype")
    out_dtype = inputs[0].dtype if out_dtype == "same" else out_dtype
    if layout == "NCHW":
        _, in_channel, _, _ = get_const_tuple(inputs[0].shape)
    else:
        _, in_channel_chunk, _, _, in_channel_block = get_const_tuple(inputs[0].shape)
        in_channel = in_channel_chunk * in_channel_block
    assert dilation == (1, 1), "not support dilate now"
    if groups == 1:
        # pylint: disable=assignment-from-no-return
        out = topi.nn.conv2d_NCHWc(inputs[0], inputs[1], strides, padding, dilation,
                                   layout, out_layout, out_dtype)
        # pylint: enable=assignment-from-no-return
    elif groups == in_channel and groups == out_channel:
        # pylint: disable=assignment-from-no-return
        out = topi.nn.depthwise_conv2d_NCHWc(inputs[0], inputs[1], strides, padding,
                                             dilation, layout, out_layout, out_dtype)
        # pylint: enable=assignment-from-no-return
    else:
        raise ValueError("not support arbitrary group number > 1 for now")
    if attrs.get_bool("use_bias"):
        bias = inputs[2]
        bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
        out = topi.add(out, bias)
    return out
Example 5: test_bilayout_index
def test_bilayout_index():
    bilayout = tvm.bijective_layout("NCHW", "NCHW16c")

    dst_index = bilayout.forward_index([0, 18, 6, 6])
    assert get_const_tuple(dst_index) == (0, 1, 6, 6, 2)

    src_index = bilayout.backward_index([0, 1, 6, 6, 2])
    assert get_const_tuple(src_index) == (0, 18, 6, 6)
Example 6: test_bilayout_shape
def test_bilayout_shape():
    bilayout = tvm.bijective_layout("NCHW", "NCHW16c")
    assert isinstance(bilayout, tvm.tensor.BijectiveLayout)

    dst_shape = bilayout.forward_shape((1, 32, 7, 7))
    assert get_const_tuple(dst_shape) == (1, 2, 7, 7, 16)

    src_shape = bilayout.backward_shape(dst_shape)
    assert get_const_tuple(src_shape) == (1, 32, 7, 7)
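The expected tuples in Examples 5 and 6 follow directly from the NCHW16c packing rule; get_const_tuple is needed because forward_index/forward_shape return arrays of TVM expressions rather than Python ints. A small sketch of the same arithmetic in plain Python (illustrative only, no tvm required):

# NCHW -> NCHW16c splits the C axis into (C // 16) chunks of 16 channels.
c = 18
chunk, sub = c // 16, c % 16   # -> (1, 2): matches forward_index([0, 18, 6, 6]) -> (0, 1, 6, 6, 2)

channels = 32
chunks = channels // 16        # -> 2: matches forward_shape((1, 32, 7, 7)) -> (1, 2, 7, 7, 16)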
Example 7: verify_pool
def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode):
    iw = ih
    kw = kh
    sw = sh
    ph, pw = padding
    A = tvm.placeholder((n, ic, ih, iw), name='A')
    B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding,
                     pool_type=pool_type, ceil_mode=ceil_mode)
    B = topi.nn.relu(B)
    dtype = A.dtype

    bshape = get_const_tuple(B.shape)
    ashape = get_const_tuple(A.shape)
    if ceil_mode:
        assert bshape[2] == int(math.ceil(float(ashape[2] - kh + ph * 2) / sh) + 1)
        assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pw * 2) / sw) + 1)
    else:
        assert bshape[2] == int(math.floor(float(ashape[2] - kh + ph * 2) / sh) + 1)
        assert bshape[3] == int(math.floor(float(ashape[3] - kw + pw * 2) / sw) + 1)

    a_np = np.random.uniform(size=(n, ic, ih, iw)).astype(dtype)
    pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
    no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
    pad_np[np.ix_(*no_zero)] = a_np
    _, oc, oh, ow = get_const_tuple(B.shape)
    b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)

    if pool_type == 'avg':
        for i in range(oh):
            for j in range(ow):
                b_np[:, :, i, j] = np.mean(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3))
    elif pool_type == 'max':
        for i in range(oh):
            for j in range(ow):
                b_np[:, :, i, j] = np.max(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3))
    b_np = np.maximum(b_np, 0.0)

    def check_device(device):
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_pool(B)
        ctx = tvm.context(device, 0)
        a = tvm.nd.array(a_np, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
        print(tvm.lower(s, [A, B], simple_mode=True))
        f = tvm.build(s, [A, B], device)
        f(a, b)
        tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)

    for device in ['opengl']:
        check_device(device)
Example 8: check_device
def check_device(device):
    ctx = tvm.context(device, 0)
    if not ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)

    with tvm.target.create(device):
        # declare
        DepthwiseConv2d = topi.nn.depthwise_conv2d_NCHWc(Input, Filter,
                                                         (stride_h, stride_w),
                                                         padding_args,
                                                         (dilation, dilation),
                                                         in_layout,
                                                         out_layout, dtype)
        # TODO: add scale_shift implement for NCHWc and add test here
        Relu = topi.nn.relu(DepthwiseConv2d)
        # schedule
        s1 = topi.generic.schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
        s2 = topi.generic.schedule_depthwise_conv2d_nchw(Relu)
    # build the kernels
    f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
    f2 = tvm.build(s2, [Input, Filter, Relu], device)

    # Prepare pod type for test data closure
    input_shape = (batch, in_channel, in_height, in_width)
    filter_shape = (filter_channel, channel_multiplier, filter_height, filter_width)

    # Use memoize, pickle the test data for next time use.
    @memoize("topi.tests.test_topi_depthwise_conv2d.NCHWc")
    def get_ref_data():
        input_np = np.random.uniform(size=input_shape).astype(dtype)
        filter_np = np.random.uniform(size=filter_shape).astype(dtype)
        # correctness with scipy
        depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(
            input_np, filter_np, stride, padding)
        relu_scipy = np.maximum(depthwise_conv2d_scipy, 0)
        return (_transform_data(input_np, ic_block),
                _transform_kernel(filter_np, oc_block),
                _transform_data(depthwise_conv2d_scipy, oc_block),
                _transform_data(relu_scipy, oc_block))

    # Get the test data
    (input_np, filter_np, depthwise_conv2d_scipy, relu_scipy) = get_ref_data()

    input_tvm = tvm.nd.array(input_np, ctx)
    filter_tvm = tvm.nd.array(filter_np, ctx)
    depthwise_conv2d_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape),
                                                 dtype=DepthwiseConv2d.dtype), ctx)
    relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), ctx)
    # launch kernel 1 (depthwise_conv2d)
    f1(input_tvm, filter_tvm, depthwise_conv2d_tvm)
    # launch kernel 2 (depthwise_conv2d + relu)
    f2(input_tvm, filter_tvm, relu_tvm)
    tvm.testing.assert_allclose(depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5)
    tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
Example 9: get_ref_data
def get_ref_data():
    a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
    b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
    if unipolar:
        b_ = np.copy(b_np).astype(out_dtype)
        for x in np.nditer(b_, op_flags=['readwrite']):
            x[...] = 1 if x == 1 else -1
        c_np = np.dot(a_np, b_.T)
    else:
        c_np = np.dot(a_np, b_np.T)
    return a_np, b_np, c_np
Example 10: get_ref_data
def get_ref_data():
    a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
    w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
    if unipolar:
        w_ = np.copy(w_np).astype(out_dtype)
        for x in np.nditer(w_, op_flags=['readwrite']):
            x[...] = 1 if x == 1 else -1
        b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
    else:
        b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
    return a_np, w_np, b_np
Example 11: verify_leaky_relu
def verify_leaky_relu(m, alpha):
    A = tvm.placeholder((m,), name='A')
    B = topi.nn.leaky_relu(A, alpha)
    s = tvm.create_schedule([B.op])

    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
    ctx = tvm.cpu(0)
    a = tvm.nd.array(a_np, ctx)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
    foo = tvm.build(s, [A, B], "llvm", name="leaky_relu")
    foo(a, b)
    tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
Example 12: compute_deformable_conv2d
def compute_deformable_conv2d(attrs, inputs, out_dtype, target):
    """Compute definition of deformable_conv2d"""
    padding = get_const_tuple(attrs.padding)
    strides = get_const_tuple(attrs.strides)
    dilation = get_const_tuple(attrs.dilation)
    deformable_groups = attrs.deformable_groups
    groups = attrs.groups
    out_dtype = attrs.out_dtype
    out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
    with target:
        out = topi.nn.deformable_conv2d_nchw(inputs[0], inputs[1], inputs[2], strides, padding,
                                             dilation, deformable_groups, groups, out_dtype)
    return [out]
Example 13: verify_deformable_conv2d_nchw
def verify_deformable_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, deformable_groups=1, groups=1):
    print("Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)" % (batch, in_channel, in_size,
          num_filter, kernel, stride, padding, dilation, deformable_groups, groups))

    A = tvm.placeholder((batch, in_channel, in_size, in_size), name='A')
    out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
    Offset = tvm.placeholder((batch, deformable_groups * kernel * kernel * 2, out_size, out_size), name='offset')
    W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')
    bias = tvm.placeholder((num_filter, 1, 1), name='bias')

    a_shape = get_const_tuple(A.shape)
    offset_shape = get_const_tuple(Offset.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        offset_np = np.random.randn(*offset_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        c_np = topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding,
                                                          dilation, deformable_groups, groups)
        return a_np, offset_np, w_np, c_np
    a_np, offset_np, w_np, c_np = get_ref_data()

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            C = topi.nn.deformable_conv2d_nchw(A, Offset, W, stride, padding, dilation,
                                               deformable_groups, groups, out_dtype=dtype)
            s = topi.generic.schedule_deformable_conv2d_nchw([C])

        a = tvm.nd.array(a_np, ctx)
        offset = tvm.nd.array(offset_np, ctx)
        w = tvm.nd.array(w_np, ctx)
        c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, ctx=ctx)

        func = tvm.build(s, [A, Offset, W, C], device)
        func(a, offset, w, c)
        tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)

    for device in ['llvm', 'cuda']:
        check_device(device)
Example 14: check_device
def check_device(device):
    ctx = tvm.context(device, 0)
    if not ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)

    # build the kernel
    f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)

    # prepare pod type for test data closure
    dtype = Out_grad.dtype
    out_grad_shape = get_const_tuple(Out_grad.shape)
    filter_shape = get_const_tuple(Filter.shape)

    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_depthwise_conv2d_backward_input.nhwc")
    def get_ref_data():
        out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
        filter_np = np.random.uniform(size=filter_shape).astype(dtype)
        dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
        # padding params in forward propagation
        fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
        # padding params in backward propagation
        bpad_top = filter_h - 1 - fpad_top
        bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
        bpad_left = filter_w - 1 - fpad_left
        bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)

        padded_out_grad = np.zeros((batch, dilated_out_grad_np.shape[1]+bpad_top+bpad_bottom,
                                    dilated_out_grad_np.shape[2]+bpad_left+bpad_right, out_channel))
        padded_out_grad[:, bpad_top:dilated_out_grad_np.shape[1]+bpad_top,
                        bpad_left:dilated_out_grad_np.shape[2]+bpad_left, :] = dilated_out_grad_np

        in_grad_np = np.zeros((batch, in_h, in_w, in_channel))
        for b in range(batch):
            for c in range(in_channel):
                for m in range(channel_multiplier):
                    in_grad_np[b, :, :, c] += signal.convolve2d(padded_out_grad[b, :, :, c*channel_multiplier+m], \
                        filter_np[:, :, c, m], mode='valid')[0:in_h, 0:in_w]
        return (out_grad_np, filter_np, in_grad_np)

    (out_grad_np, filter_np, in_grad_np) = get_ref_data()

    out_grad_tvm = tvm.nd.array(out_grad_np, ctx)
    filter_tvm = tvm.nd.array(filter_np, ctx)
    in_grad_tvm = tvm.nd.array(np.zeros(shape=ishape, dtype=dtype), ctx)
    # launch the kernel
    timer = f.time_evaluator(f.entry_name, ctx, number=1)
    tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean
    tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.asnumpy(), rtol=1e-5)
Example 15: check_device
def check_device(device):
    ctx = tvm.context(device, 0)
    if not ctx.exist:
        print("Skip because %s is not enabled" % device)
        return
    print("Running on target: %s" % device)

    k = 10.0
    dilation = (1, 1)
    with tvm.target.create(device):
        A = tvm.placeholder((batch, in_channel, in_size, in_size), name='A')
        W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')
        B = topi.nn.conv2d(A, W, stride, padding, dilation)
        if typ == "add":
            C = B + k
        elif typ == "sub":
            C = B - k
        elif typ == "mul":
            C = B * k
        elif typ == "div":
            C = B / k
        else:
            raise NotImplementedError()
        s = topi.generic.schedule_conv2d_nchw([C])

    foo = tvm.build(s, [A, W, B, C], device, name="conv2d_scalar_" + typ)

    a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
    b_npy = topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding)
    c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype)
    if typ == "add":
        c_npy = b_npy + k
    elif typ == "sub":
        c_npy = b_npy - k
    elif typ == "mul":
        c_npy = b_npy * k
    elif typ == "div":
        c_npy = b_npy / k
    else:
        raise NotImplementedError()

    a_nd = tvm.nd.array(a_npy, ctx)
    w_nd = tvm.nd.array(w_npy, ctx)
    b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), ctx)
    c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), ctx)
    foo(a_nd, w_nd, b_nd, c_nd)
    tvm.testing.assert_allclose(c_nd.asnumpy(), c_npy, rtol=1E-4, atol=1E-4)