This article collects typical usage examples of the Python method cupy.int32. If you are wondering what cupy.int32 does, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also browse further usage examples from the cupy module.
The following shows 15 code examples of cupy.int32, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
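Before the examples below, here is a minimal, self-contained sketch of cupy.int32 used as an ordinary dtype argument (it assumes CuPy is installed and a CUDA device is available; the variable names are illustrative only):

import cupy as cp

# cupy.int32 can be passed anywhere a dtype is expected.
idx = cp.arange(10, dtype=cp.int32)        # GPU array of 32-bit integers
mask = cp.zeros((4, 4), dtype=cp.int32)    # zero-initialized int32 matrix
scalar = cp.int32(7)                       # a 32-bit integer scalar, e.g. for kernel arguments

print(idx.dtype, mask.dtype, scalar)       # int32 int32 7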
Example 1: do_eval
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def do_eval(args):
    ced, charlist, chardict = load_encdec_from_config(args.config, args.model)

    if args.gpu is not None:
        chainer.cuda.Device(args.gpu).use()
        import cupy
        ced = ced.to_gpu(args.gpu)
        xp = cupy
    else:
        xp = np

    def enc(word):
        w_array = xp.array([chardict[c] for c in word], dtype=xp.int32)
        hx = ced.enc.compute_h((w_array,), train=False)
        return hx

    def dec(hx):
        decoded = ced.dec.decode(hx, length=40, train=False)
        return "".join([charlist[int(idx)] for idx in decoded[0]])

    IPython.embed()
Example 2: _non_maximum_suppression_gpu
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _non_maximum_suppression_gpu(bbox, thresh, score=None, limit=None):
    if len(bbox) == 0:
        return cp.zeros((0,), dtype=np.int32)

    n_bbox = bbox.shape[0]

    if score is not None:
        order = score.argsort()[::-1].astype(np.int32)
    else:
        order = cp.arange(n_bbox, dtype=np.int32)

    sorted_bbox = bbox[order, :]
    selec, n_selec = _call_nms_kernel(sorted_bbox, thresh)
    selec = selec[:n_selec]
    selec = order[selec]
    if limit is not None:
        selec = selec[:limit]
    return selec
Example 3: _call_nms_kernel
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _call_nms_kernel(bbox, thresh):
    # Not supported for this backend; the assert disables the function and
    # the code below is kept for reference only.
    assert False, "Not supported."

    n_bbox = bbox.shape[0]
    threads_per_block = 64
    col_blocks = np.ceil(n_bbox / threads_per_block).astype(np.int32)
    blocks = (col_blocks, col_blocks, 1)
    threads = (threads_per_block, 1, 1)

    mask_dev = cp.zeros((n_bbox * col_blocks,), dtype=np.uint64)
    bbox = cp.ascontiguousarray(bbox, dtype=np.float32)
    kern = cp.RawKernel(_nms_gpu_code, 'nms_kernel')
    kern(blocks, threads, args=(cp.int32(n_bbox), cp.float32(thresh),
                                bbox, mask_dev))

    mask_host = mask_dev.get()
    selection, n_selec = _nms_gpu_post(
        mask_host, n_bbox, threads_per_block, col_blocks)
    return selection, n_selec
Example 4: _non_maximum_suppression_gpu
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _non_maximum_suppression_gpu(bbox, thresh, score=None, limit=None):
    if len(bbox) == 0:
        return cp.zeros((0,), dtype=np.int32)

    n_bbox = bbox.shape[0]

    if score is not None:
        order = score.argsort()[::-1].astype(np.int32)
    else:
        order = cp.arange(n_bbox, dtype=np.int32)

    sorted_bbox = bbox[order, :]
    selec, n_selec = _call_nms_kernel(sorted_bbox, thresh)
    selec = selec[:n_selec]
    selec = order[selec]
    if limit is not None:
        selec = selec[:limit]
    return cp.asnumpy(selec)
Example 5: _call_nms_kernel
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _call_nms_kernel(bbox, thresh):
    # PyTorch does not support unsigned long tensors. That does not matter here,
    # since the result is converted to an ndarray in the end, so this part is
    # kept unmodified.
    n_bbox = bbox.shape[0]
    threads_per_block = 64
    col_blocks = np.ceil(n_bbox / threads_per_block).astype(np.int32)
    blocks = (col_blocks, col_blocks, 1)
    threads = (threads_per_block, 1, 1)

    mask_dev = cp.zeros((n_bbox * col_blocks,), dtype=np.uint64)
    bbox = cp.ascontiguousarray(bbox, dtype=np.float32)
    kern = _load_kernel('nms_kernel', _nms_gpu_code)
    kern(blocks, threads, args=(cp.int32(n_bbox), cp.float32(thresh),
                                bbox, mask_dev))

    mask_host = mask_dev.get()
    selection, n_selec = _nms_gpu_post(
        mask_host, n_bbox, threads_per_block, col_blocks)
    return selection, n_selec
Example 6: _label
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _label(x, structure, y):
    elems = numpy.where(structure != 0)
    vecs = [elems[dm] - 1 for dm in range(x.ndim)]
    offset = vecs[0]
    for dm in range(1, x.ndim):
        offset = offset * 3 + vecs[dm]
    indxs = numpy.where(offset < 0)[0]
    dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs]
    dirs = cupy.array(dirs, dtype=numpy.int32)
    ndirs = indxs.shape[0]
    y_shape = cupy.array(y.shape, dtype=numpy.int32)
    count = cupy.zeros(2, dtype=numpy.int32)
    _kernel_init()(x, y)
    _kernel_connect()(y_shape, dirs, ndirs, x.ndim, y, size=y.size)
    _kernel_count()(y, count, size=y.size)
    maxlabel = int(count[0])
    labels = cupy.empty(maxlabel, dtype=numpy.int32)
    _kernel_labels()(y, count, labels, size=y.size)
    _kernel_finalize()(maxlabel, cupy.sort(labels), y, size=y.size)
    return maxlabel
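The private helper above is the kind of kernel plumbing that backs CuPy's connected-component labeling; if you only need the labels, the public entry point is cupyx.scipy.ndimage.label. A minimal usage sketch (the input array here is illustrative only):

import cupy
import cupyx.scipy.ndimage as ndi

x = cupy.asarray([[0, 1, 1, 0],
                  [0, 0, 1, 0],
                  [1, 0, 0, 0]], dtype=cupy.int32)

# `labels` is an integer label array; each connected component is numbered 1..num_features.
labels, num_features = ndi.label(x)
print(num_features)   # 2 with the default connectivity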
Example 7: _kernel_finalize
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _kernel_finalize():
    return cupy.ElementwiseKernel(
        'int32 maxlabel', 'raw int32 labels, raw Y y',
        '''
        if (y[i] < 0) {
            y[i] = 0;
            continue;
        }
        int yi = y[i];
        int j_min = 0;
        int j_max = maxlabel - 1;
        int j = (j_min + j_max) / 2;
        while (j_min < j_max) {
            if (yi == labels[j]) break;
            if (yi < labels[j]) j_max = j - 1;
            else j_min = j + 1;
            j = (j_min + j_max) / 2;
        }
        y[i] = j + 1;
        ''',
        'cupyx_nd_label_finalize')
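For comparison, a much smaller cupy.ElementwiseKernel in the same style, taking an int32 scalar parameter and a raw int32 output (the kernel name and body are illustrative only):

import cupy

# Clamp every element of an int32 array to an upper bound passed as an int32 scalar.
_clamp = cupy.ElementwiseKernel(
    'int32 upper', 'raw int32 y',
    '''
    if (y[i] > upper) {
        y[i] = upper;
    }
    ''',
    'example_clamp_int32')

y = cupy.array([1, 5, 9, 3], dtype=cupy.int32)
_clamp(cupy.int32(7), y, size=y.size)   # size is required because all arguments are scalar or raw
print(y)   # [1 5 7 3]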
Example 8: test_template_specialization
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def test_template_specialization(self):
    if self.backend == 'nvcc':
        self.skipTest('nvcc does not support template specialization')

    # compile the code
    name_expressions = ['my_sqrt<int>', 'my_sqrt<float>',
                        'my_sqrt<complex<double>>', 'my_func']
    mod = cupy.RawModule(code=test_cxx_template, options=('--std=c++11',),
                         name_expressions=name_expressions)

    dtypes = (cupy.int32, cupy.float32, cupy.complex128, cupy.float64)
    for ker_T, dtype in zip(name_expressions, dtypes):
        # get the specialized kernel
        ker = mod.get_function(ker_T)
        # prepare the input and the expected output
        in_arr = cupy.testing.shaped_random((10,), dtype=dtype)
        out_arr = in_arr**2
        # run
        ker((1,), (10,), (in_arr, 10))
        # check the result
        assert cupy.allclose(in_arr, out_arr)
Example 9: _call_nms_kernel
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _call_nms_kernel(bbox, thresh):
    n_bbox = bbox.shape[0]
    threads_per_block = 64
    col_blocks = np.ceil(n_bbox / threads_per_block).astype(np.int32)
    blocks = (col_blocks, col_blocks, 1)
    threads = (threads_per_block, 1, 1)

    mask_dev = cp.zeros((n_bbox * col_blocks,), dtype=np.uint64)
    bbox = cp.ascontiguousarray(bbox, dtype=np.float32)
    kern = cp.RawKernel(_nms_gpu_code, 'nms_kernel')
    kern(blocks, threads, args=(cp.int32(n_bbox), cp.float32(thresh),
                                bbox, mask_dev))

    mask_host = mask_dev.get()
    selection, n_selec = _nms_gpu_post(
        mask_host, n_bbox, threads_per_block, col_blocks)
    return selection, n_selec
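The call pattern above, a grid tuple, a block tuple, and an args tuple mixing cp.int32/cp.float32 scalars with device arrays, is the general cupy.RawKernel launch convention. A tiny self-contained sketch (the kernel source and names are illustrative only):

import cupy as cp

_add_src = r'''
extern "C" __global__
void add_scalar(int n, int value, int* data) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        data[i] += value;
    }
}
'''
add_scalar = cp.RawKernel(_add_src, 'add_scalar')

data = cp.arange(8, dtype=cp.int32)
add_scalar((1,), (8,), (cp.int32(8), cp.int32(3), data))   # grid, block, args
print(data)   # [ 3  4  5  6  7  8  9 10]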
Example 10: __init__
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def __init__(self, V, Hw, Hs):
    super(CharDec, self).__init__(
        lin_out=L.Linear(Hs, Hw),
        nstep_dec=L.NStepLSTM(1, Hw, Hs, dropout=0.5)
    )
    # self.start_id = V
    # self.H = H
    # self.eos_id = V  # self.xp.array([V], dtype=self.xp.int32)
Example 11: append_eos_id
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def append_eos_id(self, a):
    return self.xp.concatenate(
        (a, self.xp.array([self.eos_id], dtype=self.xp.int32)), axis=0)
Example 12: decode
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def decode(self, hx, length=10, verbose=False, train=False):
    hx_dec = hx
    cx_dec = None
    # prev_word = xp.array([self.start_id], dtype = xp.float32)
    nb_inpt = hx.data.shape[1]
    result = [[] for _ in xrange(nb_inpt)]
    finished = [False] * nb_inpt
    for i in xrange(length):
        logits = self.lin_out(hx_dec.reshape(-1, self.H))
        if verbose:
            print "logits", i
            print logits.data
        prev_word = self.xp.argmax(logits.data, axis=1).astype(self.xp.int32)
        for num_inpt in xrange(nb_inpt):
            if prev_word[num_inpt] == self.eos_id:
                finished[num_inpt] = True
            if not finished[num_inpt]:
                result[num_inpt].append(prev_word[num_inpt])
            if finished[num_inpt]:
                prev_word[num_inpt] = 0
        if verbose:
            print "prev_word", prev_word
        # print prev_word
        prev_word_emb = F.split_axis(self.c_emb_dec(prev_word), len(prev_word),
                                     axis=0, force_tuple=True)
        hx_dec, cx_dec, xs_dec = self.nstep_dec(hx_dec, cx_dec, prev_word_emb,
                                                train=train)
    return result
Example 13: encode_voc_list
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def encode_voc_list(voc_list, charlist):
    chardict = {}
    for num, c in enumerate(charlist):
        chardict[c] = num

    dataset = []
    for w in voc_list:
        encoded = [chardict[c] for c in w]
        dataset.append(np.array(encoded, dtype=np.int32))
    return dataset
Example 14: _non_maximum_suppression_cpu
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def _non_maximum_suppression_cpu(bbox, thresh, score=None, limit=None):
    if len(bbox) == 0:
        return np.zeros((0,), dtype=np.int32)

    if score is not None:
        order = score.argsort()[::-1]
        bbox = bbox[order]
    bbox_area = np.prod(bbox[:, 2:] - bbox[:, :2], axis=1)

    selec = np.zeros(bbox.shape[0], dtype=bool)
    for i, b in enumerate(bbox):
        tl = np.maximum(b[:2], bbox[selec, :2])
        br = np.minimum(b[2:], bbox[selec, 2:])
        area = np.prod(br - tl, axis=1) * (tl < br).all(axis=1)

        iou = area / (bbox_area[i] + bbox_area[selec] - area)
        if (iou >= thresh).any():
            continue

        selec[i] = True
        if limit is not None and np.count_nonzero(selec) >= limit:
            break

    selec = np.where(selec)[0]
    if score is not None:
        selec = order[selec]
    return selec.astype(np.int32)
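A quick usage sketch of the CPU fallback above, with hypothetical toy boxes given as (y_min, x_min, y_max, x_max):

import numpy as np

bbox = np.array([[0, 0, 10, 10],
                 [1, 1, 10, 10],      # overlaps the first box heavily
                 [20, 20, 30, 30]], dtype=np.float32)
score = np.array([0.9, 0.8, 0.7], dtype=np.float32)

keep = _non_maximum_suppression_cpu(bbox, thresh=0.5, score=score)
print(keep, keep.dtype)   # [0 2] int32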
Example 15: upcast
# Required import: import cupy [as alias]
# Or: from cupy import int32 [as alias]
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples:
        >>> upcast('int32')
        <type 'numpy.int32'>
        >>> upcast('int32', 'float32')
        <type 'numpy.float64'>
        >>> upcast('bool', float)
        <type 'numpy.complex128'>
    """
    t = _upcast_memo.get(args)
    if t is not None:
        return t

    upcast = cupy.find_common_type(args, [])
    for t in supported_dtypes:
        if cupy.can_cast(upcast, t):
            _upcast_memo[args] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args,))
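A short sketch of the dtype promotion that upcast relies on; it assumes supported_dtypes and _upcast_memo are defined elsewhere in the module, so only the underlying CuPy calls are shown:

import cupy

# find_common_type mirrors NumPy's type-promotion rules.
print(cupy.find_common_type(['int32'], []))              # int32
print(cupy.find_common_type(['int32', 'float32'], []))   # float64
print(cupy.can_cast(cupy.int32, cupy.float64))           # True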