This article collects typical usage examples of the Python method cupy.argmax. If you are wondering what cupy.argmax does, how to call it, or are looking for examples of it in practice, the curated code examples below may help. You can also explore further usage examples from the cupy module, to which this method belongs.
Four code examples of cupy.argmax are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
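Before the examples, here is a minimal, self-contained sketch of the basic cupy.argmax call on a dense array (it assumes CuPy is installed and a CUDA device is available; the array values are made up for illustration):

import cupy

a = cupy.array([[1.0, 9.0, 3.0],
                [7.0, 2.0, 5.0]])
print(int(cupy.argmax(a)))     # index of the maximum in the flattened array -> 1
print(cupy.argmax(a, axis=1))  # per-row indices of the maxima -> [1 0]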
Example 1: argmax
# Required import: import cupy [as alias]
# Or: from cupy import argmax [as alias]
def argmax(self, axis=None, out=None, sum_duplicates=False):
    """Returns indices of maximum elements along an axis.

    Implicit zero elements are taken into account. If there are several
    maximum values, the index of the first occurrence is returned. If
    ``NaN`` values occur in the matrix, the output defaults to a zero entry
    for the row/column in which the NaN occurs.

    Args:
        axis (int): {-2, -1, 0, 1, ``None``} (optional)
            Axis along which the argmax is computed. If ``None`` (default),
            the index of the maximum element in the flattened data is
            returned.
        out (None): (optional)
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except for
            the default value, as this argument is not used.
        sum_duplicates (bool): Flag to indicate that duplicate elements
            should be combined prior to the operation.

    Returns:
        (cupy.ndarray or int): Indices of maximum elements. If an array,
        its size along ``axis`` is 1.
    """
    # Delegate to the shared argmin/argmax helper with the argmax kernels.
    return self._arg_min_or_max(axis, out, cupy.argmax, cupy.greater,
                                sum_duplicates)
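The method above is an excerpt from a sparse matrix class; assuming it corresponds to CuPy's cupyx.scipy.sparse matrices, a hedged usage sketch looks like this (values are made up for illustration):

import cupy
from cupyx.scipy import sparse

m = sparse.csr_matrix(cupy.array([[0.0, 4.0, 0.0],
                                  [6.0, 0.0, 1.0]]))
print(int(m.argmax()))           # index of the maximum in the flattened matrix -> 3
print(m.argmax(axis=1).ravel())  # per-row column index of each maximum -> [1 0]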
Example 2: phasecorr_gpu
# Required import: import cupy [as alias]
# Or: from cupy import argmax [as alias]
# Assumes module-level imports not shown in this excerpt: ``import numpy as np``,
# ``import cupy as cp``, and GPU FFT helpers ``fftn``/``ifftn``
# (e.g. from ``cupyx.scipy.fftpack``).
def phasecorr_gpu(X, cfRefImg, lcorr):
    '''not being used - no speed-up - may be faster with cuda.jit'''
    nimg, Ly, Lx = X.shape
    ly, lx = cfRefImg.shape[-2:]
    lyhalf = int(np.floor(ly / 2))
    lxhalf = int(np.floor(lx / 2))
    # put on GPU
    ref_gpu = cp.asarray(cfRefImg)
    x_gpu = cp.asarray(X)
    # phase correlation
    x_gpu = fftn(x_gpu, axes=(1, 2), overwrite_x=True) * np.sqrt(Ly - 1) * np.sqrt(Lx - 1)
    for t in range(x_gpu.shape[0]):
        tmp = x_gpu[t, :, :]
        tmp = cp.multiply(tmp, ref_gpu)
        tmp = cp.divide(tmp, cp.absolute(tmp) + 1e-5)
        x_gpu[t, :, :] = tmp
    x_gpu = ifftn(x_gpu, axes=(1, 2), overwrite_x=True) * np.sqrt(Ly - 1) * np.sqrt(Lx - 1)
    x_gpu = cp.fft.fftshift(cp.real(x_gpu), axes=(1, 2))
    # get max index within +/- lcorr of the center
    x_gpu = x_gpu[cp.ix_(np.arange(0, nimg, 1, int),
                         np.arange(lyhalf - lcorr, lyhalf + lcorr + 1, 1, int),
                         np.arange(lxhalf - lcorr, lxhalf + lcorr + 1, 1, int))]
    ix = cp.argmax(cp.reshape(x_gpu, (nimg, -1)), axis=1)
    cmax = x_gpu[np.arange(0, nimg, 1, int), ix]
    ymax, xmax = cp.unravel_index(ix, (2 * lcorr + 1, 2 * lcorr + 1))
    cmax = cp.asnumpy(cmax).flatten()
    ymax = cp.asnumpy(ymax)
    xmax = cp.asnumpy(xmax)
    ymax, xmax = ymax - lcorr, xmax - lcorr
    return ymax, xmax, cmax
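The excerpt leaves the imports and the preparation of cfRefImg to its surrounding module; the sketch below fills them in with assumptions (dummy random data, fftn/ifftn taken from cupyx.scipy.fftpack) purely for illustration:

import numpy as np
import cupy as cp
from cupyx.scipy.fftpack import fftn, ifftn  # assumed source of the fftn/ifftn used above

# 5 dummy frames of 64x64 and a same-sized complex reference spectrum; in
# practice cfRefImg would be the preprocessed (conjugated FFT) reference image.
X = np.random.rand(5, 64, 64).astype(np.float32)
cfRefImg = np.conj(np.fft.fftn(np.random.rand(64, 64))).astype(np.complex64)

ymax, xmax, cmax = phasecorr_gpu(X, cfRefImg, lcorr=10)
print(ymax.shape, xmax.shape, cmax.shape)  # (5,), (5,), (5,): per-frame shifts and peak values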
Example 3: __call__
# Required import: import cupy [as alias]
# Or: from cupy import argmax [as alias]
# Excerpt from a Chainer model wrapper. ``xp`` is the array backend (cupy in
# this GPU setting), ``F`` is ``chainer.functions``, and ``self.model`` is the
# wrapped encoder network; none of these are shown in this snippet.
def __call__(self, x):
    with chainer.function.force_backprop_mode():
        with chainer.configuration.using_config('train', False):
            if isinstance(x, chainer.Variable):
                x = x.data
            x = x[:, :, 10:309, 10:309]
            x = chainer.Variable(x)
            hs_enc = self.model(x)
            prob = hs_enc[-1]
            hs_enc = [x] + hs_enc[:-1]
            # Use the argmax class as a pseudo-label to obtain gradients.
            t = xp.argmax(prob.data, axis=1).astype(xp.int32)
            loss = F.softmax_cross_entropy(prob, t) * float(x.shape[0])
            loss.backward(retain_grad=True)
            del loss
            del prob
            for h in hs_enc:
                h.unchain_backward()
            data_scales = [1e-2, 1e0, 1e0, 1e0, 1e1, 1e0]
            grad_scales = [1e4, 1e3, 1e3, 1e2, 1e2, 1e4]
            for h, ds, gs in zip(hs_enc, data_scales, grad_scales):
                h.data *= ds
                h.grad *= gs
            #self.hoge.append([float(xp.std(h.data)) for h in hs_enc])
            #import numpy as np
            #print(1 / np.mean(self.hoge, axis=0))
            target_sizes = [320, 160, 80, 40, 20, 10]
            for i, h in enumerate(hs_enc):
                t = target_sizes[i]
                s = h.shape[2]
                h = xp.concatenate((h.data, h.grad), axis=1)
                p1 = (t - s) // 2
                p2 = t - s - p1
                h = xp.pad(h, ((0, 0), (0, 0), (p1, p2), (p1, p2)),
                           'constant', constant_values=0.0)
                hs_enc[i] = h
            return hs_enc
Example 4: _arg_minor_reduce
# Required import: import cupy [as alias]
# Or: from cupy import argmax [as alias]
def _arg_minor_reduce(self, ufunc, axis):
    """Reduce nonzeros with a ufunc over the minor axis when non-empty.

    Can be applied to a function of self.data by supplying a data parameter.

    Warning: this does not call sum_duplicates()

    Args:
        ufunc (object): Function handle giving the operation to be
            conducted.
        axis (int): Matrix axis over which the reduction should be
            conducted.

    Returns:
        (cupy.ndarray): Reduce result for nonzeros in each major_index.
    """
    # Call the appropriate kernel function
    if axis == 1:
        # Create the vector to hold the output
        value = cupy.zeros(self.shape[0]).astype(cupy.int64)
        # Perform the calculation
        if ufunc == cupy.argmax:
            self._max_arg_reduction_kern(
                (self.shape[0],), (1,),
                (self.data.astype(cupy.float64), self.indices,
                 self.indptr[:len(self.indptr) - 1],
                 self.indptr[1:], cupy.int64(self.shape[1]),
                 value))
        if ufunc == cupy.argmin:
            self._min_arg_reduction_kern(
                (self.shape[0],), (1,),
                (self.data.astype(cupy.float64), self.indices,
                 self.indptr[:len(self.indptr) - 1],
                 self.indptr[1:], cupy.int64(self.shape[1]),
                 value))
    if axis == 0:
        # Create the vector to hold the output
        value = cupy.zeros(self.shape[1]).astype(cupy.int64)
        # Perform the calculation
        if ufunc == cupy.argmax:
            self._max_arg_reduction_kern(
                (self.shape[1],), (1,),
                (self.data.astype(cupy.float64), self.indices,
                 self.indptr[:len(self.indptr) - 1],
                 self.indptr[1:], cupy.int64(self.shape[0]),
                 value))
        if ufunc == cupy.argmin:
            self._min_arg_reduction_kern(
                (self.shape[1],), (1,),
                (self.data.astype(cupy.float64), self.indices,
                 self.indptr[:len(self.indptr) - 1],
                 self.indptr[1:],
                 cupy.int64(self.shape[0]), value))
    return value
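For orientation, this helper appears to be the internal reduction behind the public argmax/argmin path shown in Example 1; a hypothetical direct call (internal API, instance name ``m`` assumed purely for illustration) would look like:

# Hypothetical internal call; ``m`` is assumed to be the sparse matrix
# instance that defines _arg_minor_reduce and the reduction kernels above.
row_argmax = m._arg_minor_reduce(cupy.argmax, axis=1)  # one index per row
col_argmin = m._arg_minor_reduce(cupy.argmin, axis=0)  # one index per column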