This article collects typical usage examples of the Python method nervanagpu.NervanaGPU.sum. If you are wondering exactly how to use NervanaGPU.sum, how to call it, or what working examples look like, the selected code samples below may help. You can also explore further usage examples of the containing class, nervanagpu.NervanaGPU.
The following shows 2 code examples of the NervanaGPU.sum method, sorted by popularity by default.
Example 1: GPU
# Module to import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import sum [as alias]
#......... (part of the code is omitted here) .........
            raise AttributeError("unexpected pooling op type: %s" % op)

    def logistic(self, x, out):
        """
        Logistic sigmoid nonlinearity, 1/(1+exp(-x))

        Arguments:
            x (GPUTensor): Input tensor
            out (GPUTensor): Output tensor
        """
        self.ng.sig(x, out=out)
        return out

    def rectlin(self, x, out):
        """
        Rectified Linear nonlinearity

        Arguments:
            x (GPUTensor): Input tensor
            out (GPUTensor): Output tensor
        """
        self.ng.maximum(x, 0., out=out)
        return out

    def rectleaky(self, x, slope, out):
        out[:] = self.ng.maximum(x, x*slope)

    def rectleaky_derivative(self, x, slope, out):
        out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope

    def sum(self, tsr, axes, out):
        """
        Sum

        Arguments:
            tsr (GPUTensor): Input tensor
            axes (int): Axis along which the reduction is performed. If axes
                        is None, the tensor is flattened and reduced over
                        both dimensions.
            out (GPUTensor): Output tensor
        """
        if axes is None:
            sze = tsr.shape[0]*tsr.shape[1]
            self.ng.sum(tsr.reshape(sze, 1), axis=0, out=out)
        else:
            self.ng.sum(tsr, axis=axes, out=out)
        return out

    def mean(self, tsr, axes, out):
        """
        Calculates the arithmetic mean of the elements along the specified
        axes.

        Arguments:
            tsr (GPUTensor): Input tensor
            axes (int): Axis along which the reduction is performed. If axes
                        is None, the tensor is flattened and reduced over
                        both dimensions.
            out (GPUTensor): Output tensor
        """
        if axes is None:
            sze = tsr.shape[0]*tsr.shape[1]
            self.ng.mean(tsr.reshape(sze, 1), axis=0, out=out)
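A minimal usage sketch for the `sum` path above (an illustrative assumption, not taken from the listing): it drives the underlying nervanagpu reduction the same way the backend wrapper does, and it needs a CUDA context on a GPU supported by nervanagpu.

# Hypothetical sketch: exercise ng.sum the way the wrapper above does.
import numpy as np
from nervanagpu import NervanaGPU

ng = NervanaGPU()
tsr = ng.array(np.arange(12, dtype=np.float32).reshape(3, 4))

# axes given: reduce along axis 0, leaving one value per column
col_sums = ng.empty((1, 4), dtype=np.float32)
ng.sum(tsr, axis=0, out=col_sums)

# axes is None path: flatten to (12, 1), then reduce to a single element
total = ng.empty((1, 1), dtype=np.float32)
ng.sum(tsr.reshape(12, 1), axis=0, out=total)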
Example 2: MGPU
# Module to import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import sum [as alias]
#......... (part of the code is omitted here) .........
                      persist_values=True):
        # TODO: set ptype to be fragment in this case ??
        return self.empty((shape[0], shape[1] / self.num_dev), dtype,
                          persist_values=persist_values)

    def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
                   name=None):
        result = self.zeros(ary.shape, dtype=dtype,
                            persist_values=persist_values)
        result.ptype = ary.ptype
        return result

    def empty_like(self, ary, dtype=default_dtype, persist_values=True,
                   name=None):
        result = self.empty(ary.shape, dtype=dtype,
                            persist_values=persist_values, name=name)
        result.ptype = ary.ptype
        return result

    def set(self, tensor, data):
        assert isinstance(tensor, MGPUTensor)
        if tensor.ptype == 'replica':
            for dest, strm, ctx in zip(tensor.tlist, self.strms, self.ctxs):
                ctx.push()
                drv.memcpy_htod_async(dest.ptr, data, strm)
                ctx.pop()
            # tensor.copy_from(data)
        else:
            self.scatter(data, tensor)

    def scatter(self, hbuf, dbuf):
        '''
        Scatters the array data in hbuf to the mgpu tensor.
        Assumes that dbuf is M x N and hbuf is M x (N*k), where k is the
        number of replicas.
        Also assumes that the dtypes of hbuf and dbuf are the same.
        '''
        assert hbuf.size == dbuf.size * dbuf.num_dev
        assert isinstance(dbuf, MGPUTensor)
        assert hbuf.dtype == dbuf.dtype
        ndata = dbuf.size
        starts = [i * ndata for i in range(self.num_dev)]

        for dest, strm, ctx, doff in zip(dbuf.tlist, self.strms, self.ctxs,
                                         starts):
            src = hbuf.reshape((hbuf.size))[doff:(doff + ndata)]
            ctx.push()
            drv.memcpy_htod_async(dest.ptr, src, strm)
            ctx.pop()
        self.synchronize()
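    # Hypothetical illustration (not in the original source) of the layout that
    # scatter() assumes: with num_dev == 2 and a per-device fragment of
    # dbuf.size == 6 elements, the flattened host buffer is copied out in
    # contiguous per-device chunks:
    #     hbuf.reshape(hbuf.size)[0:6]   -> copied to device 0
    #     hbuf.reshape(hbuf.size)[6:12]  -> copied to device 1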
    def fprop_fc(self, out, inputs, weights, layer=None):
        """
        In this case, the weights are shards, the acts are replicas.
        ubuf should be of size nout/num_dev x mbsz.
        """
        ubuf = layer.mempool[0]
        assert ubuf.shape == (weights.shape[0], inputs.shape[1])
        if layer.use_biases:
            biases = layer.biases.tlist
        else:
            biases = [None for i in range(self.num_dev)]
        for dbuf, ibuf, wt, bs, strm, ctx in zip(ubuf.tlist, inputs.tlist,