本文整理汇总了Python中nervanagpu.NervanaGPU.mean方法的典型用法代码示例。如果您正苦于以下问题:Python NervanaGPU.mean方法的具体用法?Python NervanaGPU.mean怎么用?Python NervanaGPU.mean使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nervanagpu.NervanaGPU的用法示例。
在下文中一共展示了NervanaGPU.mean方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: max
# 需要导入模块: from nervanagpu import NervanaGPU [as 别名]
# 或者: from nervanagpu.NervanaGPU import mean [as 别名]
glops = max(glops16, glops32, glops64, glops128)
if glops16 == glops:
fastest = 16
elif glops32 == glops:
fastest = 32
elif glops64 == glops:
fastest = 64
else:
fastest = 128
glopsref = cublas_dot(devA2, devB2, devC2, repeat=repeat)
partial1 = ng.empty((devC1.shape[0],1), dtype=np.float32)
partial2 = partial1[0:1,0:1]
diff = ng.max(abs(devC2 - devC1), partial=partial1, out=partial2).get()[0,0]
mean = ng.mean(abs(devC2), partial=partial1, out=partial2).get()[0,0]
flops_diff = glops - glopsref
note = "**************" if flops_diff <= 0 else ""
print "Faster: %.0f gflops Choice: %d Error: %.3f%%%s" % (flops_diff, fastest, 100 * diff / mean, note)
print "--------------------------------------------------------------------------------"
cublas.cublasDestroy(handle)
示例2: GPU
# 需要导入模块: from nervanagpu import NervanaGPU [as 别名]
# 或者: from nervanagpu.NervanaGPU import mean [as 别名]
#.........这里部分代码省略.........
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.maximum(x, 0., out=out)
return out
def rectleaky(self, x, slope, out):
    """
    Leaky rectified linear activation: out = max(x, slope * x).

    Arguments:
        x (GPUTensor): Input tensor
        slope (float): Multiplier applied on the negative side
        out (GPUTensor): Output tensor, written in place

    Returns:
        GPUTensor: out — returned for consistency with the other
        backend ops (e.g. ``rectlin``, ``sum``, ``mean``), which all
        return their output tensor.
    """
    out[:] = self.ng.maximum(x, x*slope)
    return out
def rectleaky_derivative(self, x, slope, out):
    """
    Derivative of the leaky rectified linear activation:
    1.0 where x > 0, slope elsewhere.

    Arguments:
        x (GPUTensor): Input tensor
        slope (float): Gradient on the non-positive side
        out (GPUTensor): Output tensor, written in place

    Returns:
        GPUTensor: out — returned for consistency with the other
        backend ops (e.g. ``rectlin``, ``sum``, ``mean``), which all
        return their output tensor.
    """
    # greater() yields 1/0, so this maps positives to 1.0 and the rest to slope
    out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope
    return out
def sum(self, tsr, axes, out):
    """
    Sum the elements of ``tsr`` along ``axes`` into ``out``.

    Arguments:
        tsr (GPUTensor): Input tensor
        axes (int): Axis along which the reduction is performed. If axes
                    is None, the tensor is flattened and reduced over
                    both dimensions.
        out (GPUTensor): Output tensor

    Returns:
        GPUTensor: out
    """
    if axes is not None:
        self.ng.sum(tsr, axis=axes, out=out)
    else:
        # Collapse to a single column so one axis-0 reduction covers
        # every element of the 2-D tensor.
        flat = tsr.shape[0] * tsr.shape[1]
        self.ng.sum(tsr.reshape(flat, 1), axis=0, out=out)
    return out
def mean(self, tsr, axes, out):
    """
    Arithmetic mean of the elements of ``tsr`` along ``axes``,
    written into ``out``.

    Arguments:
        tsr (GPUTensor): Input tensor
        axes (int): Axis along which the reduction is performed. If axes
                    is None, the tensor is flattened and reduced over
                    both dimensions.
        out (GPUTensor): Output tensor

    Returns:
        GPUTensor: out
    """
    if axes is not None:
        self.ng.mean(tsr, axis=axes, out=out)
    else:
        # Collapse to a single column so one axis-0 reduction covers
        # every element of the 2-D tensor.
        flat = tsr.shape[0] * tsr.shape[1]
        self.ng.mean(tsr.reshape(flat, 1), axis=0, out=out)
    return out
def min(self, tsr, axes, out):
"""
Calculates the minimum of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
示例3:
# 需要导入模块: from nervanagpu import NervanaGPU [as 别名]
# 或者: from nervanagpu.NervanaGPU import mean [as 别名]
nlI = nlF = nlE = None
print "\ncudnn vs nervanaLib:"
parO = ng.empty((N,1), dtype=np.float32)
parB = ng.empty((N,1), dtype=np.float32)
parU = ng.empty((K,1), dtype=np.float32)
maxO = parO[0:1,0:1]
maxB = parB[0:1,0:1]
maxU = parU[0:1,0:1]
maxo = ng.max(abs(cuO - nlO.T), partial=parO, out=maxO).get()[0,0]
maxb = ng.max(abs(cuB - nlB.T), partial=parB, out=maxB).get()[0,0]
maxu = ng.max(abs(cuU - nlU.T), partial=parU, out=maxU).get()[0,0]
meano = ng.mean(abs(cuO), partial=parO, out=maxO).get()[0,0]
meanb = ng.mean(abs(cuB), partial=parB, out=maxB).get()[0,0]
meanu = ng.mean(abs(cuU), partial=parU, out=maxU).get()[0,0]
print " maxerr mean pct"
print "fprop: %7.5f %6.2f %5.3f" % (maxo, meano, 100*maxo/meano)
print "bprop: %7.5f %6.2f %5.3f" % (maxb, meanb, 100*maxb/meanb)
print "updat: %7.5f %6.2f %5.3f" % (maxu, meanu, 100*maxu/meanu)
# free up memory from this layer before proceeding
cuB = cuU = cuO = None
nlB = nlU = nlO = None
parO = parB = parU = maxO = maxB = maxU = None
libcudnn.cudnnDestroyTensorDescriptor(I_desc)