This article collects typical usage examples of the nervanagpu.NervanaGPU.update_conv method in Python. If you are wondering how NervanaGPU.update_conv is used in practice, or what a concrete call looks like, the curated examples below should help. You can also look at further usage examples from the containing class, nervanagpu.NervanaGPU.
Three code examples of the NervanaGPU.update_conv method are shown below, ordered by popularity by default.
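For orientation, here is a minimal sketch of the calling pattern the examples share: allocate device buffers with ng.empty and hand them to update_conv together with a convolution descriptor. The buffer allocation and the update_conv call mirror the excerpts below; the descriptor construction via ng.conv_layer and its exact arguments are an assumption, since none of the excerpts show that step.

import numpy as np
from nervanagpu import NervanaGPU

ng = NervanaGPU()

# Assumed descriptor construction (exact conv_layer signature not shown in the
# excerpts): batch N, input channels C, output filters K, input H x W, filter R x S.
conv = ng.conv_layer(np.float32, N=32, C=3, K=16, H=32, W=32, R=3, S=3)

devI = ng.empty(conv.dimI, dtype=np.float32)  # layer inputs
devE = ng.empty(conv.dimO, dtype=np.float32)  # errors from the layer above
devU = ng.empty(conv.dimF, dtype=np.float32)  # filter-gradient output
# (in real use devI and devE would be filled with data before the call)

# update_conv accumulates the weight gradient from the inputs and the errors.
ng.update_conv(conv, devI, devE, devU, alpha=1.0, repeat=1)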
Example 1:
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import update_conv [as alias]
nlF = ng.empty(dimF, dtype=dtype)
nlF[:] = cuF.T
cuF = None
nlE = ng.empty(dimO, dtype=dtype)
nlE[:] = cuE.T
cuE = None
nlB = ng.empty(dimI, dtype=dtype)
nlU = ng.empty(dimF, dtype=dtype)
nlO = ng.empty(dimO, dtype=dtype)
#print drv.mem_get_info()
ng.fprop_conv (conv, nlI, nlF, nlO, alpha=alpha, repeat=repeat)
ng.bprop_conv (conv, nlF, nlE, nlB, alpha=alpha, repeat=repeat)
ng.update_conv(conv, nlI, nlE, nlU, alpha=alpha, repeat=repeat)
nlI = nlF = nlE = None
print "\ncudnn vs nervanaLib:"
parO = ng.empty((N,1), dtype=np.float32)
parB = ng.empty((N,1), dtype=np.float32)
parU = ng.empty((K,1), dtype=np.float32)
maxO = parO[0:1,0:1]
maxB = parB[0:1,0:1]
maxU = parU[0:1,0:1]
maxo = ng.max(abs(cuO - nlO.T), partial=parO, out=maxO).get()[0,0]
maxb = ng.max(abs(cuB - nlB.T), partial=parB, out=maxB).get()[0,0]
maxu = ng.max(abs(cuU - nlU.T), partial=parU, out=maxU).get()[0,0]
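The excerpt ends right after computing the three maximum absolute differences; a plausible continuation (an assumption, not part of the excerpt) would simply report them and check them against a tolerance:

# Assumed continuation: report the cudnn vs. nervanagpu discrepancies computed above.
print("fprop  max |cuO - nlO.T| = %.6e" % maxo)
print("bprop  max |cuB - nlB.T| = %.6e" % maxb)
print("update max |cuU - nlU.T| = %.6e" % maxu)

tolerance = 1e-2  # loose bound; low-precision dtypes accumulate rounding error
assert maxo < tolerance and maxb < tolerance and maxu < tolerance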
Example 2: GPU
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import update_conv [as alias]
# ......... part of the code is omitted here .........
def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs,
               ifmshape, links, padding, stride, nifm, ngroups, bpropbuf,
               local=False):
    """
    Backward propagate the error through a convolutional network layer.

    Arguments:
        out (GPUTensor): Where to store the backward propagated errors.
        weights (GPUTensor): The weight coefficient values for this layer.
        deltas (GPUTensor): The error values for this layer.
        ofmshape (tuple): Dimensions of each output feature map (typically
                          height and width).
        ofmsize (int): Total size of each output feature map.
        ofmlocs (GPUTensor): Indices giving the location of each element in
                             each output feature map stored in out.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          height and width).
        links (GPUTensor): Input receptive field indices.
        nifm (int): Total number of input feature maps.
        padding (int): Number of additional elements to include along each
                       dimension of each local receptive field during the
                       convolution operation.
        stride (int): Number of neurons to shift the filter at each step.
        ngroups (int): Number of groups.
        bpropbuf (GPUTensor): Temporary storage buffer used to hold the
                              backpropagated error for a single receptive
                              field.
        local (bool, optional): Whether to do local filtering (True) or
                                convolution (False, the default).
    """
    self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out,
                       alpha=1.0, repeat=1)

def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize,
                ofmlocs, ifmshape, links, nifm, padding, stride, ngroups,
                fwidth, updatebuf, local=False, layer=None):
    """
    Compute the updated gradient for a convolutional network layer.

    Arguments:
        out (GPUTensor): Where to store the updated gradient value.
        inputs (GPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        weights (GPUTensor): The weight coefficient values for this layer.
        deltas (GPUTensor): The error values for this layer.
        ofmshape (tuple): Dimensions of each output feature map (typically
                          height and width).
        ofmsize (int): Total size of each output feature map.
        ofmlocs (GPUTensor): Indices giving the location of each element in
                             each output feature map stored in out.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          height and width).
        links (GPUTensor): Input receptive field indices.
        nifm (int): Total number of input feature maps.
        padding (int): Number of additional elements to include along each
                       dimension of each local receptive field during the
                       convolution operation.
        stride (int): Number of neurons to shift the filter at each step.
        ngroups (int): Number of groups.
        fwidth (int): Filter width.
        updatebuf (GPUTensor): Temporary storage buffer used to hold the
                               updated gradient for a single receptive
                               field.
        local (bool, optional): Whether to do local filtering (True) or
                                convolution (False, the default).
        layer (Layer): The layer object.
    """
    # ......... the method body is omitted in this excerpt .........
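The excerpt cuts off before the update_conv method body. Judging from the bprop_conv wrapper above, the omitted body most likely just forwards the relevant tensors to the nervanagpu kernel; the reconstruction below is an assumption that mirrors the bprop_conv call, not the verified neon source.

# Assumed body of the update_conv wrapper above. The geometry arguments
# (ofmshape, links, padding, ...) appear to exist for backend-interface
# compatibility; the nervanagpu call reads the layer geometry from the
# descriptor passed in as `updatebuf`.
self.ng.update_conv(layer=updatebuf, I=inputs, E=deltas, grad_F=out,
                    alpha=1.0, repeat=1)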
Example 3: pixel_indices
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import update_conv [as alias]
devF = ng.array(cpuF.reshape(dimF), dtype=dtype)
devE = ng.array(cpuE, dtype=dtype)
devO = devB = devU = 0

if "fprop" in ops:
    devO = ng.empty(dimO, dtype=dtype)
    ng.fprop_conv(conv, devI, devF, devO, alpha=1.0, repeat=repeat)

if "bprop" in ops:
    devB = ng.empty(dimI, dtype=dtype)
    ng.bprop_conv(conv, devF, devE, devB, alpha=1.0, repeat=repeat)

if "update" in ops:
    devU = ng.empty(dimF, dtype=dtype)
    ng.update_conv(conv, devI, devE, devU, alpha=1.0, repeat=repeat)

def pixel_indices(mt, pr, qs):
    T,R,S = conv.TRS
    D,H,W = conv.DHW
    C = conv.C
    HW = H*W
    DHW = D*H*W
    imax = C*DHW

    idx = []
    for c in range(C):
        ci = c*DHW
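The third excerpt stops inside pixel_indices. The helper maps one output position (and the filter window anchored at mt, pr, qs) to flat indices into the C x D x H x W input volume. A plausible completion of the loop body is sketched below as an assumption; it is not the verified test source, but it shows the standard flat-index computation with an out-of-bounds sentinel.

        # Assumed continuation (not from the excerpt): walk the T x R x S filter
        # window and record the flat input index for every tap, using imax as an
        # out-of-bounds sentinel so padded positions presumably map to a zero row
        # in the gathered input.
        for t in range(T):
            z = mt + t
            zi = ci + z * HW
            zb = 0 <= z < D
            for r in range(R):
                y = pr + r
                yi = zi + y * W
                yb = zb and 0 <= y < H
                for s in range(S):
                    x = qs + s
                    if yb and 0 <= x < W:
                        idx.append(yi + x)
                    else:
                        idx.append(imax)
    return idx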