

Python NervanaGPU.bprop_conv Method Code Examples

This article collects typical usage examples of the Python method nervanagpu.NervanaGPU.bprop_conv. If you are wondering what NervanaGPU.bprop_conv does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the containing class, nervanagpu.NervanaGPU.


Three code examples of the NervanaGPU.bprop_conv method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
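All three examples below follow the same calling pattern: build a convolution descriptor, allocate device tensors for the filters, the incoming errors, and the input gradient, then pass them to bprop_conv. The minimal sketch below condenses that pattern; the descriptor conv and the shape tuples dimI, dimF, dimO are assumed to be set up elsewhere (as they are in the excerpted scripts) and are not defined here.

import numpy as np
from nervanagpu import NervanaGPU

ng = NervanaGPU()

# Assumed to exist (see the excerpts below for how the scripts build them):
#   conv              - convolution descriptor passed to the *_conv calls
#   dimI, dimF, dimO  - shapes of the input, filter, and output tensors
F      = ng.empty(dimF, dtype=np.float32)   # filter weights
E      = ng.empty(dimO, dtype=np.float32)   # errors coming from the layer above
grad_I = ng.empty(dimI, dtype=np.float32)   # gradient with respect to the input

# Backward-propagate the errors E through the filters F into grad_I.
ng.bprop_conv(conv, F, E, grad_I, alpha=1.0, repeat=1)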

Example 1:

# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import bprop_conv [as alias]
    nlF = ng.empty(dimF, dtype=dtype)
    nlF[:] = cuF.T
    cuF = None

    nlE = ng.empty(dimO, dtype=dtype)
    nlE[:] = cuE.T
    cuE = None

    nlB = ng.empty(dimI, dtype=dtype)
    nlU = ng.empty(dimF, dtype=dtype)
    nlO = ng.empty(dimO, dtype=dtype)
    #print drv.mem_get_info()

    ng.fprop_conv (conv, nlI, nlF, nlO, alpha=alpha, repeat=repeat)
    ng.bprop_conv (conv, nlF, nlE, nlB, alpha=alpha, repeat=repeat)
    ng.update_conv(conv, nlI, nlE, nlU, alpha=alpha, repeat=repeat)

    nlI = nlF = nlE = None

    print("\ncudnn vs nervanaLib:")

    parO = ng.empty((N,1), dtype=np.float32)
    parB = ng.empty((N,1), dtype=np.float32)
    parU = ng.empty((K,1), dtype=np.float32)
    maxO = parO[0:1,0:1]
    maxB = parB[0:1,0:1]
    maxU = parU[0:1,0:1]

    maxo  = ng.max(abs(cuO - nlO.T), partial=parO, out=maxO).get()[0,0]
    maxb  = ng.max(abs(cuB - nlB.T), partial=parB, out=maxB).get()[0,0]
Developer ID: KayneWest, Project: nervanagpu, Lines of code: 32, Source file: cudnn.py

Example 2: GPU

# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import bprop_conv [as alias]

#.........some code omitted here.........
            ifmshape (tuple): Dimensions of each input feature map (typically
                              number of height and width neurons).  For this
                              backend we expect these values to be square.
            links (GPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            fpropbuf (GPUTensor): Temporary storage buffer used to hold the
                                  convolved outputs for a single receptive
                                  field.  Not used for this backend.
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
        """

        '''
        N: Number of images in mini-batch
        C: Number of input feature maps
        K: Number of output feature maps

        D: Depth  of input image
        H: Height of input image
        W: Width  of input image

        T: Depth  of filter kernel
        R: Height of filter kernel
        S: Width  of filter kernel
        '''
        self.ng.fprop_conv(layer=fpropbuf, I=inputs, F=weights, O=out,
                           alpha=1.0, repeat=1)

    def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs,
                   ifmshape, links, padding, stride, nifm, ngroups, bpropbuf,
                   local=False):
        """
        Backward propagate the error through a convolutional network layer.

        Arguments:
            out (GPUTensor): Where to store the backward propagated errors.
            weights (GPUTensor): The weight coefficient values for this layer.
            deltas (GPUTensor): The error values for this layer
            ofmshape (tuple): Dimensions of each output feature map (typically
                              height and width).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (GPUTensor): Indices giving the location of each element in
                                 each output feature map stored in out.
            ifmshape (tuple): Dimensions of each input feature map (typically
                              height and width).
            links (GPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            bpropbuf (GPUTensor): Temporary storage buffer used to hold the
                                  backpropagated error for a single receptive
                                  field
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
        """
        self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out,
                           alpha=1.0, repeat=1)
Developer ID: YouVentures, Project: neon, Lines of code: 69, Source file: gpu.py
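Note that although the neon backend signature accepts the full layer geometry (ofmshape, padding, stride, nifm, and so on), the wrapper above forwards only the tensors; the geometry lives in the layer object passed as bpropbuf. A hedged sketch of invoking the wrapper directly is shown below; every name in it (backend, grad_in, W, E, conv_buf, and the shape values) is a hypothetical placeholder that neon's convolutional layer machinery would normally supply.

# Hypothetical call into the GPU backend class shown above.
backend.bprop_conv(out=grad_in,          # GPUTensor receiving the input gradient
                   weights=W,            # filter weights for this layer
                   deltas=E,             # errors from the layer above
                   ofmshape=(P, Q), ofmsize=P * Q, ofmlocs=None,
                   ifmshape=(H, W_in), links=None,
                   padding=1, stride=1, nifm=C, ngroups=1,
                   bpropbuf=conv_buf)    # layer descriptor holding the geometry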

Example 3: padding

# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import bprop_conv [as alias]
cpuU = np.zeros(slicable(dimF),   dtype=np.float32)

# give gpu the input array without zero padding (not needed)
devI = ng.array(cpuI[:-1,:].reshape(dimI), dtype=dtype)
devF = ng.array(cpuF.reshape(dimF), dtype=dtype)
devE = ng.array(cpuE, dtype=dtype)

devO = devB = devU = 0

if "fprop"  in ops:
    devO = ng.empty(dimO, dtype=dtype)
    ng.fprop_conv(conv,  devI, devF, devO, alpha=1.0, repeat=repeat)

if "bprop"  in ops:
    devB = ng.empty(dimI, dtype=dtype)
    ng.bprop_conv(conv,  devF, devE, devB, alpha=1.0, repeat=repeat)

if "update" in ops:
    devU = ng.empty(dimF, dtype=dtype)
    ng.update_conv(conv, devI, devE, devU, alpha=1.0, repeat=repeat)


def pixel_indices(mt, pr, qs):

    T,R,S = conv.TRS
    D,H,W = conv.DHW
    C     = conv.C
    HW    = H*W
    DHW   = D*H*W
    imax  = C*DHW
Developer ID: KayneWest, Project: nervanagpu, Lines of code: 32, Source file: conv_test.py


Note: The nervanagpu.NervanaGPU.bprop_conv method examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.