This page collects typical usage examples of the Python method nervanagpu.NervanaGPU.fprop_conv. If you are unsure what NervanaGPU.fprop_conv does or how to call it, the curated examples below may help; you can also look at the containing class, nervanagpu.NervanaGPU, for more context.
Four code examples of NervanaGPU.fprop_conv are shown below, ordered by popularity by default.
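Before the individual examples, here is a minimal sketch of the call pattern they share: construct a NervanaGPU backend, describe the convolution with ng.conv_layer, move flattened NumPy arrays to the device with ng.array, and run the forward pass with ng.fprop_conv. The dimensions below are illustrative assumptions distilled from Example 2, not a verbatim excerpt from nervanagpu.

import math
import numpy as np
from nervanagpu import NervanaGPU

# Minimal sketch with assumed, illustrative dimensions.
ng = NervanaGPU(stochastic_round=False)
dt = np.float32

N, C, K = 128, 3, 64      # mini-batch size, input feature maps, output feature maps
H = W = 64                # input height / width
R = S = 8                 # filter height / width
pad, stride = 0, 4

# Describe the convolution (2D, so depth D/T is 1).
layer = ng.conv_layer(dt, N, C, K,
                      D=1, H=H, W=W,
                      T=1, R=R, S=S,
                      pad_d=0, pad_h=pad, pad_w=pad,
                      str_d=1, str_h=stride, str_w=stride,
                      grid_P=0, grid_Q=0, update_size=None)

# Output spatial size, same arithmetic as numModulesY/numModulesX in Example 2.
P = int(math.ceil(float(H - R + 1 + 2 * pad) / stride))
Q = int(math.ceil(float(W - S + 1 + 2 * pad) / stride))

# Flattened device tensors: images (C*H*W, N), filters (C*R*S, K), outputs (K*P*Q, N).
devI = ng.array(np.random.rand(C * H * W, N), dtype=dt)
devF = ng.array(np.random.rand(C * R * S, K), dtype=dt)
devO = ng.empty((K * P * Q, N), dtype=dt)

ng.fprop_conv(layer, devI, devF, devO)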
Example 1: GPU
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import fprop_conv [as alias]
#......... part of the code is omitted here .........
            out (GPUTensor): Where to store the forward propagated results.
            inputs (GPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            weights (GPUTensor): The weight coefficient values for this layer.
            layer (Layer): The layer object.
        """
        self.ng.dot(weights, inputs, out)

    def bprop_fc(self, out, weights, deltas, layer=None):
        """
        Backward propagate the error through a fully connected network layer.

        Arguments:
            out (GPUTensor): Where to store the backward propagated errors.
            weights (GPUTensor): The weight coefficient values for this layer.
            deltas (GPUTensor): The error values for this layer
            layer (Layer): The layer object.
        """
        self.ng.dot(weights.T, deltas, out)

    def update_fc(self, out, inputs, deltas, layer=None):
        """
        Compute the updated gradient for a fully connected network layer.

        Arguments:
            out (GPUTensor): Where to store the updated gradient value.
            inputs (GPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            deltas (GPUTensor): The error values for this layer
            layer (Layer): The layer object.
        """
        self.ng.dot(deltas, inputs.T, out)

    def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs,
                   ifmshape, links, nifm, padding, stride, ngroups, fpropbuf,
                   local=False):
        """
        Forward propagate the inputs of a convolutional network layer to
        produce output pre-activations (ready for transformation by an
        activation function).

        Arguments:
            out (GPUTensor): Where to store the forward propagated results.
            inputs (GPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            weights (GPUTensor): The weight coefficient values for this layer.
            ofmshape (tuple): Dimensions of each output feature map (typically
                              number of height and width neurons).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (GPUTensor): Indices giving the location of each element
                                 in each output feature map stored in out.
            ifmshape (tuple): Dimensions of each input feature map (typically
                              number of height and width neurons). For this
                              backend we expect these values to be square.
            links (GPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            fpropbuf (GPUTensor): Temporary storage buffer used to hold the
                                  convolved outputs for a single receptive
                                  field. Not used for this backend.
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
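The excerpt above ends inside the fprop_conv docstring, but the documented arguments already fix the geometry: ofmshape and ofmsize follow from ifmshape, padding, and stride by the usual convolution output-size arithmetic (the same computation appears as numModulesY/numModulesX in Example 2 below). A small hypothetical helper, output_fm_shape, illustrating that arithmetic for a square filter of side fsize:

import math

# Hypothetical helper (not part of the backend above): derive the output
# feature-map shape from the input shape, a square filter of side fsize,
# the padding, and the stride.
def output_fm_shape(ifmshape, fsize, padding, stride):
    ifmheight, ifmwidth = ifmshape
    ofmheight = int(math.ceil(float(ifmheight - fsize + 1 + 2 * padding) / stride))
    ofmwidth = int(math.ceil(float(ifmwidth - fsize + 1 + 2 * padding) / stride))
    return (ofmheight, ofmwidth)

ofmshape = output_fm_shape((64, 64), 8, 0, 4)   # (15, 15)
ofmsize = ofmshape[0] * ofmshape[1]             # 225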
Example 2: run
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import fprop_conv [as alias]
import math
import numpy as np
from nervanagpu import NervanaGPU

def run():
    ng = NervanaGPU(stochastic_round=False)
    dt = np.float32
    # N: Number of images in mini-batch
    # C: Number of input feature maps
    # K: Number of output feature maps
    # D: Depth of input image
    # H: Height of input image
    # W: Width of input image
    # T: Depth of filter kernel
    # R: Height of filter kernel
    # S: Width of filter kernel
    #
    # * images:  (numColors, imgSizeY, imgSizeX, numImages) with stride given
    # * filters: (numColors, filterPixels, numFilters) if conv
    # *          (numModules, numColors, filterPixels, numFilters) otherwise
    # *
    # * targets: (numFilters, numModulesY, numModulesX, numImages)
    N = 128
    C = 3
    K = 64
    D = 1
    H = 64
    W = 64
    T = 1
    R = 8
    S = 8
    pad_h = pad_w = 0
    str_h = str_w = 4
    layer = ng.conv_layer(dt, N, C, K,
                          D=D, H=H, W=W,
                          T=T, R=R, S=S,
                          pad_d=0, pad_h=pad_h, pad_w=pad_w,
                          str_d=1, str_h=str_h, str_w=str_w,
                          grid_P=0, grid_Q=0, update_size=None)
    numImages = N
    numFilters = K
    numModulesY = int(math.ceil(float(H - R + 1 + 2*pad_h) / str_h))
    numModulesX = int(math.ceil(float(W - S + 1 + 2*pad_w) / str_w))
    print "Num Modules ", numModulesX, numModulesY

    # Set up images, filters, and outputs
    # imgd = np.loadtxt("im1.txt")
    # img = np.zeros((64, 64, 3))
    # print imgd.shape
    # for i in range(3):
    #     img[:, :, i] = imgd[i*64:(i+1)*64, :]
    # hostImages = np.tile(img)
    hostImages = np.random.rand(C, H, W, N)
    hostFilters = np.random.uniform(low=0.0, high=1.0, size=(C, S*R, numFilters))  # np.ones((C, S*R, numFilters))
    hostOutputs = np.zeros((numFilters, numModulesY, numModulesX, N))
    print "Input sum", np.sum(hostImages)

    # Run the cc2 (cuda-convnet2) kernel
    devI = ng.array(hostImages, dtype=dt)
    devF = ng.array(hostFilters, dtype=dt)
    devO = ng.array(hostOutputs, dtype=dt)
    ng.fprop_cuda_conv(layer, devI, devF, devO)
    print "CC2 input sum: ", np.sum(devI.asnumpyarray())
    print "CC2 output sum: ", np.sum(devO.asnumpyarray())

    # Run the maxwell kernel
    # images:  (C * H * W, N)
    # filters: (C * S * R, numFilters)
    # outputs: (numFilters * numModulesX * numModulesY, N)
    devI = ng.array(hostImages.reshape((C*H*W, N)), dtype=dt)
    devF = ng.array(hostFilters.reshape((C*S*R, numFilters)), dtype=dt)
    devO2 = ng.array(hostOutputs.reshape(numFilters*numModulesX*numModulesY, N), dtype=dt)
    ng.fprop_conv(layer, devI, devF, devO2)
    print "NG input sum: ", np.sum(devI.asnumpyarray())
    print "NG output sum: ", np.sum(devO2.asnumpyarray())

    # Compare the two kernels element-wise
    hostOutputs1 = np.reshape(devO.asnumpyarray(), devO2.shape)
    hostOutputs2 = devO2.asnumpyarray()
    for i in xrange(hostOutputs1.shape[0]):
        for j in xrange(hostOutputs1.shape[1]):
            assert(abs(hostOutputs1[i, j] - hostOutputs2[i, j]) < 1e-4)
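The explicit double loop checks every element against an absolute tolerance of 1e-4; a vectorised one-liner that performs the same check (not part of the original script) would be:

# Equivalent vectorised check with the same 1e-4 absolute tolerance
assert np.max(np.abs(hostOutputs1 - hostOutputs2)) < 1e-4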
Example 3:
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import fprop_conv [as alias]
# Copy the (transposed) cudnn-side tensors into nervanagpu layout, freeing
# the cudnn copies as we go.
cuI = None
nlF = ng.empty(dimF, dtype=dtype)
nlF[:] = cuF.T
cuF = None
nlE = ng.empty(dimO, dtype=dtype)
nlE[:] = cuE.T
cuE = None

# Output buffers for the forward, backward, and update passes.
nlB = ng.empty(dimI, dtype=dtype)
nlU = ng.empty(dimF, dtype=dtype)
nlO = ng.empty(dimO, dtype=dtype)
# print drv.mem_get_info()

ng.fprop_conv (conv, nlI, nlF, nlO, alpha=alpha, repeat=repeat)
ng.bprop_conv (conv, nlF, nlE, nlB, alpha=alpha, repeat=repeat)
ng.update_conv(conv, nlI, nlE, nlU, alpha=alpha, repeat=repeat)
nlI = nlF = nlE = None

print "\ncudnn vs nervanaLib:"

# Partial-reduction buffers (and 1x1 views) for the max absolute differences.
parO = ng.empty((N, 1), dtype=np.float32)
parB = ng.empty((N, 1), dtype=np.float32)
parU = ng.empty((K, 1), dtype=np.float32)
maxO = parO[0:1, 0:1]
maxB = parB[0:1, 0:1]
maxU = parU[0:1, 0:1]

# Maximum absolute difference between the cudnn and nervanagpu fprop outputs.
maxo = ng.max(abs(cuO - nlO.T), partial=parO, out=maxO).get()[0, 0]
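The excerpt stops after the fprop comparison, but the parB/maxB and parU/maxU buffers allocated above suggest the bprop and update results are compared the same way. A hedged sketch of that continuation (cuB and cuU would be the cudnn-side bprop and update outputs, which do not appear in this excerpt):

# Hypothetical continuation, following the same partial-reduction pattern
maxb = ng.max(abs(cuB - nlB.T), partial=parB, out=maxB).get()[0, 0]
maxu = ng.max(abs(cuU - nlU.T), partial=parU, out=maxU).get()[0, 0]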
Example 4: padding
# Required import: from nervanagpu import NervanaGPU [as alias]
# Or: from nervanagpu.NervanaGPU import fprop_conv [as alias]
# CPU output arrays
cpuO = np.zeros(dimO, dtype=np.float32)
cpuB = np.zeros(slicable(dimI, 1), dtype=np.float32)
cpuU = np.zeros(slicable(dimF), dtype=np.float32)

# Give the GPU the input array without zero padding (not needed)
devI = ng.array(cpuI[:-1, :].reshape(dimI), dtype=dtype)
devF = ng.array(cpuF.reshape(dimF), dtype=dtype)
devE = ng.array(cpuE, dtype=dtype)
devO = devB = devU = 0

if "fprop" in ops:
    devO = ng.empty(dimO, dtype=dtype)
    ng.fprop_conv(conv, devI, devF, devO, alpha=1.0, repeat=repeat)

if "bprop" in ops:
    devB = ng.empty(dimI, dtype=dtype)
    ng.bprop_conv(conv, devF, devE, devB, alpha=1.0, repeat=repeat)

if "update" in ops:
    devU = ng.empty(dimF, dtype=dtype)
    ng.update_conv(conv, devI, devE, devU, alpha=1.0, repeat=repeat)

def pixel_indices(mt, pr, qs):
    T, R, S = conv.TRS
    D, H, W = conv.DHW
    C = conv.C