This page collects typical usage examples of the Python method theano.sandbox.cuda.dnn.dnn_conv. If you have been wondering what exactly dnn.dnn_conv does, how to call it, or how it is used in practice, the curated code samples below should help. You can also explore the containing module, theano.sandbox.cuda.dnn, for related functionality.
The 15 code examples of dnn.dnn_conv shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
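All of the examples share the same basic call shape: dnn_conv takes an NCHW image tensor and an OIHW filter tensor, with subsample controlling the stride and border_mode the padding. A minimal standalone sketch, assuming a CUDA-capable Theano setup with cuDNN available (shapes are illustrative):

import numpy
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn

# Symbolic NCHW image and OIHW filter bank.
img = T.ftensor4('img')
kern = T.ftensor4('kern')

# Stride-2 convolution with 2 pixels of zero padding per side.
out = dnn.dnn_conv(img, kern, subsample=(2, 2), border_mode=(2, 2))
f = theano.function([img, kern], out)

# A batch of one 3-channel 32x32 image and eight 5x5 filters.
x = numpy.random.rand(1, 3, 32, 32).astype('float32')
w = numpy.random.rand(8, 3, 5, 5).astype('float32')
print(f(x, w).shape)  # (1, 8, 16, 16)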
Example 1: predict_batchnorm
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def predict_batchnorm(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    output = []
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        h_o = dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = lrelu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y, output
Example 2: test_dnn_conv_merge_broad
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def test_dnn_conv_merge_broad():
    # Make sure that we don't apply output_merge on broadcasted values.
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    conv = dnn.dnn_conv(img, kern)
    lr = numpy.asarray(0.05, dtype='float32')
    # this does broadcasting
    fr = conv + lr
    f = theano.function([img, kern], [fr])
    convs = [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 1
    conv = convs[0]
    # Assert that the output was not merged
    assert isinstance(conv.inputs[2].owner.op, GpuAllocEmpty)
Example 3: discrim
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2, b=aeb2))
    cv3 = relu(batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3, b=aeb3))
    cv4 = relu(batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4, b=aeb4))
    cv5 = relu(batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5, b=aeb5))
    cv6 = relu(batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6, b=aeb6))
    ### decoder ###
    dv6 = relu(batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6t, b=aeb6t))
    dv5 = relu(batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5t, b=aeb5t))
    dv4 = relu(batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4t, b=aeb4t))
    dv3 = relu(batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3t, b=aeb3t))
    dv2 = relu(batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2t, b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))
    rX = dv1
    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(T.sum(T.flatten((X - rX) ** 2, 2), axis=1))  # sqrt of the L1 norm plus the L2 norm of the error
    return T.flatten(cv6, 2), rX, mse
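Despite the variable name, mse above is not a mean squared error; per sample it is the square root of the summed absolute error plus the true L2 norm of the error. A numpy sketch of the same computation (illustrative only):

import numpy as np

def recon_error(X, rX):
    # Per-sample: sqrt of the L1 norm plus the L2 norm of the
    # flattened reconstruction error, matching the mse line above.
    e = (X - rX).reshape(X.shape[0], -1)
    return np.sqrt(np.abs(e).sum(axis=1)) + np.sqrt((e ** 2).sum(axis=1))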
Example 4: discrim
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def discrim(X):
    current_input = dropout(X, 0.3)
    ### encoder ###
    cv1 = relu(dnn_conv(current_input, aew1, subsample=(1, 1), border_mode=(1, 1)))
    cv2 = relu(batchnorm(dnn_conv(cv1, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2, b=aeb2))
    cv3 = relu(batchnorm(dnn_conv(cv2, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3, b=aeb3))
    cv4 = relu(batchnorm(dnn_conv(cv3, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4, b=aeb4))
    cv5 = relu(batchnorm(dnn_conv(cv4, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5, b=aeb5))
    cv6 = relu(batchnorm(dnn_conv(cv5, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6, b=aeb6))
    ### decoder ###
    dv6 = relu(batchnorm(deconv(cv6, aew6, subsample=(4, 4), border_mode=(0, 0)), g=aeg6t, b=aeb6t))
    dv5 = relu(batchnorm(deconv(dv6, aew5, subsample=(1, 1), border_mode=(1, 1)), g=aeg5t, b=aeb5t))
    dv4 = relu(batchnorm(deconv(dv5, aew4, subsample=(4, 4), border_mode=(2, 2)), g=aeg4t, b=aeb4t))
    dv3 = relu(batchnorm(deconv(dv4, aew3, subsample=(1, 1), border_mode=(1, 1)), g=aeg3t, b=aeb3t))
    dv2 = relu(batchnorm(deconv(dv3, aew2, subsample=(4, 4), border_mode=(2, 2)), g=aeg2t, b=aeb2t))
    dv1 = tanh(deconv(dv2, aew1, subsample=(1, 1), border_mode=(1, 1)))
    rX = dv1
    mse = T.sqrt(T.sum(T.abs_(T.flatten(X - rX, 2)), axis=1)) + T.sqrt(T.sum(T.flatten((X - rX) ** 2, 2), axis=1))
    return T.flatten(cv6, 2), rX, mse
Example 5: get_output_for
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def get_output_for(self, input, *args, **kwargs):
    if not dnn_available:
        raise RuntimeError("cudnn is not available.")
    # By default we assume 'cross', consistent with earlier versions of conv2d.
    conv_mode = 'conv' if self.flip_filters else 'cross'
    # If border_mode is one of 'valid' or 'full', use it;
    # otherwise fall back to the explicit pad.
    border_mode = self.border_mode if (self.border_mode is not None) else self.pad
    conved = dnn.dnn_conv(img=input,
                          kerns=self.W,
                          subsample=self.strides,
                          border_mode=border_mode,
                          conv_mode=conv_mode)
    if self.b is None:
        activation = conved
    elif self.untie_biases:
        activation = conved + self.b.dimshuffle('x', 0, 1, 2)
    else:
        activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
    return self.nonlinearity(activation)
Example 6: get_output
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def get_output(self, train=False):
    X = self.get_input(train)
    newshape = (X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4])
    Y = theano.tensor.reshape(X, newshape)  # collapse num_samples and num_timesteps
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert self.subsample == (1, 1)
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=Y,
                                    kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=Y,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            border_mode = 'full'
        conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                                                  border_mode=border_mode,
                                                  subsample=self.subsample)
        if self.border_mode == 'same':
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
    return theano.tensor.reshape(output, newshape)
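In the cuDNN branch above, 'same' padding is emulated with explicit padding of (kernel - stride) // 2 per axis, which only preserves the spatial size for stride 1 (hence the assert). A quick illustration of the arithmetic:

def same_pad(kernel, stride=1):
    # Padding used by the cuDNN branch above; size-preserving
    # only when stride == 1, which the code asserts.
    return (kernel - stride) // 2

# A 3x3 kernel at stride 1 needs pad 1 to keep a 32x32 map at 32x32:
# out = (32 + 2*1 - 3) // 1 + 1 = 32
print(same_pad(3))  # 1
print(same_pad(5))  # 2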
Example 7: discrim
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def discrim(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y
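Examples 1, 7, 8, 9, and 10 all share the same slicing convention: _params[0] is the first conv filter, each subsequent batchnorm conv layer contributes a (filter, gain, bias) triple at _params[1 + 3 * n:1 + 3 * (n + 1)], and _params[-1] is the final dense weight matrix. A hypothetical sketch of such a layout (the nf base width and all shapes are illustrative, not from the original code):

import numpy as np

nf = 64  # hypothetical base filter count
_params = [np.random.randn(nf, 3, 5, 5).astype('float32')]  # w: first conv
for n in range(3):
    cin, cout = nf * 2 ** n, nf * 2 ** (n + 1)
    _params += [np.random.randn(cout, cin, 5, 5).astype('float32'),  # w
                np.ones(cout, dtype='float32'),                      # g: gain
                np.zeros(cout, dtype='float32')]                     # b: bias
_params.append(np.random.randn(nf * 8 * 4 * 4, 1).astype('float32'))  # dense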
Example 8: discrim_batchnorm
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def discrim_batchnorm(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    output = []
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        h_o = dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2))
        hout = lrelu(batchnorm(h_o, g=g, b=b))
        hs.append(hout)
        output.append(h_o)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y, output
Example 9: predict
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def predict(_x, _params, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = tanh(T.dot(h, _params[-1]))
    return y
Example 10: disc_test
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def disc_test(_x, _params, _batchnorm, n_layers=3):
    w = _params[0]
    h0 = lrelu(dnn_conv(_x, w, subsample=(2, 2), border_mode=(2, 2)))
    hs = [h0]
    for n in range(n_layers):
        hin = hs[-1]
        w, g, b = _params[1 + 3 * n:1 + 3 * (n + 1)]
        u = _batchnorm[n]
        s = _batchnorm[n + n_layers]
        hout = lrelu(batchnorm(dnn_conv(hin, w, subsample=(2, 2), border_mode=(2, 2)), u=u, s=s, g=g, b=b))
        hs.append(hout)
    h = T.flatten(hs[-1], 2)
    y = sigmoid(T.dot(h, _params[-1]))
    return y
Example 11: def_comp_mask
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def def_comp_mask(self):
    BS = self.BS
    print('COMPILING')
    t = time()
    m = T.tensor4()
    bf_w = np.ones((1, 1, 2 * BS, 2 * BS))
    bf = sharedX(floatX(bf_w))
    # Integer padding: BS / 2 would yield a float under Python 3.
    m_b = dnn_conv(m, bf, subsample=(BS, BS), border_mode=(BS // 2, BS // 2))
    _comp_mask = theano.function(inputs=[m], outputs=m_b)
    print('%.2f seconds to compile [compMask] functions' % (time() - t))
    return _comp_mask
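The all-ones 2BS x 2BS filter applied at stride BS computes overlapping block sums of the mask (kernel flipping in 'conv' mode is irrelevant for a constant filter). A numpy sketch of the equivalent computation for a single-channel 2-D mask, assuming BS is even (illustrative only):

import numpy as np

def comp_mask_np(m, BS):
    # Sum each 2BS x 2BS window, stepping by BS, with BS // 2
    # zero padding per side -- mirroring the dnn_conv call above.
    pad = BS // 2
    mp = np.pad(m, pad, mode='constant')
    H = (mp.shape[0] - 2 * BS) // BS + 1
    W = (mp.shape[1] - 2 * BS) // BS + 1
    out = np.empty((H, W), dtype=m.dtype)
    for i in range(H):
        for j in range(W):
            out[i, j] = mp[i * BS:i * BS + 2 * BS,
                           j * BS:j * BS + 2 * BS].sum()
    return out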
Example 12: setup_class
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def setup_class(cls):
    cls.conv_ops.append(cls.gemm_conv_op)
    if cuda.dnn.dnn_available():
        cls.conv_ops.append(cuda.dnn.dnn_conv)
Example 13: dnn_op
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def dnn_op(mode, subsample):
    def f(img, kern):
        return dnn_conv(img, kern, border_mode=mode, conv_mode='cross',
                        subsample=subsample)
    return f
Example 14: test_dnn_conv_merge
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def test_dnn_conv_merge():
"""This test that we merge correctly multiple dnn_conv.
This can is more difficult due to GpuEmptyAlloc that aren't
merged.
"""
if not cuda.dnn.dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
img_shp = [2, 5, 6, 8]
kern_shp = [3, 5, 5, 6]
img = T.ftensor4('img')
kern = T.ftensor4('kern')
out = T.ftensor4('out')
desc = dnn.GpuDnnConvDesc(
border_mode='valid')(img.shape, kern.shape)
# Test forward op
o1 = dnn.dnn_conv(img, kern)
o2 = dnn.dnn_conv(img, kern)
f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
numpy.random.rand(*kern_shp).astype('float32'))
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1
# Test grad w op
o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1
# Test grad i op
o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1
Example 15: test_dnn_conv_border_mode
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv [as alias]
def test_dnn_conv_border_mode():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img = T.ftensor4()
    kern = T.ftensor4()
    dnn.dnn_conv(img, kern, border_mode=1)
    dnn.dnn_conv(img, kern, border_mode=(2, 3))
    dnn.dnn_conv(img, kern, border_mode='full')
    dnn.dnn_conv(img, kern, border_mode='valid')
    dnn.dnn_conv(img, kern, border_mode='half')
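As the test shows, border_mode accepts an int or a (pad_h, pad_w) pair for explicit zero padding, plus the strings 'valid' (no padding), 'full' (pad by kernel - 1), and 'half' (pad by kernel // 2, size-preserving for odd kernels at stride 1). The output lengths follow the standard convolution formula; a quick sketch of the arithmetic:

def conv_out_len(n, k, pad, stride=1):
    # Standard convolution output-length formula per spatial axis.
    return (n + 2 * pad - k) // stride + 1

n, k = 32, 5
print(conv_out_len(n, k, 0))       # 'valid' -> 28
print(conv_out_len(n, k, k - 1))   # 'full'  -> 36
print(conv_out_len(n, k, k // 2))  # 'half'  -> 32
print(conv_out_len(n, k, 3))       # border_mode=3 -> 34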