This article collects typical usage examples of the Python method theano.sandbox.cuda.dnn.GpuDnnConvDesc. If you are wondering what dnn.GpuDnnConvDesc does, how to call it, or what it looks like in real code, the curated samples below should help. You can also explore further usage examples from its containing module, theano.sandbox.cuda.dnn.
Below are 14 code examples of dnn.GpuDnnConvDesc, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: convolve
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def convolve(self, input, **kwargs):
    # Messy to have these imports here, but seems to allow for switching DNN off.
    from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
                                               host_from_gpu,
                                               gpu_contiguous, HostFromGpu,
                                               gpu_alloc_empty)
    from theano.sandbox.cuda.dnn import (GpuDnnConvDesc, GpuDnnConv,
                                         GpuDnnConvGradI, dnn_conv, dnn_pool)
    # Straight outta Radford
    img = gpu_contiguous(input)
    kerns = gpu_contiguous(self.W)
    desc = GpuDnnConvDesc(border_mode=self.crop, subsample=self.stride,
                          conv_mode='conv')(gpu_alloc_empty(img.shape[0], kerns.shape[1],
                                                            img.shape[2] * self.stride[0],
                                                            img.shape[3] * self.stride[1]).shape,
                                            kerns.shape)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * self.stride[0],
                          img.shape[3] * self.stride[1])
    conved = GpuDnnConvGradI()(kerns, img, out, desc)
    return conved

# Minibatch discrimination layer from OpenAI's improved GAN techniques
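For context: the layer above implements a fractionally-strided ("transposed") convolution by taking the gradient of a dummy strided forward convolution with respect to its inputs (GpuDnnConvGradI); the gpu_alloc_empty(...) call only supplies the shape of the upsampled output. A rough shape sketch, assuming a Lasagne-style layer whose self.W has layout (input_channels, output_channels, rows, cols) and whose self.stride is (2, 2); all concrete numbers here are hypothetical:

# Hypothetical shape bookkeeping for the snippet above: a stride-2
# transposed convolution doubles the spatial size of the feature map.
batch, in_ch, h, w = 64, 128, 8, 8      # shape of `input`
out_ch = 64                             # self.W.shape[1]
stride = (2, 2)                         # self.stride
upsampled_shape = (batch, out_ch, h * stride[0], w * stride[1])
print(upsampled_shape)                  # (64, 64, 16, 16)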
Example 2: deconv
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """
    Sets up a dummy convolutional forward pass and uses its gradient as the deconv.
    Currently only tested/working with 'same' padding.
    """
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(gpu_alloc_empty(img.shape[0], kerns.shape[1],
                                                               img.shape[2] * subsample[0],
                                                               img.shape[3] * subsample[1]).shape,
                                               kerns.shape)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
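A minimal sketch of how this helper might be called, assuming Theano is configured for the GPU with cuDNN available and that gpu_contiguous, gpu_alloc_empty and GpuDnnConvGradI are in scope as in Example 1; the weight layout is (input_channels, output_channels, rows, cols) and all concrete shapes below are hypothetical:

import numpy
import theano
import theano.tensor as T

X = T.ftensor4('X')                                    # e.g. (batch, 256, 4, 4)
w = theano.shared(numpy.random.randn(256, 128, 5, 5).astype('float32'), name='w')

# Stride-2 transposed convolution with padding 2: 4x4 feature maps become 8x8.
Y = deconv(X, w, subsample=(2, 2), border_mode=(2, 2))
f = theano.function([X], Y)
x_val = numpy.random.randn(1, 256, 4, 4).astype('float32')
print(f(x_val).shape)                                  # expected: (1, 128, 8, 8)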
Example 3: test_dnn_conv_desc_merge
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_dnn_conv_desc_merge():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = T.as_tensor_variable(
        numpy.asarray([2, 1, 8, 8]).astype('int64'))
    kern_shp = T.as_tensor_variable(
        numpy.asarray([3, 1, 2, 2]).astype('int64'))
    desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),
                               conv_mode='conv')(img_shp, kern_shp)
    desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1),
                               conv_mode='cross')(img_shp, kern_shp)
    # CDataType is not DeepCopyable, so this will crash if we don't use
    # borrow=True.
    f = theano.function([], [theano.Out(desc1, borrow=True),
                             theano.Out(desc2, borrow=True)],
                        mode=mode_with_gpu)
    d1, d2 = f()

    # This would be the case if they were merged, which would be bad.
    assert d1 != d2

    desc1v2 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),
                                 conv_mode='conv')(img_shp, kern_shp)
    f = theano.function([], [theano.Out(desc1, borrow=True),
                             theano.Out(desc1v2, borrow=True)],
                        mode=mode_with_gpu)
    assert len([n for n in f.maker.fgraph.apply_nodes
                if isinstance(n.op, dnn.GpuDnnConvDesc)]) == 1

    # CDATA-type values don't compare equal even when they represent the same
    # object, so we can't use DebugMode with them.
    if theano.config.mode not in ["DebugMode", "DEBUG_MODE"]:
        d1, d2 = f()
        # They won't be equal if they aren't merged.
        assert d1 == d2
Example 4: test_dnn_conv_merge
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_dnn_conv_merge():
    """Test that multiple dnn_conv ops are merged correctly.

    This case is more difficult because of the GpuAllocEmpty nodes, which
    aren't merged.
    """
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc = dnn.GpuDnnConvDesc(
        border_mode='valid')(img.shape, kern.shape)

    # Test the forward op
    o1 = dnn.dnn_conv(img, kern)
    o2 = dnn.dnn_conv(img, kern)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1

    # Test the gradient-wrt-weights op
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1

    # Test the gradient-wrt-inputs op
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)
    f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1
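Each of the three blocks above follows the same pattern: compile, then count how many apply nodes of a given op class survive in the optimized graph. A small reusable helper along these lines (hypothetical, not part of the original test module) keeps that pattern in one place:

def count_ops(fn, op_type):
    # Count apply nodes whose op is an instance of `op_type` in the
    # optimized graph of a compiled Theano function.
    return len([node for node in fn.maker.fgraph.toposort()
                if isinstance(node.op, op_type)])

# The merge assertions above would then read, e.g.:
#   assert count_ops(f, dnn.GpuDnnConv) == 1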
Example 5: test_conv
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_conv(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4('img')
    kerns = T.ftensor4('kerns')
    out = T.ftensor4('out')
    img_val = numpy.asarray(
        numpy.random.rand(10, 2, 6, 4),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(8, 2, 4, 3),
        dtype='float32'
    )

    for params in product(
        ['valid', 'full', 'half'],
        [(1, 1), (2, 2)],
        ['conv', 'cross']
    ):
        out_vals = numpy.zeros(
            dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,
                                         border_mode=params[0],
                                         subsample=params[1]),
            dtype='float32')
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(img.shape, kerns.shape)
        conv = dnn.GpuDnnConv()(img, kerns, out, desc)
        self._compile_and_check(
            [img, kerns, out],
            [conv],
            [img_val, kern_vals, out_vals],
            dnn.GpuDnnConv
        )
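Outside the InferShapeTester harness used above, the same low-level pipeline can be compiled directly with theano.function: preallocate an output buffer from GpuDnnConv.get_out_shape, build the descriptor from the symbolic shapes, and apply the op. A sketch reusing the symbolic variables and value arrays defined in the snippet above (mode_with_gpu is assumed to come from the test module's setup):

# Hypothetical standalone run of one parameter combination ('valid' padding,
# unit strides, 'conv' mode) without the test harness.
out_shape = dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,
                                         border_mode='valid', subsample=(1, 1))
out_val = numpy.zeros(out_shape, dtype='float32')

desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                          conv_mode='conv')(img.shape, kerns.shape)
conv = dnn.GpuDnnConv()(img, kerns, out, desc)

f = theano.function([img, kerns, out], conv, mode=mode_with_gpu)
result = f(img_val, kern_vals, out_val)   # result.shape == tuple(out_shape)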
Example 6: test_conv3d
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_conv3d(self):
    if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):
        raise SkipTest('cuDNN 3D convolution requires cuDNN v2')
    ftensor5 = T.TensorType(dtype="float32", broadcastable=(False,) * 5)
    img = ftensor5('img')
    kerns = ftensor5('kerns')
    out = ftensor5('out')
    img_val = numpy.asarray(
        numpy.random.rand(10, 2, 6, 4, 11),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(8, 2, 4, 3, 1),
        dtype='float32'
    )

    for params in product(
        ['valid', 'full', 'half'],
        [(1, 1, 1), (2, 2, 2)],
        ['conv', 'cross']
    ):
        out_vals = numpy.zeros(
            dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,
                                           border_mode=params[0],
                                           subsample=params[1]),
            dtype='float32')
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(img.shape, kerns.shape)
        conv = dnn.GpuDnnConv3d()(img, kerns, out, desc)
        self._compile_and_check(
            [img, kerns, out],
            [conv],
            [img_val, kern_vals, out_vals],
            dnn.GpuDnnConv3d
        )
Example 7: test_dnn_conv_grad
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_dnn_conv_grad():
    if not cuda.dnn.dnn_available() or dnn.version() == -1:
        raise SkipTest('alpha != 1.0 not supported in cudnn v1')

    b = 1
    c = 4
    f = 3
    ih = 2
    iw = 8
    kh = 2
    kw = 2
    img_val = numpy.random.random((b, c, ih, iw)).astype('float32')
    kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')
    out_val = numpy.random.random((b, f, ih - kh + 1,
                                   iw - kw + 1)).astype('float32')

    def dconv(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)

    def dconvi(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,
                                     beta=0.0)

    def dconvw(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,
                                     beta=-1.0)

    utt.verify_grad(dconv, [img_val, kern_val, out_val], mode=mode_with_gpu)
    utt.verify_grad(dconvi, [img_val, kern_val, out_val], mode=mode_with_gpu)
    utt.verify_grad(dconvw, [img_val, kern_val, out_val], mode=mode_with_gpu)
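The alpha and beta arguments exercised above follow cuDNN's blending convention: the op returns (approximately) alpha * conv(img, kern) + beta * out, where out is the preallocated output buffer. A rough symbolic equivalent, sketched with the higher-level dnn_conv helper under the same 'valid' padding and unit strides used in the test:

def blended_conv(img, kern, out, alpha=0.5, beta=0.75):
    # Same quantity the low-level GpuDnnConv op computes in a single fused
    # cuDNN call: scale the convolution by alpha and add beta times `out`.
    return (alpha * dnn.dnn_conv(img, kern, border_mode='valid',
                                 subsample=(1, 1), conv_mode='conv')
            + beta * out)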
Example 8: deconv
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """
    Sets up a dummy convolutional forward pass and uses its gradient as the deconv.
    Currently only tested/working with 'same' padding.
    """
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(gpu_alloc_empty(img.shape[0], kerns.shape[1],
                                                               img.shape[2] * subsample[0],
                                                               img.shape[3] * subsample[1]).shape,
                                               kerns.shape)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
Example 9: test_dnn_conv_grad
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_dnn_conv_grad():
    if not cuda.dnn.dnn_available() or dnn.version() == -1:
        raise SkipTest('alpha != 1.0 not supported in cudnn v1')

    b = 1
    c = 4
    f = 3
    ih = 2
    iw = 8
    kh = 2
    kw = 2
    img_val = numpy.random.random((b, c, ih, iw)).astype('float32')
    kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')
    out_val = numpy.random.random((b, f, ih - kh + 1,
                                   iw - kw + 1)).astype('float32')

    def dconv(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)

    def dconvi(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,
                                     beta=0.0)

    def dconvw(img, kern, out):
        desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
                                  conv_mode='conv')(img.shape, kern.shape)
        return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,
                                     beta=-1.0)

    utt.verify_grad(dconv, [img_val, kern_val, out_val])
    utt.verify_grad(dconvi, [img_val, kern_val, out_val])
    utt.verify_grad(dconvw, [img_val, kern_val, out_val])
Example 10: __init__
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def __init__(self,
             filters,
             batch_size,
             input_space,
             output_axes=('b', 'c', 0, 1),
             subsample=(1, 1),
             border_mode='valid',
             filters_shape=None,
             message=''):
    assert batch_size is None or batch_size > 0
    self._input_space = input_space
    self._output_axes = output_axes
    self._subsample = tuple(subsample)
    self._border_mode = border_mode

    super(Cudnn2D, self).__init__(
        filters=filters,
        img_shape=(batch_size, input_space.num_channels,
                   input_space.shape[0], input_space.shape[1]),
        subsample=self._subsample,
        border_mode=border_mode,
        filters_shape=filters.get_value(borrow=True).shape,
        message=message
    )

    # conv_op has to be changed to the cuDNN one
    self._conv_op = GpuDnnConv()
    self._desc = GpuDnnConvDesc(border_mode=border_mode,
                                subsample=self._subsample,
                                conv_mode='conv')
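The constructor only stores the cuDNN op and the descriptor factory; applying them would follow the same pattern as the other examples on this page. A hypothetical application sketch (the method name, the self._filters attribute, and the gpu_contiguous/gpu_alloc_empty helpers are assumptions here, not taken from the original class):

def lmul(self, x):
    # Hypothetical sketch: make the operands contiguous, preallocate the
    # output from the inferred shape, then run the stored cuDNN conv op.
    img = gpu_contiguous(x)
    kerns = gpu_contiguous(self._filters)
    out_shape = GpuDnnConv.get_out_shape(img.shape, kerns.shape,
                                         self._border_mode, self._subsample)
    out = gpu_alloc_empty(*out_shape)
    desc = self._desc(img.shape, kerns.shape)
    return self._conv_op(img, kerns, out, desc)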
Example 11: test_dnn_conv_inplace
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_dnn_conv_inplace():
    """Test that the inplace optimizations work correctly even when the
    GpuAllocEmpty nodes get merged together.
    """
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    img_shp = [2, 5, 6, 8]
    kern_shp = [3, 5, 5, 6]
    img = T.ftensor4('img')
    kern = T.ftensor4('kern')
    out = T.ftensor4('out')
    desc1 = dnn.GpuDnnConvDesc(border_mode='valid', conv_mode='conv')(
        img.shape, kern.shape)
    desc2 = dnn.GpuDnnConvDesc(
        border_mode='valid', conv_mode='cross')(img.shape, kern.shape)

    # Test the forward op
    o1 = dnn.dnn_conv(img, kern, conv_mode='conv')
    o2 = dnn.dnn_conv(img, kern, conv_mode='cross')
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),
               numpy.random.rand(*kern_shp).astype('float32'))
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test the gradient-wrt-weights op
    out = gpu_alloc_empty(*kern.shape)
    o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2

    # Test the gradient-wrt-inputs op
    out = gpu_alloc_empty(*img.shape)
    o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)
    o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)
    f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]
    assert len(convs) == 2
    assert all([node.op.inplace for node in convs])
    assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2
Example 12: test_conv3d_gradw
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_conv3d_gradw(self):
    if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):
        raise SkipTest('cuDNN 3D convolution requires cuDNN v2')
    ftensor5 = T.TensorType(dtype="float32", broadcastable=(False,) * 5)
    img = ftensor5('img')
    kerns = ftensor5('kerns')
    out = ftensor5('out')
    img_val = numpy.asarray(
        numpy.random.rand(9, 2, 4, 8, 13),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(11, 2, 3, 1, 4),
        dtype='float32'
    )

    for params in product(
        ['valid', 'full', 'half'],
        [(1, 1, 1), (2, 2, 2)],
        ['conv', 'cross']
    ):
        out_vals = numpy.zeros(
            dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,
                                           border_mode=params[0],
                                           subsample=params[1]),
            dtype='float32')
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(img.shape, out.shape)
        conv_grad_w = dnn.GpuDnnConv3dGradW()(
            img,
            out,
            kerns,
            desc,
        )
        self._compile_and_check(
            [img, out, kerns],
            [conv_grad_w],
            [img_val, out_vals, kern_vals],
            dnn.GpuDnnConv3dGradW
        )
Example 13: test_conv_gradi
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_conv_gradi(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4('img')
    kerns = T.ftensor4('kerns')
    out = T.ftensor4('out')
    img_val = numpy.asarray(
        numpy.random.rand(3, 4, 5, 6),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(4, 14, 15, 16),
        dtype='float32'
    )

    for params in product(
        ['valid'],  # Should this work for 'full'?
        [(1, 1)],
        ['conv', 'cross']
    ):
        temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
        shape = (
            img_val.shape[0], kern_vals.shape[1],
            img_val.shape[2] + kern_vals.shape[2] - 1,
            img_val.shape[3] + kern_vals.shape[3] - 1
        )
        out_vals = numpy.zeros(shape, dtype='float32')
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(out.shape, temp_kerns.shape)
        conv_grad_i = dnn.GpuDnnConvGradI()(
            temp_kerns,
            img,
            out,
            desc,
        )
        self._compile_and_check(
            [temp_kerns, img, out],
            [conv_grad_i],
            [kern_vals, img_val, out_vals],
            dnn.GpuDnnConvGradI
        )
Example 14: test_conv3d_gradi
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Alternatively: from theano.sandbox.cuda.dnn import GpuDnnConvDesc [as alias]
def test_conv3d_gradi(self):
    if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):
        raise SkipTest('cuDNN 3D convolution requires cuDNN v2')
    ftensor5 = T.TensorType(dtype="float32", broadcastable=(False,) * 5)
    img = ftensor5('img')
    kerns = ftensor5('kerns')
    out = ftensor5('out')
    img_val = numpy.asarray(
        numpy.random.rand(8, 4, 6, 7, 11),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(9, 4, 5, 1, 2),
        dtype='float32'
    )

    for params in product(
        ['valid', 'full', 'half'],
        [(1, 1, 1), (2, 2, 2)],
        ['conv', 'cross']
    ):
        out_vals = numpy.zeros(
            dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,
                                           border_mode=params[0],
                                           subsample=params[1]),
            dtype='float32')
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(img.shape, kerns.shape)
        conv_grad_i = dnn.GpuDnnConv3dGradI()(
            kerns,
            out,
            img,
            desc,
        )
        self._compile_and_check(
            [kerns, out, img],
            [conv_grad_i],
            [kern_vals, out_vals, img_val],
            dnn.GpuDnnConv3dGradI
        )