This article collects typical usage examples of the dnn_available function from the Python module theano.sandbox.cuda.dnn. If you have been wondering what dnn_available does and how to use it, the hand-picked examples below should help.
The following shows 15 code examples of dnn_available, drawn from open-source projects and ordered roughly by popularity.
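All of the examples share one idiom: probe for a usable cuDNN with dnn_available() before building cuDNN-backed graph nodes, and fall back to a plain Theano op (or skip the test) when the probe fails. Here is a minimal sketch of that guard, assuming the legacy Theano 0.x sandbox.cuda backend; the print messages are illustrative only:

import theano
from theano.sandbox.cuda import dnn  # importable even when no GPU is configured

# dnn_available() returns True only when a usable cuDNN installation is
# found. After a False result, the reason is stored on the function
# itself as dnn.dnn_available.msg; the test examples below raise
# SkipTest with exactly that message.
if not theano.config.device.startswith('gpu'):
    print('not running on a GPU device; cuDNN ops are unavailable')
elif dnn.dnn_available():
    print('cuDNN available: ops such as dnn.dnn_conv and dnn.dnn_pool can be used')
else:
    print('cuDNN unavailable:', dnn.dnn_available.msg)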
Example 1: pool2d
# Assumed context for this snippet: from theano.sandbox.cuda import dnn,
# from theano.sandbox.cuda.dnn import dnn_pool,
# from theano.tensor.signal import downsample,
# and _on_gpu(), a module-level helper that checks theano.config.device.
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example 2: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            border_mode = 'full'
            assert(self.subsample == (1, 1))
        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample,
                                      image_shape=self.input_shape,
                                      filter_shape=self.W_shape)
        if self.border_mode == 'same':
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]
    return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example 3: test_cormm_conv
def test_cormm_conv(self):
    if not dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    mode = mode_without_gpu
    for (i, f), s, b, flip, provide_shape in itertools.product(
            zip(self.inputs_shapes, self.filters_shapes),
            self.subsamples,
            self.border_modes,
            self.filter_flip,
            [False, True]):
        o = self.get_output_shape(i, f, s, b)
        self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                     verify_grad=True, mode=mode, device='cpu',
                     provide_shape=provide_shape, border_mode=b,
                     filter_flip=flip)
        self.run_gradweight(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s,
                            verify_grad=True, mode=mode, device='cpu',
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip)
        self.run_gradinput(inputs_shape=i, filters_shape=f,
                           output_shape=o, subsample=s,
                           verify_grad=True, mode=mode, device='cpu',
                           provide_shape=provide_shape, border_mode=b,
                           filter_flip=flip)
Example 4: test_import_without_gpu_or_cudnn_raises
def test_import_without_gpu_or_cudnn_raises(self):
    from theano.sandbox.cuda import dnn
    if theano.config.device.startswith("gpu") and dnn.dnn_available():
        pytest.skip()
    else:
        with pytest.raises(ImportError):
            import lasagne.layers.dnn
Example 5: create_NIPS_Sprag_init
def create_NIPS_Sprag_init(inp_shape, output_num, stride=None, untie_biases=False, input_var=None):
    # stride is expected to be a sequence with one entry per conv layer
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer

    # setup network layout
    l_in = lasagne.layers.InputLayer(inp_shape, input_var=input_var)
    l_hid1 = conv(l_in, 16, (8, 8), stride=stride[0], untie_biases=untie_biases,
                  W=lasagne.init.Normal(.01),
                  b=lasagne.init.Constant(.1))
    l_hid2 = conv(l_hid1, 32, (4, 4), stride=stride[1], untie_biases=untie_biases,
                  W=lasagne.init.Normal(.01),
                  b=lasagne.init.Constant(.1))
    l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256,
                                       W=lasagne.init.Normal(.01),
                                       b=lasagne.init.Constant(.1))
    l_out = lasagne.layers.DenseLayer(l_hid3, output_num, nonlinearity=lasagne.nonlinearities.linear,
                                      W=lasagne.init.Normal(.01),
                                      b=lasagne.init.Constant(.1))
    return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
Example 6: test_softmax
def test_softmax(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    t = T.ftensor4('t')
    rand_tensor = numpy.asarray(
        numpy.random.rand(5, 4, 3, 2),
        dtype='float32'
    )
    self._compile_and_check(
        [t],
        [dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(t)],
        [rand_tensor],
        dnn.GpuDnnSoftmax
    )
    self._compile_and_check(
        [t],
        [
            T.grad(
                dnn.GpuDnnSoftmax(
                    'bc01',
                    'accurate',
                    'channel'
                )(t).mean(),
                t
            )
        ],
        [rand_tensor],
        dnn.GpuDnnSoftmaxGrad
    )
Example 7: test_conv
def test_conv(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4('img')
    kerns = T.ftensor4('kerns')
    img_val = numpy.asarray(
        numpy.random.rand(3, 4, 5, 6),
        dtype='float32'
    )
    kern_vals = numpy.asarray(
        numpy.random.rand(3, 4, 5, 6),
        dtype='float32'
    )
    for params in product(
            ['valid', 'full'],
            [(1, 1), (2, 2)],
            ['conv', 'cross']
    ):
        desc = dnn.GpuDnnConvDesc(
            border_mode=params[0],
            subsample=params[1],
            conv_mode=params[2]
        )(img.shape, kerns.shape)
        # build the op on the symbolic variables, not the numpy values,
        # so that _compile_and_check has a graph depending on its inputs
        conv = dnn.GpuDnnConv()(img, kerns, desc)
        self._compile_and_check(
            [img, kerns],
            [conv],
            [img_val, kern_vals],
            dnn.GpuDnnConv
        )
Example 8: test_dnn_conv
def test_dnn_conv(self):
    if not dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    mode = mode_with_gpu
    # provide_shape is not used by the cuDNN implementation
    provide_shape = False
    for (i, f), s, b, flip in itertools.product(
            zip(self.inputs_shapes, self.filters_shapes),
            self.subsamples,
            self.border_modes,
            self.filter_flip):
        o = self.get_output_shape(i, f, s, b)
        self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                     verify_grad=True, mode=mode, device='gpu',
                     provide_shape=provide_shape, border_mode=b,
                     filter_flip=flip, target_op=GpuDnnConv)
        self.run_gradweight(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s,
                            verify_grad=True, mode=mode, device='gpu',
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip, target_op=GpuDnnConvGradW)
        self.run_gradinput(inputs_shape=i, filters_shape=f,
                           output_shape=o, subsample=s,
                           verify_grad=True, mode=mode, device='gpu',
                           provide_shape=provide_shape, border_mode=b,
                           filter_flip=flip, target_op=GpuDnnConvGradI)
Example 9: get_output
def get_output(self, train):
    X = self.get_input(train)
    border_mode = self.border_mode
    if dnn.dnn_available() and theano.config.device[:3] == 'gpu':
        if border_mode == 'same':
            assert(self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            border_mode = 'full'
        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample)
        if self.border_mode == 'same':
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]
    return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example 10: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    newshape = (X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4])
    Y = theano.tensor.reshape(X, newshape)  # collapse num_samples and num_timesteps
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=Y,
                                    kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=Y,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            border_mode = 'full'
        conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                                                  border_mode=border_mode,
                                                  subsample=self.subsample)
        if self.border_mode == 'same':
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
    return theano.tensor.reshape(output, newshape)
Example 11: test_gpucorrmm_conv
def test_gpucorrmm_conv(self):
    if not dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)
    mode = mode_with_gpu.excluding('cudnn')
    for (i, f), s, b, flip, provide_shape in itertools.product(
            zip(self.inputs_shapes, self.filters_shapes),
            self.subsamples,
            self.border_modes,
            self.filter_flip,
            [False, True]):
        o = self.get_output_shape(i, f, s, b)
        self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                     verify_grad=True, mode=mode, device='gpu',
                     provide_shape=provide_shape, border_mode=b,
                     filter_flip=flip,
                     target_op=(GpuCorrMM,
                                GpuCorrMM_gradWeights,
                                GpuCorrMM_gradInputs))
        self.run_gradweight(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s,
                            verify_grad=True, mode=mode, device='gpu',
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip,
                            target_op=GpuCorrMM_gradWeights)
        self.run_gradinput(inputs_shape=i, filters_shape=f,
                           output_shape=o, subsample=s,
                           verify_grad=True, mode=mode, device='gpu',
                           provide_shape=provide_shape, border_mode=b,
                           filter_flip=flip,
                           target_op=GpuCorrMM_gradInputs)
Example 12: test_conv_gradi
def test_conv_gradi(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4("img")
    kerns = T.ftensor4("kerns")
    out = T.ftensor4("out")
    img_val = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")
    kern_vals = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")

    for params in product(["valid"], [(1, 1)], ["conv", "cross"]):  # Should this work for 'full'?
        temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
        shape = (
            img_val.shape[0],
            kern_vals.shape[1],
            img_val.shape[2] + kern_vals.shape[2] - 1,
            img_val.shape[3] + kern_vals.shape[3] - 1,
        )
        out_vals = numpy.zeros(shape, dtype="float32")
        desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
            out.shape, temp_kerns.shape
        )
        conv_grad_i = dnn.GpuDnnConvGradI()(temp_kerns, img, out, desc)
        self._compile_and_check(
            [temp_kerns, img, out], [conv_grad_i], [kern_vals, img_val, out_vals], dnn.GpuDnnConvGradI
        )
Example 13: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version provided by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example 14: test_conv_gradw
def test_conv_gradw(self):
    if not dnn.dnn_available():
        raise SkipTest(dnn.dnn_available.msg)
    img = T.ftensor4("img")
    kerns = T.ftensor4("kerns")
    out = T.ftensor4("out")
    img_val = numpy.asarray(numpy.random.rand(2, 5, 6, 8), dtype="float32")
    kern_vals = numpy.asarray(numpy.random.rand(2, 1, 5, 6), dtype="float32")
    out_vals = numpy.zeros((3, 3, 1, 1), dtype="float32")

    for params in product(["valid", "full"], [(1, 1)], ["conv", "cross"]):  # strides besides (1, 1)
        temp_img = img.dimshuffle(1, 0, 2, 3)
        temp_kerns = kerns
        if params[2] == "conv":
            temp_kerns = temp_kerns[:, :, ::-1, ::-1]
        temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)
        shape = (
            kern_vals.shape[1],
            img_val.shape[1],
            img_val.shape[2] - kern_vals.shape[2] + 1,
            img_val.shape[3] - kern_vals.shape[3] + 1,
        )
        out_vals = numpy.zeros(shape, dtype="float32")
        desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
            temp_img.shape, out.shape
        )
        conv_grad_w = dnn.GpuDnnConvGradW()(temp_img, temp_kerns, out, desc)
        self._compile_and_check(
            [temp_img, temp_kerns, out], [conv_grad_w], [img_val, kern_vals, out_vals], dnn.GpuDnnConvGradW
        )
Example 15: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(self.subsample_length == 1)
            pad_x = (self.filter_length - self.subsample_length) // 2
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=(pad_x, 0))
        else:
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            assert(self.subsample_length == 1)
            border_mode = 'full'
        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample)
        if self.border_mode == 'same':
            shift_x = (self.filter_length - 1) // 2
            conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
    return output