This article collects typical usage examples of the Python method theano.sandbox.cuda.dnn.dnn_conv3d. If you have been wondering what dnn.dnn_conv3d does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the rest of the theano.sandbox.cuda.dnn module for related usage examples.
Three code examples of dnn.dnn_conv3d are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
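Before the full examples, here is a minimal call sketch of dnn_conv3d itself (not taken from any of the examples below; shapes, filter counts and keyword values are illustrative assumptions). It assumes the old theano.sandbox.cuda backend with cuDNN is available: img is a 5D tensor laid out as (batch, channels, depth, rows, columns), kerns is (num_filters, channels, filter_depth, filter_rows, filter_columns), border_mode sets the padding, subsample is the stride, and conv_mode chooses between true convolution and cross-correlation.

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn

# 5D input: (batch, channels, depth, rows, columns)
ftensor5 = T.TensorType('float32', (False,) * 5)
img = ftensor5('img')
# filters: (num_filters, channels, filter_depth, filter_rows, filter_columns)
kerns = theano.shared(np.random.randn(8, 3, 3, 3, 3).astype('float32'), name='kerns')
out = dnn.dnn_conv3d(img=img,
                     kerns=kerns,
                     border_mode=(1, 1, 1),   # symmetric zero padding per spatial axis
                     subsample=(1, 1, 1),     # stride along depth/rows/columns
                     conv_mode='cross')       # 'conv' flips the filters, 'cross' does not
f = theano.function([img], out)              # only compiles on a GPU with cuDNN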
Example 1: get_output_for
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv3d [as alias]
def get_output_for(self, input, *args, **kwargs):
    conv_mode = 'conv' if self.flip_filters else 'cross'
    # Fractionally strided convolutions: implemented as the gradient of a
    # forward convolution from the (larger) output back down to the input.
    if any(s < 1.0 for s in self.strides):
        subsample = tuple(int(1.0 / s) for s in self.strides)
        img_shape = list(self.output_shape)
        if img_shape[0] is None:
            img_shape[0] = input.shape[0]
        image = T.alloc(0., *img_shape)
        base = dnn.dnn_conv3d(img=image,
                              kerns=self.W.transpose(1, 0, 2, 3, 4),
                              subsample=subsample,
                              border_mode=self.pad,
                              conv_mode=conv_mode)
        # Replace the gradient at `base` with the actual input: the resulting
        # gradient w.r.t. `image` is the transposed (upsampling) convolution.
        conved = T.grad(base.sum(), wrt=image, known_grads={base: input})
    else:
        conved = dnn.dnn_conv3d(img=input,
                                kerns=self.W,
                                subsample=self.strides,
                                border_mode=self.pad,
                                conv_mode=conv_mode)
    if self.b is None:
        activation = conved
    else:
        activation = conved + self.b.dimshuffle('x', 0, 'x', 'x', 'x')
    return self.nonlinearity(activation)
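To make the fractionally strided branch above easier to follow, here is a standalone sketch of the same trick with plain Theano variables (all shapes, channel counts and the stride are illustrative assumptions, not values from the layer): a zero tensor with the desired high-resolution shape is convolved down to the low-resolution grid, and the gradient of that convolution with respect to the zero tensor, with the gradient at the conv output replaced by the actual input via known_grads, is exactly the transposed convolution.

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn

ftensor5 = T.TensorType('float32', (False,) * 5)
low = ftensor5('low')                     # low-resolution input, here (2, 8, 8, 8, 8)
batch, lo_ch, hi_ch = 2, 8, 4             # illustrative sizes
# filters laid out as (lo_ch, hi_ch, d, h, w): a convolution from hi-res to low-res
W = theano.shared(np.random.randn(lo_ch, hi_ch, 3, 3, 3).astype('float32'), name='W')

# zero tensor with the desired high-resolution output shape (factor-2 upsampling)
hi = T.alloc(np.float32(0.), batch, hi_ch, 16, 16, 16)

# the forward conv maps the high-res tensor down to the low-res grid ...
base = dnn.dnn_conv3d(img=hi, kerns=W, subsample=(2, 2, 2), border_mode=(1, 1, 1))

# ... so its gradient w.r.t. `hi`, with the gradient at `base` replaced by the
# actual low-res input, is the fractionally strided (transposed) convolution
upsampled = T.grad(base.sum(), wrt=hi, known_grads={base: low})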
Example 2: compute_output
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv3d [as alias]
def compute_output(self, network, in_vw):
    # gather hyperparameters
    num_filters = network.find_hyperparameter(["num_filters"])
    filter_size = network.find_hyperparameter(["filter_size"])
    stride = network.find_hyperparameter(["conv_stride", "stride"],
                                         (1, 1, 1))
    pad = network.find_hyperparameter(["conv_pad", "pad"], (0, 0, 0))
    pad = conv.conv_parse_pad(filter_size, pad)
    # by default, do convolution instead of cross-correlation
    # rationale: be compatible with standard (non-cuDNN) conv2d
    conv_mode = network.find_hyperparameter(["conv_mode"], "conv")
    assert len(filter_size) == 3
    assert conv_mode in ["conv", "cross"]
    # create weight
    num_channels = in_vw.shape[1]
    W = network.create_vw(
        name="weight",
        is_shared=True,
        shape=(num_filters, num_channels) + tuple(filter_size),
        tags={"parameter", "weight"},
        default_inits=[],
    ).variable
    out_var = dnn.dnn_conv3d(img=in_vw.variable,
                             kerns=W,
                             border_mode=pad,
                             subsample=stride,
                             conv_mode=conv_mode)
    out_shape = conv.conv_output_shape(input_shape=in_vw.shape,
                                       num_filters=num_filters,
                                       axes=(2, 3, 4),
                                       conv_shape=filter_size,
                                       strides=stride,
                                       pads=pad)
    network.create_vw(
        "default",
        variable=out_var,
        shape=out_shape,
        tags={"output"},
    )
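The out_shape above comes from conv.conv_output_shape. As a point of reference, here is a small standalone function (an assumption about what that helper computes per spatial axis, not the treeano implementation itself) doing the same arithmetic for the three spatial axes of a 5D input:

def conv3d_output_shape(input_shape, num_filters, filter_size, strides, pads):
    """input_shape is (batch, channels, depth, rows, columns); pads is symmetric per axis."""
    spatial = tuple(
        None if i is None else (i + 2 * p - f) // s + 1
        for i, f, s, p in zip(input_shape[2:], filter_size, strides, pads)
    )
    return (input_shape[0], num_filters) + spatial

# e.g. a (None, 3, 16, 32, 32) input with 8 filters of size (3, 3, 3),
# stride (1, 2, 2) and pad (1, 1, 1) gives (None, 8, 16, 16, 16)
print(conv3d_output_shape((None, 3, 16, 32, 32), 8, (3, 3, 3), (1, 2, 2), (1, 1, 1)))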
Example 3: compute_output
# Required import: from theano.sandbox.cuda import dnn [as alias]
# Or: from theano.sandbox.cuda.dnn import dnn_conv3d [as alias]
def compute_output(self, network, in_vw):
    num_filters = network.find_hyperparameter(['num_filters'])
    stride = network.find_hyperparameter(['upsample_factor'])
    filter_size = network.find_hyperparameter(['filter_size'])
    pad_name = 'same'
    pad = treeano.nodes.conv.conv_parse_pad(filter_size, pad_name)
    # In this case, the 0th element of shape is the number of channels
    # in the low-res layer, and the 1st element is that of the hi-res
    # layer. We put it in W this way, because W is a convolution from
    # hi-res to low-res.
    W = network.create_vw(
        name='weight',
        is_shared=True,
        shape=(in_vw.shape[1], num_filters,) + filter_size,
        tags={'parameter', 'weight'},
        default_inits=[],
    ).variable
    out_shape = list(in_vw.shape)
    symbolic_shape = list(in_vw.symbolic_shape())
    out_shape[1] = num_filters
    symbolic_shape[1] = num_filters
    # scale each spatial axis by the upsample factor
    for axis, s in zip((2, 3, 4), stride):
        if out_shape[axis] is not None:
            out_shape[axis] *= s
        symbolic_shape[axis] *= s
    out_shape = tuple(out_shape)
    symbolic_shape = tuple(symbolic_shape)
    # Build a zero tensor with the upsampled shape, convolve it down to the
    # input resolution, and take its gradient with the conv output's gradient
    # replaced by the actual input: this is the transposed (upsampling) conv.
    x = T.zeros(symbolic_shape)
    conved = dnn.dnn_conv3d(img=x,
                            kerns=W,
                            border_mode=pad,
                            subsample=stride)
    out_var = T.grad(None, wrt=x, known_grads={conved: in_vw.variable})
    network.create_vw(
        'default',
        variable=out_var,
        shape=out_shape,
        tags={'output'},
    )
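A quick standalone sanity check (assuming, as is conventional, that 'same' padding resolves to (filter_size - 1) // 2 per axis for odd filter sizes) of why 'same' padding combined with stride == upsample_factor gives an exact integer upscale: the forward convolution maps an axis of length factor * n down to n, so its gradient maps n back up to factor * n.

def downsampled_length(hi_len, filter_len, stride, pad):
    # length of one axis after a strided convolution with symmetric padding
    return (hi_len + 2 * pad - filter_len) // stride + 1

factor, n, filter_len = 2, 8, 3
pad = (filter_len - 1) // 2                  # assumed meaning of 'same' padding
assert downsampled_length(factor * n, filter_len, factor, pad) == n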