本文整理匯總了Python中mxnet.gluon.nn.Conv2DTranspose方法的典型用法代碼示例。如果您正苦於以下問題:Python nn.Conv2DTranspose方法的具體用法?Python nn.Conv2DTranspose怎麽用?Python nn.Conv2DTranspose使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類mxnet.gluon.nn
的用法示例。
在下文中一共展示了nn.Conv2DTranspose方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_deconv2d_16c
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_deconv2d_16c():
    """Forward-check Conv2DTranspose across a sweep of channel counts,
    square kernel sizes, and spatial resolutions."""
    channels_in = [1024, 512, 256, 128, 64, 32, 16]
    channels_out = [512, 256, 128, 64, 32, 16, 3]
    kernels = [1, 3, 5, 7]
    spatial_sizes = [4, 8, 16, 32, 64, 224]
    batch_size = 4

    class Net(gluon.HybridBlock):
        """A single transposed convolution with a square kernel."""

        def __init__(self, chn_num, kernel, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.deconv0 = gluon.nn.Conv2DTranspose(chn_num, (kernel, kernel))

        def hybrid_forward(self, F, x):
            return self.deconv0(x)

    for idx, size in enumerate(spatial_sizes):
        data = mx.nd.random.uniform(
            -1.0, 1.0, shape=(batch_size, channels_in[idx], size, size))
        for kernel in kernels:
            check_layer_forward_withinput(Net(channels_out[idx], kernel), data)
示例2: test_reshape_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_reshape_deconv():
    """Forward-check Conv2DTranspose applied after an in-graph reshape."""

    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.conv0 = nn.Conv2DTranspose(64, (3, 3))

        def hybrid_forward(self, F, x):
            return self.conv0(x.reshape(self.reshape))

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    # (4, 16, 32, 32) is reshaped to (4, 16, 64, 16) before the deconv.
    net = Net((4, 16, 64, -1))
    check_layer_forward_withinput(net, data)
示例3: test_slice_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_slice_deconv():
    """Forward-check Conv2DTranspose applied after an in-graph slice."""

    class Net(gluon.HybridBlock):
        def __init__(self, slice_window, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # Pair of (begin, end) corners for NDArray.slice.
                self.slice = slice_window
                self.conv0 = nn.Conv2DTranspose(64, (3, 3))

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0], end=self.slice[1])
            return self.conv0(x_slice)

    data = mx.nd.random.uniform(shape=(8, 32, 64, 64))
    # Renamed from `slice` to avoid shadowing the builtin.
    window = [(0, 16, 0, 0), (4, 32, 32, 32)]
    net = Net(window)
    check_layer_forward_withinput(net, data)
示例4: test_reshape_deconv_reshape_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_reshape_deconv_reshape_deconv():
    """Forward-check two stacked Conv2DTranspose layers with reshapes
    before each one."""

    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape[0])
            y = self.conv0(x_reshape)
            # shape of y is (4, 32, 66, 18) — was a no-op bare string, now a comment.
            y_reshape = y.reshape(self.reshape[1])
            return self.conv1(y_reshape)

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    shapes = [(4, 16, 64, -1), (4, 32, 33, -1)]
    net = Net(shapes)
    check_layer_forward_withinput(net, data)
示例5: test_reshape_deconv_slice_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_reshape_deconv_slice_deconv():
    """Forward-check reshape -> deconv -> slice -> deconv."""

    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_window, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                # Pair of (begin, end) corners for NDArray.slice.
                self.slice = slice_window
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(64, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            y = self.conv0(x_reshape)
            # shape of y is (4, 32, 66, 18) — was a no-op bare string, now a comment.
            y_slice = y.slice(begin=self.slice[0], end=self.slice[1])
            return self.conv1(y_slice)

    data = mx.nd.random.uniform(shape=(4, 16, 32, 32))
    shape = (4, 16, 64, -1)
    # Renamed from `slice` to avoid shadowing the builtin.
    window = [(0, 0, 0, 0), (2, 16, 16, 16)]
    net = Net(shape, window)
    check_layer_forward_withinput(net, data)
示例6: test_slice_deconv_reshape_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_slice_deconv_reshape_deconv():
    """Forward-check slice -> deconv -> reshape -> deconv."""

    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice_window, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                # Pair of (begin, end) corners for NDArray.slice.
                self.slice = slice_window
                self.conv0 = nn.Conv2DTranspose(32, (3, 3))
                self.conv1 = nn.Conv2DTranspose(96, (3, 3), strides=(2, 2))

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0], end=self.slice[1])
            y = self.conv0(x_slice)
            # shape of y is (4, 32, 34, 34) — was a no-op bare string, now a comment.
            y_reshape = y.reshape(self.reshape)
            return self.conv1(y_reshape)

    data = mx.nd.random.uniform(shape=(8, 32, 64, 64))
    shape = (4, 64, 34, -1)
    # Renamed from `slice` to avoid shadowing the builtin.
    window = [(4, 0, 0, 0), (8, 16, 32, 32)]
    net = Net(shape, window)
    check_layer_forward_withinput(net, data)
示例7: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, batch_images, classes, mask_channels, deep_fcn=False, **kwargs):
    """Mask prediction head.

    Builds either a single 2x transposed-conv upsampler, or (when
    ``deep_fcn``) four 3x3 conv+relu layers followed by that upsampler,
    and finally a 1x1 conv emitting one mask channel per class.
    Layer creation order matches the original so Gluon auto-naming is
    unchanged.
    """
    super(Mask, self).__init__(**kwargs)
    self._batch_images = batch_images
    self.classes = classes
    weight_init = mx.init.Xavier(rnd_type='gaussian', factor_type='out', magnitude=2)
    with self.name_scope():
        if deep_fcn:
            self.deconv = nn.HybridSequential()
            for _ in range(4):
                fcn_conv = nn.Conv2D(mask_channels, kernel_size=(3, 3), strides=(1, 1),
                                     padding=(1, 1), weight_initializer=weight_init)
                self.deconv.add(fcn_conv, nn.Activation('relu'))
            self.deconv.add(
                nn.Conv2DTranspose(mask_channels, kernel_size=(2, 2), strides=(2, 2),
                                   padding=(0, 0), weight_initializer=weight_init))
        else:
            self.deconv = nn.Conv2DTranspose(mask_channels, kernel_size=(2, 2),
                                             strides=(2, 2), padding=(0, 0),
                                             weight_initializer=weight_init)
        self.mask = nn.Conv2D(len(classes), kernel_size=(1, 1), strides=(1, 1),
                              padding=(0, 0), weight_initializer=weight_init)
# pylint: disable=arguments-differ
示例8: test_deconv
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def test_deconv():
    """Forward-check a set of Conv2DTranspose configurations.

    The 1-D and 3-D transposed-conv variants and the NHWC/NDHWC layout
    checks are currently disabled (kept below as comments).
    """
    two_d_layers = [
        nn.Conv2DTranspose(16, (3, 4), in_channels=4),
        nn.Conv2DTranspose(16, (5, 4), in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
        # nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
    ]
    for layer in two_d_layers:
        check_layer_forward(layer, (1, 4, 20, 20))
    # Disabled 1-D checks, e.g.:
    #   nn.Conv1DTranspose(16, 3, in_channels=4)
    #   nn.Conv1DTranspose(16, 3, groups=2, in_channels=4)
    #   nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4)
    #   check_layer_forward(layer, (1, 4, 10))
    # Disabled 3-D checks, e.g.:
    #   nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4)
    #   nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4)
    #   nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4)
    #   nn.Conv3DTranspose(16, 4, strides=4, in_channels=4)
    #   nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4)
    #   check_layer_forward(layer, (1, 4, 10, 10, 10))
    # Disabled layout checks:
    #   nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
    #   nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
示例9: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
    """DCGAN generator.

    Parameters
    ----------
    isize : int
        Output image size; must be a multiple of 16.
    nz : int
        Length of the latent vector Z.
    nc : int
        Number of output image channels.
    ngf : int
        Base number of generator feature maps.
    ngpu : int
        Number of GPUs (stored only; not used in construction).
    n_extra_layers : int
        Extra 3x3 conv blocks inserted before the output layer.
    """
    super(DCGAN_G, self).__init__()
    self.ngpu = ngpu
    assert isize % 16 == 0, "isize has to be a multiple of 16"
    # Choose the first deconv width so that doubling the 4x4 seed up to
    # isize halves the channel count at each pyramid step.
    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2
    with self.name_scope():
        main = nn.Sequential()
        # input is Z, going into a convolution
        main.add(
            nn.Conv2DTranspose(in_channels=nz, channels=cngf, kernel_size=4, strides=1,
                               padding=0, use_bias=False,
                               prefix='initial.{0}-{1}.convt'.format(nz, cngf)))
        main.add(nn.BatchNorm(in_channels=cngf, prefix='initial.{0}.batchnorm'.format(cngf)))
        main.add(nn.LeakyReLU(0, prefix='initial.{0}.relu'.format(cngf)))
        # Upsampling pyramid: each step doubles spatial size and halves channels.
        # NOTE: the original also bound an unused `cndf` here (copy-paste from
        # the discriminator); removed.
        csize = 4
        while csize < isize // 2:
            main.add(nn.Conv2DTranspose(in_channels=cngf, channels=cngf // 2, kernel_size=4,
                                        strides=2, padding=1, use_bias=False,
                                        prefix='pyramid.{0}-{1}.convt'.format(cngf, cngf // 2)))
            main.add(nn.BatchNorm(in_channels=cngf // 2,
                                  prefix='pyramid.{0}.batchnorm'.format(cngf // 2)))
            main.add(nn.LeakyReLU(0, prefix='pyramid.{0}.relu'.format(cngf // 2)))
            cngf = cngf // 2
            csize = csize * 2
        # Extra layers
        for t in range(n_extra_layers):
            main.add(nn.Conv2D(in_channels=cngf, channels=cngf, kernel_size=3, strides=1,
                               padding=1, use_bias=False,
                               prefix='extra-layers-{0}.{1}.conv'.format(t, cngf)))
            main.add(nn.BatchNorm(in_channels=cngf,
                                  prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cngf)))
            main.add(nn.LeakyReLU(0, prefix='extra-layers-{0}.{1}.relu'.format(t, cngf)))
        main.add(
            nn.Conv2DTranspose(in_channels=cngf, channels=nc, kernel_size=4, strides=2,
                               padding=1, use_bias=False, activation='tanh',
                               prefix='final.{0}-{1}.convt'.format(cngf, nc)))
        self.main = main
示例10: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect'):
    """ResNet generator: 7x7 entry conv, two stride-2 downsampling convs,
    ``n_blocks`` residual blocks, two stride-2 transposed-conv upsamplers,
    and a 7x7 tanh output conv. Layer creation order matches the original
    so Gluon auto-naming is unchanged."""
    assert(n_blocks >= 0)
    super(ResnetGenerator, self).__init__()
    self.output_nc = output_nc
    self.ngf = ngf
    self.model = nn.HybridSequential()
    with self.name_scope():
        # 7x7 entry convolution over a reflection-padded input.
        self.model.add(
            nn.ReflectionPad2D(3),
            nn.Conv2D(ngf, kernel_size=7, padding=0),
            nn.InstanceNorm(),
            nn.Activation('relu')
        )
        n_downsampling = 2
        # Downsampling: each stride-2 conv doubles the channel count.
        for level in range(n_downsampling):
            width = ngf * (2 ** level) * 2
            self.model.add(
                nn.Conv2D(width, kernel_size=3, strides=2, padding=1),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
        bottleneck_width = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            self.model.add(
                ResnetBlock(bottleneck_width, padding_type=padding_type,
                            use_dropout=use_dropout)
            )
        # Upsampling: each stride-2 deconv halves the channel count.
        for level in range(n_downsampling):
            width = ngf * (2 ** (n_downsampling - level))
            self.model.add(
                nn.Conv2DTranspose(int(width / 2), kernel_size=3, strides=2,
                                   padding=1, output_padding=1),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
        self.model.add(
            nn.ReflectionPad2D(3),
            nn.Conv2D(output_nc, kernel_size=7, padding=0),
            nn.Activation('tanh')
        )
示例11: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, out_channels, in_channels, up_f,
             use_dcnv2=False, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    """IDA upsampling stage.

    For every input level past the first, builds a projection conv, an
    aggregation ("node") conv, and a bilinear-initialized depthwise
    transposed-conv upsampler. Creation order (proj, node, up) matches the
    original so Gluon auto-naming is unchanged.
    """
    super(IDAUp, self).__init__(**kwargs)
    self.startp = 0
    self.endp = 1
    with self.name_scope():
        self.projs = nn.HybridSequential('ida_proj')
        self.ups = nn.HybridSequential('ida_ups')
        self.nodes = nn.HybridSequential('ida_nodes')
        for level in range(1, len(in_channels)):
            level_channels = in_channels[level]
            factor = int(up_f[level])
            proj = CustomConv(level_channels, out_channels, use_dcnv2=use_dcnv2,
                              norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            node = CustomConv(out_channels, out_channels, use_dcnv2=use_dcnv2,
                              norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            # Depthwise (groups == channels) deconv whose weights start as
            # bilinear upsampling by `factor`.
            up = nn.Conv2DTranspose(in_channels=out_channels, channels=out_channels,
                                    kernel_size=factor * 2, strides=factor,
                                    padding=factor // 2, output_padding=0,
                                    groups=out_channels, use_bias=False,
                                    weight_initializer=BilinearUpSample())
            self.projs.add(proj)
            self.ups.add(up)
            self.nodes.add(node)
示例12: _make_deconv_layer
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
    """Build ``num_layers`` stride-2 deconv stages (deconv + BN + ReLU).

    Parameters
    ----------
    num_layers : int
        Number of deconv stages; must equal len(num_filters) and len(num_kernels).
    num_filters : list of int
        Output channels for each stage.
    num_kernels : list of int
        Kernel-size codes for each stage, resolved via self._get_deconv_cfg.

    Returns
    -------
    nn.HybridSequential
        The assembled deconvolution head.
    """
    assert num_layers == len(num_filters), \
        'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
    # Fixed copy-paste: this message previously referred to num_deconv_filters.
    assert num_layers == len(num_kernels), \
        'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
    layer = nn.HybridSequential(prefix='')
    with layer.name_scope():
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i])
            planes = num_filters[i]
            layer.add(
                nn.Conv2DTranspose(
                    channels=planes,
                    kernel_size=kernel,
                    strides=2,
                    padding=padding,
                    output_padding=output_padding,
                    use_bias=self.deconv_with_bias,
                    weight_initializer=initializer.Normal(0.001),
                    bias_initializer=initializer.Zero()))
            layer.add(gcv.nn.BatchNormCudnnOff(gamma_initializer=initializer.One(),
                                               beta_initializer=initializer.Zero()))
            layer.add(nn.Activation('relu'))
            # Track the running channel count for subsequent layers.
            self.inplanes = planes
    return layer
示例13: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, batch_images, classes, mask_channels, num_fcn_convs=0, norm_layer=None,
             norm_kwargs=None, **kwargs):
    """Mask prediction head with optional FCN conv stack and SyncBatchNorm.

    When ``num_fcn_convs`` > 0, builds that many 3x3 conv(+norm)+relu layers
    followed by a 2x transposed-conv upsampler; otherwise a single upsampler
    (kept for compatibility with older models). A final 1x1 conv emits one
    mask channel per class.
    """
    super(Mask, self).__init__(**kwargs)
    self._batch_images = batch_images
    self.classes = classes
    weight_init = mx.init.Xavier(rnd_type='gaussian', factor_type='out', magnitude=2)
    use_sync_bn = norm_layer is not None and norm_layer is SyncBatchNorm
    with self.name_scope():
        if num_fcn_convs > 0:
            self.deconv = nn.HybridSequential()
            for _ in range(num_fcn_convs):
                self.deconv.add(
                    nn.Conv2D(mask_channels, kernel_size=(3, 3), strides=(1, 1),
                              padding=(1, 1), weight_initializer=weight_init))
                if use_sync_bn:
                    self.deconv.add(norm_layer(**norm_kwargs))
                self.deconv.add(nn.Activation('relu'))
            self.deconv.add(
                nn.Conv2DTranspose(mask_channels, kernel_size=(2, 2), strides=(2, 2),
                                   padding=(0, 0), weight_initializer=weight_init))
            if use_sync_bn:
                self.deconv.add(norm_layer(**norm_kwargs))
        else:
            # this is for compatibility of older models.
            self.deconv = nn.Conv2DTranspose(mask_channels, kernel_size=(2, 2),
                                             strides=(2, 2), padding=(0, 0),
                                             weight_initializer=weight_init)
        self.mask = nn.Conv2D(len(classes), kernel_size=(1, 1), strides=(1, 1),
                              padding=(0, 0), weight_initializer=weight_init)
# pylint: disable=arguments-differ
示例14: __init__
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def __init__(self, dim2, classes, out_size, bn_epsilon, **kwargs):
    """SB decoder head.

    Two SBDecodeBlocks refine class maps at 1/8 and 1/4 resolution, a 1x1
    block projects ``dim2`` features to class scores, and the result is
    upsampled 2x by a learned transposed conv followed by interpolation.
    """
    super(SBDecoder, self).__init__(**kwargs)
    with self.name_scope():
        size_div8 = (out_size[0] // 8, out_size[1] // 8) if out_size else None
        size_div4 = (out_size[0] // 4, out_size[1] // 4) if out_size else None
        self.decode1 = SBDecodeBlock(
            channels=classes,
            out_size=size_div8,
            bn_epsilon=bn_epsilon)
        self.decode2 = SBDecodeBlock(
            channels=classes,
            out_size=size_div4,
            bn_epsilon=bn_epsilon)
        self.conv3c = conv1x1_block(
            in_channels=dim2,
            out_channels=classes,
            bn_epsilon=bn_epsilon,
            activation=(lambda: PReLU2(classes)))
        # Learned 2x upsampling over class maps (bias-free, channel-preserving).
        self.output = nn.Conv2DTranspose(
            channels=classes,
            kernel_size=2,
            strides=2,
            padding=0,
            output_padding=0,
            in_channels=classes,
            use_bias=False)
        self.up = InterpolationBlock(
            scale_factor=2,
            out_size=out_size)
示例15: _make_deconv_layer
# 需要導入模塊: from mxnet.gluon import nn [as 別名]
# 或者: from mxnet.gluon.nn import Conv2DTranspose [as 別名]
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
    """Build ``num_layers`` stride-2 deconv stages (deconv + BN + ReLU).

    Parameters
    ----------
    num_layers : int
        Number of deconv stages; must equal len(num_filters) and len(num_kernels).
    num_filters : list of int
        Output channels for each stage.
    num_kernels : list of int
        Kernel-size codes for each stage, resolved via self._get_deconv_cfg.

    Returns
    -------
    nn.HybridSequential
        The assembled deconvolution head.
    """
    assert num_layers == len(num_filters), \
        'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
    # Fixed copy-paste: this message previously referred to num_deconv_filters.
    assert num_layers == len(num_kernels), \
        'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
    layer = nn.HybridSequential(prefix='')
    with layer.name_scope():
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i])
            planes = num_filters[i]
            layer.add(
                nn.Conv2DTranspose(
                    channels=planes,
                    kernel_size=kernel,
                    strides=2,
                    padding=padding,
                    output_padding=output_padding,
                    use_bias=self.deconv_with_bias,
                    weight_initializer=initializer.Normal(0.001),
                    bias_initializer=initializer.Zero()))
            layer.add(gcv.nn.BatchNormCudnnOff(gamma_initializer=initializer.One(),
                                               beta_initializer=initializer.Zero()))
            layer.add(nn.Activation('relu'))
            # Track the running channel count for subsequent layers.
            self.inplanes = planes
    return layer