This article collects typical usage examples of chainer.links.Deconvolution2D in Python. If you have been wondering what links.Deconvolution2D does, how to call it, or what code that uses it looks like, the curated examples below should help. You can also browse the enclosing module, chainer.links, for further usage examples.
The following 15 code examples of links.Deconvolution2D are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
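Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic behaviour of chainer.links.Deconvolution2D: it upsamples a feature map, and the spatial output size follows out = stride * (in - 1) + ksize - 2 * pad.

# Minimal sketch: a single Deconvolution2D link upsampling a 4x4 feature map.
import numpy as np
import chainer.links as L

deconv = L.Deconvolution2D(in_channels=8, out_channels=3, ksize=4, stride=2, pad=1)
x = np.random.randn(1, 8, 4, 4).astype(np.float32)
y = deconv(x)
print(y.shape)  # (1, 3, 8, 8), since 2 * (4 - 1) + 4 - 2 * 1 = 8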
Example 1: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self):
super(FCN_32s, self).__init__(
conv1_1=L.Convolution2D(3, 64, 3, pad=100),
conv1_2=L.Convolution2D(64, 64, 3),
conv2_1=L.Convolution2D(64, 128, 3),
conv2_2=L.Convolution2D(128, 128, 3),
conv3_1=L.Convolution2D(128, 256, 3),
conv3_2=L.Convolution2D(256, 256, 3),
conv4_1=L.Convolution2D(256, 512, 3),
conv4_2=L.Convolution2D(512, 512, 3),
conv4_3=L.Convolution2D(512, 512, 3),
conv5_1=L.Convolution2D(512, 512, 3),
conv5_2=L.Convolution2D(512, 512, 3),
conv5_3=L.Convolution2D(512, 512, 3),
fc6=L.Convolution2D(512, 4096, 7),
fc7=L.Convolution2D(4096, 4096, 1),
score_fr=L.Convolution2D(4096, 21, 1),
upsample=L.Deconvolution2D(21, 21, 64, 32),
)
self.train = True
Example 2: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, n_class=21):
self.train=True
super(FCN32s, self).__init__(
conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
fc6=L.Convolution2D(512, 4096, 7, stride=1, pad=0),
fc7=L.Convolution2D(4096, 4096, 1, stride=1, pad=0),
score_fr=L.Convolution2D(4096, n_class, 1, stride=1, pad=0,
nobias=True, initialW=np.zeros((n_class, 4096, 1, 1))),
upscore=L.Deconvolution2D(n_class, n_class, 64, stride=32, pad=0,
nobias=True, initialW=f.bilinear_interpolation_kernel(n_class, n_class, ksize=64)),
)
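Example 2 initializes the upscore layer with a bilinear interpolation kernel produced by a helper, f.bilinear_interpolation_kernel, that is not shown on this page. As a rough illustration only (a generic reconstruction of the standard FCN bilinear initializer, not necessarily the project's own helper), such a kernel can be built as follows:

import numpy as np

def bilinear_kernel(in_channels, out_channels, ksize):
    # Hypothetical stand-in for f.bilinear_interpolation_kernel: the standard
    # bilinear upsampling filter used to initialize FCN deconvolution weights.
    # Assumes in_channels == out_channels, as in the upscore layer above.
    factor = (ksize + 1) // 2
    center = factor - 1.0 if ksize % 2 == 1 else factor - 0.5
    og = np.ogrid[:ksize, :ksize]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    W = np.zeros((in_channels, out_channels, ksize, ksize), dtype=np.float32)
    W[range(in_channels), range(out_channels)] = filt  # one bilinear filter per channel
    return W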
Example 3: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
super(Generator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
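Example 3 shows only the constructor. A forward pass for such a DCGAN generator would, roughly, reshape the output of l0 into a (batch, ch, bottom_width, bottom_width) feature map and push it through the deconvolution stack; the sketch below is an assumption about how these layers are wired, and the project's actual __call__ may differ (e.g. sigmoid instead of tanh on the output).

import chainer.functions as F

def forward(self, z):
    # Sketch of a forward pass consistent with the layers defined above.
    h = F.reshape(F.relu(self.bn0(self.l0(z))),
                  (len(z), self.ch, self.bottom_width, self.bottom_width))
    h = F.relu(self.bn1(self.dc1(h)))  # 4x4 -> 8x8
    h = F.relu(self.bn2(self.dc2(h)))  # 8x8 -> 16x16
    h = F.relu(self.bn3(self.dc3(h)))  # 16x16 -> 32x32
    return F.tanh(self.dc4(h))         # 32x32, 3 output channels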
Example 4: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, isize, nc, ngf, conv_init=None, bn_init=None):
cngf, tisize = ngf // 2, 4
while tisize != isize:
cngf = cngf * 2
tisize = tisize * 2
layers = []
# input is Z, going into a convolution
layers.append(L.Deconvolution2D(None, cngf, ksize=4, stride=1, pad=0, initialW=conv_init, nobias=True))
layers.append(L.BatchNormalization(cngf, initial_gamma=bn_init))
layers.append(ReLU())
csize, cndf = 4, cngf
while csize < isize // 2:
layers.append(L.Deconvolution2D(None, cngf // 2, ksize=4, stride=2, pad=1, initialW=conv_init, nobias=True))
layers.append(L.BatchNormalization(cngf // 2, initial_gamma=bn_init))
layers.append(ReLU())
cngf = cngf // 2
csize = csize * 2
layers.append(L.Deconvolution2D(None, nc, ksize=4, stride=2, pad=1, initialW=conv_init, nobias=True))
layers.append(Tanh())
super(DCGAN_G, self).__init__(*layers)
Example 5: test_caffe_export_model
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def test_caffe_export_model(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.l1 = L.Convolution2D(None, 1, 1, 1, 0, groups=1)
self.b2 = L.BatchNormalization(1, eps=1e-2)
self.l3 = L.Deconvolution2D(None, 1, 1, 1, 0, groups=1)
self.l4 = L.Linear(None, 1)
def forward(self, x):
h = F.relu(self.l1(x))
h = self.b2(h)
h = self.l3(h)
return self.l4(h)
assert_export_import_match(Model(), self.x)
Example 6: create_link
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def create_link(self, initializers):
initialW, initial_bias = initializers
if self.nobias:
link = L.Deconvolution2D(
self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad, nobias=self.nobias,
dilate=self.dilate, groups=self.groups,
initialW=initialW)
else:
link = L.Deconvolution2D(
self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad, nobias=self.nobias,
dilate=self.dilate, groups=self.groups,
initialW=initialW,
initial_bias=initial_bias)
return link
Example 7: test_deconvolution
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def test_deconvolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Deconvolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(12))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
Example 8: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02,
z_distribution="uniform", hidden_activation=F.relu, output_activation=F.tanh, use_bn=True):
super(DCGANGenerator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
self.z_distribution = z_distribution
self.hidden_activation = hidden_activation
self.output_activation = output_activation
self.use_bn = use_bn
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
if self.use_bn:
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
Example 9: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, n_hidden=128, bottom_width=4, ch=512, wscale=0.02):
super(Generator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
Example 10: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, z_slow_dim, z_fast_dim, out_channels, bottom_width,
conv_ch=512, wscale=0.01):
self.ch = conv_ch
self.bottom_width = bottom_width
slow_mid_dim = bottom_width * bottom_width * conv_ch // 2
fast_mid_dim = bottom_width * bottom_width * conv_ch // 2
super(VideoGeneratorInitUniform, self).__init__()
w = chainer.initializers.Uniform(wscale)
with self.init_scope():
self.l0s = L.Linear(z_slow_dim, slow_mid_dim, initialW=w, nobias=True)
self.l0f = L.Linear(z_fast_dim, fast_mid_dim, initialW=w, nobias=True)
self.dc1 = L.Deconvolution2D(conv_ch, conv_ch // 2, 4, 2, 1, initialW=w, nobias=True)
self.dc2 = L.Deconvolution2D(conv_ch // 2, conv_ch // 4, 4, 2, 1, initialW=w, nobias=True)
self.dc3 = L.Deconvolution2D(conv_ch // 4, conv_ch // 8, 4, 2, 1, initialW=w, nobias=True)
self.dc4 = L.Deconvolution2D(conv_ch // 8, conv_ch // 16, 4, 2, 1, initialW=w, nobias=True)
self.dc5 = L.Deconvolution2D(conv_ch // 16, out_channels, 3, 1, 1, initialW=w, nobias=False)
self.bn0s = L.BatchNormalization(slow_mid_dim)
self.bn0f = L.BatchNormalization(fast_mid_dim)
self.bn1 = L.BatchNormalization(conv_ch // 2)
self.bn2 = L.BatchNormalization(conv_ch // 4)
self.bn3 = L.BatchNormalization(conv_ch // 8)
self.bn4 = L.BatchNormalization(conv_ch // 16)
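Example 10 feeds two latent vectors (a slow and a fast stream) through separate linear layers of equal width. One plausible forward pass, consistent with the layer sizes above but not necessarily identical to the original project's code, concatenates the two streams before reshaping:

import chainer.functions as F

def forward(self, z_slow, z_fast):
    # Sketch only: combine the slow/fast latent streams and decode to an image.
    n = len(z_slow)
    hs = F.relu(self.bn0s(self.l0s(z_slow)))
    hf = F.relu(self.bn0f(self.l0f(z_fast)))
    h = F.concat((hs, hf), axis=1)  # width: bottom_width**2 * conv_ch
    h = F.reshape(h, (n, self.ch, self.bottom_width, self.bottom_width))
    h = F.relu(self.bn1(self.dc1(h)))
    h = F.relu(self.bn2(self.dc2(h)))
    h = F.relu(self.bn3(self.dc3(h)))
    h = F.relu(self.bn4(self.dc4(h)))
    return F.tanh(self.dc5(h))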
Example 11: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False) -> None:
super().__init__()
self.bn = bn
self.activation = activation
self.dropout = dropout
w = chainer.initializers.Normal(0.02)
with self.init_scope():
if sample == 'down':
self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
elif sample == 'up':
self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
else:
self.c = L.Convolution2D(ch0, ch1, 1, 1, 0, initialW=w)
if bn:
self.batchnorm = L.BatchNormalization(ch1)
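Example 11 defines only the constructor of a down/up-sampling building block. At call time the convolution (or deconvolution), the optional batch normalization, the optional dropout, and the activation are typically chained as in this sketch, which is an assumption about usage rather than the project's exact code:

import chainer.functions as F

def forward(self, x):
    # Sketch: apply the stored link, then the optional bn/dropout/activation.
    h = self.c(x)
    if self.bn:
        h = self.batchnorm(h)
    if self.dropout:
        h = F.dropout(h)
    if self.activation is not None:
        h = self.activation(h)
    return h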
Example 12: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self):
super(FastStyleNet, self).__init__(
c1=L.Convolution2D(3, 32, 9, stride=1, pad=4),
c2=L.Convolution2D(32, 64, 4, stride=2, pad=1),
c3=L.Convolution2D(64, 128, 4, stride=2, pad=1),
r1=ResidualBlock(128, 128),
r2=ResidualBlock(128, 128),
r3=ResidualBlock(128, 128),
r4=ResidualBlock(128, 128),
r5=ResidualBlock(128, 128),
d1=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
d2=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
d3=L.Deconvolution2D(32, 3, 9, stride=1, pad=4),
b1=L.BatchNormalization(32),
b2=L.BatchNormalization(64),
b3=L.BatchNormalization(128),
b4=L.BatchNormalization(64),
b5=L.BatchNormalization(32),
)
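Example 12 is the layer table of a feed-forward style-transfer network: a 9x9 convolution followed by two stride-2 convolutions (encoder), five residual blocks, and two stride-2 deconvolutions plus a 9x9 deconvolution (decoder). A hedged sketch of a forward pass consistent with the batch-normalization sizes b1-b5 is shown below; the original implementation may use different activations or output scaling.

import chainer.functions as F

def forward(self, x):
    # Encoder: b1..b3 match the output channels of c1..c3.
    h = self.b1(F.elu(self.c1(x)))
    h = self.b2(F.elu(self.c2(h)))
    h = self.b3(F.elu(self.c3(h)))
    # Five residual blocks at 1/4 resolution.
    h = self.r5(self.r4(self.r3(self.r2(self.r1(h)))))
    # Decoder: back to full resolution, then a 9x9 deconvolution to RGB.
    h = self.b4(F.elu(self.d1(h)))
    h = self.b5(F.elu(self.d2(h)))
    return self.d3(h)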
Example 13: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
self.bn = bn
self.activation = activation
self.dropout = dropout
layers = {}
w = chainer.initializers.Normal(0.02)
if sample=='down':
layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
elif sample=='up':
layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
elif sample=='up-nn':
layers['c'] = NNConvolution2D(ch0, ch1, 2, 3, 1, 1, initialW=w)
elif sample=='none':
layers['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=w)
elif sample=='none-5':
layers['c'] = L.Convolution2D(ch0, ch1, 5, 1, 2, initialW=w)
else:
assert False, 'unknown sample {}'.format(sample)
if bn:
layers['batchnorm'] = L.BatchNormalization(ch1)
super(CBR, self).__init__(**layers)
Example 14: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
Example 15: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import Deconvolution2D [as alias]
def __init__(self, in_ch, out_ch, ksize, stride, pad, nobias=False, gain=np.sqrt(2), lrmul=1):
w = chainer.initializers.Normal(1.0/lrmul) # equalized learning rate
self.inv_c = gain * np.sqrt(1.0 / (in_ch))
self.inv_c = self.inv_c * lrmul
super(EqualizedDeconv2d, self).__init__()
with self.init_scope():
self.c = L.Deconvolution2D(in_ch, out_ch, ksize, stride, pad, initialW=w, nobias=nobias)
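Example 15 stores a per-layer constant self.inv_c (the equalized-learning-rate scale used in progressive-growing GANs) but does not show where it is applied. Typically it multiplies the layer input (equivalently, the weights) at call time; the forward pass below is a hedged sketch of that pattern, not necessarily the project's exact code.

def forward(self, x):
    # Equalized learning rate: weights are drawn from N(0, 1/lrmul) and the
    # He-style scale is applied at runtime instead of baked into the init.
    return self.c(self.inv_c * x)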