This article collects typical usage examples of the Python method chainer.functions.unpooling_2d. If you are unsure what functions.unpooling_2d does, how to call it, or want to see it in context, the curated code samples below may help. You can also browse the parent module chainer.functions for related usage examples.
The following shows 15 code examples of functions.unpooling_2d, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
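Before the project excerpts, here is a minimal standalone sketch of what F.unpooling_2d does to an input's spatial size. It is not taken from any of the examples below; the toy array, its shape, and the printed output are illustrative assumptions.
import numpy as np
import chainer.functions as F

# Toy NCHW input: batch size 1, 1 channel, 2x2 spatial map.
x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)

# ksize=2 with the default stride (= ksize) and cover_all=False:
# every input value is repeated over a 2x2 block, so the 2x2 map
# becomes a 4x4 map (nearest-neighbor upsampling).
y = F.unpooling_2d(x, 2, cover_all=False)
print(y.shape)         # (1, 1, 4, 4)
print(y.data[0, 0])    # each value tiled over its 2x2 block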
Example 1: forward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def forward(self, x):
    hs = self.base(x)
    with flags.for_unroll():
        # Top-down pathway: refine each level and add the upsampled
        # coarser-level feature map to the next finer one.
        for i in range(self.n_base_output_minus1, -1, -1):
            hs[i] = self.inner[i](hs[i])
            if i < self.n_base_output_minus1:
                hs[i] += F.unpooling_2d(hs[i + 1], 2, cover_all=False)
        for i in range(self.n_base_output):
            hs[i] = self.outer[i](hs[i])
        # Extra coarser scales produced by strided 1x1 max pooling.
        for _ in range(self.scales_minus_n_base_output):
            hs.append(F.max_pooling_2d(hs[-1], 1, stride=2, cover_all=False))
    return hs
Example 2: __call__
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def __call__(self, x, alpha=1.0):
    if self.depth > 0 and alpha < 1.0:
        # Fade-in: blend the upsampled output of the previous-resolution
        # block with the newly added block's output, weighted by alpha.
        h = x
        for i in range(self.depth - 1):
            h = self['b%d' % i](h)
        h1 = self['b%d' % (self.depth - 1)](h)
        h2 = F.unpooling_2d(h1, 2, 2, outsize=self['b%d' % self.depth].outsize)
        h3 = self['b%d' % (self.depth - 1)].toRGB(h2)
        h4 = self['b%d' % self.depth](h1, True)
        h = h3 * (1 - alpha) + h4 * alpha
    else:
        h = x
        for i in range(self.depth):
            h = self['b%d' % i](h)
        h = self['b%d' % self.depth](h, True)
    return h
Example 3: __call__
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def __call__(self, x, test):
    if self.sample in ("down", "none", "none-9", "none-7", "none-5"):
        h = self.c(x)
    elif self.sample == "up":
        # Nearest-neighbor 2x upsampling, then convolution.
        h = F.unpooling_2d(x, 2, 2, 0, cover_all=False)
        h = self.c(h)
    else:
        print("unknown sample method %s" % self.sample)
    if self.bn:
        h = self.batchnorm(h, test=test)
    if self.noise:
        h = add_noise(h, test=test)
    if self.dropout:
        h = F.dropout(h, train=not test)
    if self.activation is not None:
        h = self.activation(h)
    return h
Example 4: _upsample
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def _upsample(x):
    h, w = x.shape[2:]
    return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
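For context (this check is not part of the project the helper comes from): with ksize 2, the default stride equal to ksize, and outsize fixed to twice the input, the helper amounts to nearest-neighbor 2x upsampling. A small sketch with an assumed toy input:
import numpy as np
import chainer.functions as F

def _upsample(x):
    h, w = x.shape[2:]
    return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))

# Assumed toy input of shape (1, 1, 2, 2).
x = np.array([[[[1., 2.],
                [3., 4.]]]], dtype=np.float32)
y = _upsample(x)
print(y.shape)        # (1, 1, 4, 4)
print(y.data[0, 0])   # each pixel repeated over a 2x2 block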
Example 5: _upsample_frq
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def _upsample_frq(x):
    h, w = x.shape[2:]
    # Upsample only the last (frequency) axis by a factor of 2.
    return F.unpooling_2d(x, (1, 2), outsize=(h, w * 2))
Example 6: upscale2x
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def upscale2x(h):
    return F.unpooling_2d(h, 2, 2, 0, outsize=(h.shape[2] * 2, h.shape[3] * 2))
Example 7: forward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def forward(self, x):
    y = F.unpooling_2d(x, 2, cover_all=False)
    return y
Example 8: __call__
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def __call__(self, x, mask):
    # h = self.c(x) - self.b
    self.m.W.data = self.xp.array(self.maskW)  # mask-convolution weights are all ones
    h = self.c(x * mask)  # (B, C, H, W)
    B, C, H, W = h.shape
    # b = F.transpose(F.broadcast_to(self.c.b, (B, H, W, C)), (0, 3, 1, 2))
    # h = h - b
    mask_sums = self.m(mask)  # number of valid pixels in each window
    # Binarize: 1 where the window contains at least one valid pixel, else 0.
    mask_new = (self.xp.sign(mask_sums.data - 0.5) + 1.0) * 0.5
    mask_new_b = mask_new.astype("bool")
    # Avoid division by zero for fully masked windows.
    mask_sums = F.where(mask_new_b, mask_sums,
                        0.01 * Variable(self.xp.ones(mask_sums.shape).astype("f")))
    h = h / mask_sums
    # h = h / mask_sums + b
    mask_new = Variable(mask_new)
    h = F.where(mask_new_b, h, Variable(self.xp.zeros(h.shape).astype("f")))
    # elif self.sample == "up":
    #     h = F.unpooling_2d(x, 2, 2, 0, cover_all=False)
    #     h = self.c(h)
    # else:
    #     print("unknown sample method %s" % self.sample)
    if self.bn:
        h = self.batchnorm(h)
    if self.noise:
        h = add_noise(h)
    if self.dropout:
        h = F.dropout(h)
    if self.activation is not None:
        h = self.activation(h)
    return h, mask_new
Developer: SeitaroShinagawa, Project: chainer-partial_convolution_image_inpainting, Lines of code: 34, Source: net_pre-trained.py
Example 9: __call__
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def __call__(self, x):
    return F.unpooling_2d(
        x=x,
        ksize=self.scale_factor,
        cover_all=False)
Example 10: __call__
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def __call__(self, x):
    x = self.conv(x)
    return F.unpooling_2d(
        x=x,
        ksize=self.scale_factor,
        cover_all=False)
Example 11: check_forward_consistency_regression
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def check_forward_consistency_regression(self, backend_config):
    # Regression test against the two-dimensional unpooling layer.
    inputs, = self.generate_inputs()
    x = chainer.Variable(backend_config.get_array(inputs))

    ksize = self.ksize
    stride = self.stride
    pad = self.pad

    y_nd = functions.unpooling_nd(x, ksize, stride=stride, pad=pad,
                                  cover_all=self.cover_all)
    y_2d = functions.unpooling_2d(x, ksize, stride=stride, pad=pad,
                                  cover_all=self.cover_all)

    testing.assert_allclose(
        y_nd.array, y_2d.array, **self.check_forward_options)
Example 12: check_backward_consistency_regression
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def check_backward_consistency_regression(self, backend_config):
    # Regression test against the two-dimensional unpooling layer.
    x_data, = self.generate_inputs()
    gy_data = numpy.random.uniform(-1, 1, self.gy_shape).astype(self.dtype)

    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    xp = backend.get_array_module(x_data)

    # Backward computation for the N-dimensional unpooling layer.
    x_nd = chainer.Variable(xp.array(x_data))
    y_nd = functions.unpooling_nd(
        x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
    y_nd.grad = gy_data
    y_nd.backward()

    # Backward computation for the two-dimensional unpooling layer.
    x_2d = chainer.Variable(xp.array(x_data))
    y_2d = functions.unpooling_2d(
        x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
    y_2d.grad = gy_data
    y_2d.backward()

    # Test that the two resulting gradients are close enough.
    opt = self.check_backward_options
    testing.assert_allclose(
        x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
Example 13: check_forward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
                               cover_all=self.cover_all)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    self.assertEqual(self.gy.shape, y_data.shape)
    for i in six.moves.range(self.N):
        for c in six.moves.range(self.n_channels):
            outsize = self.outsize or self.expected_outsize
            assert y_data.shape[2:] == outsize
            if outsize == (5, 2):
                expect = numpy.zeros(outsize, dtype=self.dtype)
                expect[:2, :] = self.x[i, c, 0, 0]
                expect[2:4, :] = self.x[i, c, 1, 0]
            elif outsize == (4, 2):
                expect = numpy.array([
                    [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
                    [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
                    [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
                    [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
                ])
            elif outsize == (3, 1):
                expect = numpy.array([
                    [self.x[i, c, 0, 0]],
                    [self.x[i, c, 0, 0]],
                    [self.x[i, c, 1, 0]],
                ])
            else:
                raise ValueError('Unsupported outsize: {}'.format(outsize))
            testing.assert_allclose(expect, y_data[i, c])
Example 14: check_backward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def check_backward(self, x_data, y_grad):
    def f(x):
        return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
                                      cover_all=self.cover_all)

    gradient_check.check_backward(
        f, x_data, y_grad, dtype=numpy.float64,
        **self.check_backward_options)
Example 15: check_double_backward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import unpooling_2d [as alias]
def check_double_backward(self, x_data, y_grad, x_grad_grad,
                          use_cudnn='always'):
    def f(x):
        return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
                                      cover_all=self.cover_all)

    with chainer.using_config('use_cudnn', use_cudnn):
        gradient_check.check_double_backward(
            f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
            **self.check_double_backward_options)