This article collects typical usage examples of the Python method chainer.utils.conv.get_conv_outsize. If you are wondering what conv.get_conv_outsize does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples of the module it belongs to, chainer.utils.conv.
The following presents 15 code examples of conv.get_conv_outsize, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
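Before the examples, here is a minimal sketch of the size computation that get_conv_outsize performs, mirroring its documented behaviour (signature: size, k, s, p, cover_all=False, d=1). The helper name conv_outsize_sketch is ours, not Chainer's.

from chainer.utils import conv

def conv_outsize_sketch(size, k, s, p, cover_all=False, d=1):
    dk = k + (k - 1) * (d - 1)                    # effective (dilated) kernel size
    if cover_all:
        # round up so that every input position is covered by some window
        return (size + p * 2 - dk + s - 1) // s + 1
    return (size + p * 2 - dk) // s + 1           # standard floor division

assert conv.get_conv_outsize(7, 3, 2, 1) == conv_outsize_sketch(7, 3, 2, 1) == 4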
Example 1: setUp
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def setUp(self):
    in_channels = 3
    out_channels = 2
    ndim = len(self.dims)
    ksize = (3,) * ndim
    self.stride = (2,) * ndim
    self.pad = (1,) * ndim
    x_shape = (2, 3) + self.dims
    self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)
    W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
    W_shape = (out_channels, in_channels) + ksize
    self.W = cuda.cupy.random.normal(
        0, W_scale, W_shape).astype(self.dtype)
    gy_shape = (2, 2) + tuple(
        conv.get_conv_outsize(d, k, s, p) for (d, k, s, p) in zip(
            self.dims, ksize, self.stride, self.pad))
    self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
    with chainer.using_config('use_cudnn', self.use_cudnn):
        self.expect = chainer.should_use_cudnn('>=auto') and ndim > 1
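A quick check of the shape arithmetic in the setUp above, assuming the test is parameterized with dims = (4, 3); ksize, stride and pad are then (3, 3), (2, 2) and (1, 1) as in the code. The concrete dims value is hypothetical.

from chainer.utils import conv

dims, ksize, stride, pad = (4, 3), (3, 3), (2, 2), (1, 1)
outs = tuple(conv.get_conv_outsize(d, k, s, p)
             for d, k, s, p in zip(dims, ksize, stride, pad))
print(outs)                 # (2, 2)
gy_shape = (2, 2) + outs    # (batch, out_channels) + spatial sizes -> (2, 2, 2, 2)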
Example 2: setUp
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def setUp(self):
    self.ndim = len(self.dims)
    self.ksize = (3,) * self.ndim
    self.stride = (2,) * self.ndim
    self.pad = (1,) * self.ndim
    self.input_shape = (2, 3) + self.dims
    outs = tuple(conv.get_conv_outsize(d, k, s, p, False)
                 for (d, k, s, p) in six.moves.zip(
                     self.dims, self.ksize, self.stride, self.pad))
    self.output_shape = (2, 3) + outs
    self.check_backward_options.update({'atol': 5e-3, 'rtol': 5e-3})
    self.check_double_backward_options.update({'atol': 5e-3, 'rtol': 5e-3})
    if self.dtype == numpy.float16:
        self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
        self.check_backward_options.update({
            'eps': 1e-2, 'atol': 5e-3, 'rtol': 5e-2})
        self.check_double_backward_options.update({
            'eps': 1e-2, 'atol': 5e-3, 'rtol': 5e-2})
Example 3: test_valid_insize
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def test_valid_insize(self):
    N = self.N
    c = self.c
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    outs = self.outsize
    cover_all = self.cover_all
    # Make input.
    dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
                 for (out, k, s, p) in zip(outs, ksize, stride, pad))
    x_shape = (N, c) + dims
    x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
    x = chainer.Variable(x_data)
    # Compute unpooling.
    y = functions.unpooling_nd(
        x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
    # Test output's value.
    y_expected = expected_unpooling_nd(x_data, outs, ksize, stride, pad)
    testing.assert_allclose(y_expected, y.data)
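The test derives the (smaller) unpooling input size from the requested output size. A sanity check of that inverse relation, with hypothetical per-axis parameters outsize=6, ksize=2, stride=2, pad=0:

from chainer.utils import conv

o, k, s, p = 6, 2, 2, 0
d = conv.get_conv_outsize(o, k, s, p)              # 3: the unpooling-input size
assert conv.get_deconv_outsize(d, k, s, p) == o    # unpooling restores the outsize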
Example 4: test_invalid_insize
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def test_invalid_insize(self):
    ksize = self.ksize
    stride = self.stride
    pad = self.pad
    outs = self.outsize
    cover_all = self.cover_all
    # Make input with invalid shape.
    dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
                 for (out, k, s, p) in zip(outs, ksize, stride, pad))
    dims = tuple(d + 1 for d in dims)  # Make invalid input shape.
    x_shape = (self.N, self.c) + dims
    x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
    x = chainer.Variable(x_data)
    # Computing unpooling raises exception.
    with self.assertRaises(type_check.InvalidType):
        functions.unpooling_nd(
            x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
Example 5: im2col_nd_gpu
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def im2col_nd_gpu(img, ksize, stride, pad, cover_all=False, dilate=1):
    n, c = img.shape[0:2]  # (n, c, d_1, d_2, ..., d_N)
    dims = img.shape[2:]
    ndim = len(dims)
    dilate = as_tuple(dilate, ndim)
    assert ndim == len(ksize) == len(stride) == len(pad)
    outs = tuple(get_conv_outsize(d, k, s, p, cover_all, di)
                 for (d, k, s, p, di)
                 in zip(dims, ksize, stride, pad, dilate))
    assert all(out > 0 for out in outs), 'Output sizes should be positive.'
    # col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
    shape = (n, c) + ksize + outs
    col = cuda.cupy.empty(shape, dtype=img.dtype)
    in_params, out_params, operation, name = \
        conv_nd_kernel.Im2colNDKernel.generate(ndim)
    cuda.elementwise(in_params, out_params, operation, name)(
        img.reduced_view(),
        *(dims + outs + ksize + stride + pad + dilate + (col,)))
    return col
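Shape bookkeeping for the N-dimensional im2col above, checked on the CPU side only, assuming a hypothetical 3-d input of shape (2, 3, 8, 8, 8) with ksize=(3, 3, 3), stride=(2, 2, 2), pad=(1, 1, 1) and no dilation:

from chainer.utils.conv import get_conv_outsize

n, c = 2, 3
dims, ksize, stride, pad = (8, 8, 8), (3, 3, 3), (2, 2, 2), (1, 1, 1)
outs = tuple(get_conv_outsize(d, k, s, p)
             for d, k, s, p in zip(dims, ksize, stride, pad))
col_shape = (n, c) + ksize + outs   # (2, 3, 3, 3, 3, 4, 4, 4)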
Example 6: _forward_cudnn
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def _forward_cudnn(self, x, W, b):
    out_c = W.shape[0]  # (c_O, _, k_1, k_2, ..., k_N)
    ksize = W.shape[2:]
    n, c = x.shape[:2]  # (n, c_I, d_1, d_2, ..., d_N)
    dims = x.shape[2:]
    stride = self.stride
    pad = self.pad
    dilate = self.dilate
    groups = self.groups
    # Make empty array for result.
    outs = tuple(
        conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
        for (d, k, s, p, di) in zip(dims, ksize, stride, pad, dilate))
    assert all(out > 0 for out in outs), 'Output sizes should be positive.'
    y_shape = (n, out_c) + outs  # (n, c_O, out_1, out_2, ..., out_N)
    y = cuda.cupy.empty(y_shape, dtype=x.dtype)
    auto_tune = configuration.config.autotune
    tensor_core = configuration.config.use_cudnn_tensor_core
    cuda.cudnn.convolution_forward(
        x, W, b, y, pad, stride, dilate, groups,
        auto_tune=auto_tune, tensor_core=tensor_core)
    return y,
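The effect of the d (dilation) argument used above, checked on a single axis with hypothetical values size=7, k=3, s=1, p=0:

from chainer.utils import conv

print(conv.get_conv_outsize(7, 3, 1, 0, d=1))   # 5: effective kernel is 3
print(conv.get_conv_outsize(7, 3, 1, 0, d=2))   # 3: effective kernel is 3 + (3-1) = 5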
Example 7: _forward_ideep
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def _forward_ideep(self, x):
    self._in_shape = x[0].shape
    self._in_dtype = x[0].dtype
    self.retain_inputs((0,))
    n, c, h, w = x[0].shape
    y_h = conv.get_conv_outsize(
        h, self.kh, self.sy, self.ph, self.cover_all)
    assert y_h > 0, 'Height in the output should be positive.'
    y_w = conv.get_conv_outsize(
        w, self.kw, self.sx, self.pw, self.cover_all)
    assert y_w > 0, 'Width in the output should be positive.'
    pd = self.sy * (y_h - 1) + self.kh - h - self.ph
    pr = self.sx * (y_w - 1) + self.kw - w - self.pw
    pp = intel64.ideep.pooling2DParam(
        (n, c, y_h, y_w),
        self.kh, self.kw,
        self.sy, self.sx,
        self.ph, self.pw,
        pd, pr,
        intel64.ideep.pooling2DParam.pooling_avg_include_padding)
    y, = intel64.ideep.pooling2D.Forward(intel64.ideep.array(x[0]), pp)
    return y,
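A note on pd (and symmetrically pr): the last pooling window along the height starts at row sy*(y_h-1) - ph and ends at sy*(y_h-1) - ph + kh - 1, so pd measures how far that end overshoots the last input row, i.e. the extra bottom padding the iDeep descriptor appears to require. A quick check with hypothetical values h=5, kh=3, sy=2, ph=1:

from chainer.utils import conv

h, kh, sy, ph = 5, 3, 2, 1
y_h = conv.get_conv_outsize(h, kh, sy, ph)    # 3
pd = sy * (y_h - 1) + kh - h - ph             # 2*2 + 3 - 5 - 1 = 1 extra bottom row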
Example 8: forward
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def forward(self, x):
    func = self.func
    ksize = func.ksize
    stride = func.stride
    pad = func.pad
    cover_all = func.cover_all
    pool_mode = func.get_cudnn_pool_mode()
    x = x[0]
    n, c = x.shape[:2]
    dims = x.shape[2:]
    ys = tuple(conv.get_conv_outsize(d, k, s, p, cover_all)
               for d, k, s, p in six.moves.zip(dims, ksize, stride, pad))
    y_shape = (n, c) + ys
    y = cuda.cupy.empty(y_shape, dtype=x.dtype)
    cudnn.pooling_forward(x, y, ksize, stride, pad, pool_mode)
    func.retain_inputs((0,))
    func.retain_outputs((0,))
    return y,
Example 9: check_type_forward
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def check_type_forward(self, in_types):
    n_in = in_types.size()
    type_check.expect(n_in == 1)
    x_type = in_types[0]
    type_check.expect(
        x_type.dtype.kind == 'f',
        x_type.ndim == 4,
    )
    if self.outh is not None:
        expected_h = conv.get_conv_outsize(
            self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
        type_check.expect(x_type.shape[2] == expected_h)
    if self.outw is not None:
        expected_w = conv.get_conv_outsize(
            self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
        type_check.expect(x_type.shape[3] == expected_w)
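This type check runs the convolution relation "backwards": for a requested deconvolution output height outh, the input must have exactly the size that a forward convolution with the same kernel, stride and pad would produce from outh. A check with hypothetical values outh=7, kh=3, sy=2, ph=1:

from chainer.utils import conv

outh, kh, sy, ph = 7, 3, 2, 1
expected_h = conv.get_conv_outsize(outh, kh, sy, ph)            # 4
assert conv.get_deconv_outsize(expected_h, kh, sy, ph) == outh  # round trip holds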
Example 10: forward_gpu
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def forward_gpu(self, x):
    self.retain_inputs((0,))
    self._used_cudnn = True
    # Implementation using cudnn
    x = x[0]
    n, c, h, w = x.shape
    y_h = conv.get_conv_outsize(
        h, self.kh, self.sy, self.ph, self.cover_all)
    assert y_h > 0, 'Height in the output should be positive.'
    y_w = conv.get_conv_outsize(
        w, self.kw, self.sx, self.pw, self.cover_all)
    assert y_w > 0, 'Width in the output should be positive.'
    y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x.dtype)
    cudnn.pooling_forward(
        x, y,
        (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
        self._get_pool_mode())
    self.retain_outputs((0,))
    return y,
Example 11: check_type_forward
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def check_type_forward(self, in_types):
    n_in = in_types.size()
    type_check.expect(n_in == 1)
    x_type = in_types[0]
    type_check.expect(
        x_type.dtype.kind == 'f',
        x_type.ndim == 4,
        x_type.shape == self.indexes.shape,
    )
    if self.outh is not None:
        expected_h = conv.get_conv_outsize(
            self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
        type_check.expect(x_type.shape[2] == expected_h)
    if self.outw is not None:
        expected_w = conv.get_conv_outsize(
            self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
        type_check.expect(x_type.shape[3] == expected_w)
Example 12: infer_return
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def infer_return(self, conv, x_type):
    ksize = make_pair(conv.ksize)
    stride = make_pair(conv.stride)
    pad = make_pair(conv.pad)
    dilate = make_pair(conv.dilate)
    shape_2 = get_conv_outsize(
        x_type.shape[2], ksize[0], stride[0], pad[0], d=dilate[0])
    shape_3 = get_conv_outsize(
        x_type.shape[3], ksize[1], stride[1], pad[1], d=dilate[1])
    ret_shape = (x_type.shape[0], conv.out_channels, shape_2, shape_3)
    return TyChainerVariable(x_type.dtype, shape=ret_shape)
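A worked instance of the inference above, assuming a hypothetical Convolution2D link with out_channels=16, ksize=3, stride=1, pad=1, dilate=1 applied to an input of shape (8, 3, 32, 32):

from chainer.utils.conv import get_conv_outsize

ksize, stride, pad, dilate = (3, 3), (1, 1), (1, 1), (1, 1)
h_out = get_conv_outsize(32, ksize[0], stride[0], pad[0], d=dilate[0])  # 32
w_out = get_conv_outsize(32, ksize[1], stride[1], pad[1], d=dilate[1])  # 32
ret_shape = (8, 16, h_out, w_out)   # (N, out_channels, H_out, W_out)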
Example 13: check_forward
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def check_forward(self, x, kh, kw, sy, sx, ph, pw, dy, dx, gpu):
    x = x.copy()
    n, c, h, w = x.shape
    col = functions.im2col(
        x, (kh, kw), (sy, sx), (ph, pw), dilate=(dy, dx)).data
    col_h = get_conv_outsize(h, kh, sy, ph, d=dy)
    col_w = get_conv_outsize(w, kw, sx, pw, d=dx)
    self.assertEqual(col.shape, (n, c * kh * kw, col_h, col_w))
    col = col.reshape(n, c, kh, kw, col_h, col_w)
    col = cuda.to_cpu(col)
    for y in moves.range(col_h):
        for x in moves.range(col_w):
            for ky in moves.range(kh):
                for kx in moves.range(kw):
                    oy = y * sy - ph + ky * dy
                    ox = x * sx - pw + kx * dx
                    if 0 <= oy < h and 0 <= ox < w:
                        testing.assert_allclose(
                            col[:, :, ky, kx, y, x],
                            self.x[:, :, oy, ox])
                    else:
                        testing.assert_allclose(
                            col[:, :, ky, kx, y, x],
                            numpy.zeros((2, 3), self.dtype))
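The index arithmetic this loop verifies, in isolation: output position y of the sliding window reads input row oy = y*sy - ph + ky*dy for each kernel row ky, and anything outside [0, h) is zero padding. A small check with hypothetical values h=5, kh=3, sy=2, ph=1, dy=1:

from chainer.utils.conv import get_conv_outsize

h, kh, sy, ph, dy = 5, 3, 2, 1, 1
col_h = get_conv_outsize(h, kh, sy, ph, d=dy)     # 3
rows = [[y * sy - ph + ky * dy for ky in range(kh)] for y in range(col_h)]
print(rows)   # [[-1, 0, 1], [1, 2, 3], [3, 4, 5]]; -1 and 5 fall in the padding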
Example 14: setUp
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def setUp(self):
    self.x = numpy.random.uniform(
        size=self.in_shape).astype(self.dtype)
    kh, kw = _pair(self.ksize)
    sy, sx = _pair(self.stride)
    ph, pw = _pair(self.pad)
    dy, dx = _pair(self.dilate)
    N, C, H, W = self.in_shape
    o_H = get_conv_outsize(H, kh, sy, ph, cover_all=self.cover_all, d=dy)
    o_W = get_conv_outsize(W, kw, sx, pw, cover_all=self.cover_all, d=dx)
    self.gy = numpy.random.uniform(
        size=(N, C * kh * kw, o_H, o_W)).astype(self.dtype)
    self.ggx = numpy.random.uniform(
        size=self.in_shape).astype(self.dtype)
    self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
    if self.dtype is numpy.float16:
        self.check_backward_options.update({'atol': 2e-3, 'rtol': 1e-2})
    self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
    if self.dtype is numpy.float16:
        self.check_double_backward_options.update(
            {'atol': 1e-3, 'rtol': 1e-2})
Example 15: _get_out_dims
# Required import: from chainer.utils import conv [as alias]
# Or alternatively: from chainer.utils.conv import get_conv_outsize [as alias]
def _get_out_dims(self, in_dims):
    out_dims = tuple(
        conv.get_conv_outsize(d, k, s, p, self.cover_all)
        for d, k, s, p
        in six.moves.zip(in_dims, self.ksize, self.stride, self.pad))
    return out_dims