本文整理匯總了Python中chainer.initializers.Zero方法的典型用法代碼示例。如果您正苦於以下問題:Python initializers.Zero方法的具體用法?Python initializers.Zero怎麽用?Python initializers.Zero使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊chainer.initializers
的用法示例。
在下文中一共展示了initializers.Zero方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self):
    """Build an AlexNet-style CNN in float16.

    All layers share a HeNormal weight initializer (scaled by 1/sqrt(2))
    and a Zero bias initializer, both created with the float16 dtype.
    """
    chainer.Chain.__init__(self)
    self.dtype = np.float16
    weight_init = initializers.HeNormal(1 / np.sqrt(2), self.dtype)
    bias_init = initializers.Zero(self.dtype)

    # Local factories so every layer gets the same initializers.
    def conv(out_channels, ksize, **kwargs):
        return L.Convolution2D(None, out_channels, ksize,
                               initialW=weight_init,
                               initial_bias=bias_init, **kwargs)

    def fc(out_size):
        return L.Linear(None, out_size, initialW=weight_init,
                        initial_bias=bias_init)

    with self.init_scope():
        self.conv1 = conv(96, 11, stride=4)
        self.conv2 = conv(256, 5, pad=2)
        self.conv3 = conv(384, 3, pad=1)
        self.conv4 = conv(384, 3, pad=1)
        self.conv5 = conv(256, 3, pad=1)
        self.fc6 = fc(4096)
        self.fc7 = fc(4096)
        self.fc8 = fc(1000)
示例2: test_add_param
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def test_add_param(self):
    """Exercise Link.add_param in its eager and deferred forms.

    Covers: shape given (immediate init, default float32 'f'), explicit
    dtype 'd', no shape (deferred until Parameter.initialize), dtype
    remembered across deferred init, and a custom Zero('d') initializer
    whose dtype and zero fill survive deferred initialization.
    """
    # Shape given: initialized immediately with the default dtype ('f').
    self.link.add_param('z', (2, 3))
    self.check_param_init('z', (2, 3), 'f')
    # Shape given with an explicit dtype.
    self.link.add_param('w', (2, 3), dtype='d')
    self.check_param_init('w', (2, 3), 'd')
    # No shape: stays uninitialized until initialize() supplies one.
    self.link.add_param('r')
    self.check_param_uninit('r')
    self.link.r.initialize((2, 3))
    self.check_param_init('r', (2, 3), 'f')
    # Deferred init must keep the dtype requested at add_param time.
    self.link.add_param('s', dtype='d')
    self.check_param_uninit('s')
    self.link.s.initialize((2, 3))
    self.check_param_init('s', (2, 3), 'd')
    # Custom initializer: Zero('d') yields float64 zeros on deferred init.
    initializer = initializers.Zero('d')
    self.link.add_param('t', initializer=initializer)
    self.check_param_uninit('t', initializer)
    self.link.t.initialize((2, 3))
    self.check_param_init('t', (2, 3), 'd', 0)
示例3: test_copydata_from_uninitialized_parameter
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def test_copydata_from_uninitialized_parameter(
        self, src_backend_config, dst_backend_config):
    """copydata from an uninitialized source Parameter.

    The destination is a concrete array on dst's device; the source only
    carries a Zero initializer and is moved to src's device while still
    uninitialized. After dst.copydata(src) the test asserts that neither
    parameter changed device, that dst's underlying array object was
    reused (no reallocation), and that dst's and src's data are equal —
    i.e. copydata resolved the source's deferred initialization.
    """
    shape = self.shape
    dtype = np.float32
    # Destination: a real random array placed on the dst backend.
    dst_arr_numpy = np.asarray(np.random.randn(*shape), dtype)
    dst_arr = dst_backend_config.get_array(dst_arr_numpy.copy())
    initializer = initializers.Zero()
    dst_var = chainer.Parameter(dst_arr)
    # Source: uninitialized, only holds the Zero initializer.
    src_var = chainer.Parameter(initializer)
    src_var.to_device(src_backend_config.device)
    dst_arr_prev = dst_var.array
    dst_var.copydata(src_var)
    # Devices are preserved on both sides of the copy.
    assert src_var.device == src_backend_config.device
    assert dst_var.device == dst_backend_config.device
    # The destination buffer is written in place, not replaced.
    assert dst_var.array is dst_arr_prev
    np.testing.assert_array_equal(
        _numpy_device.send(dst_var.array),
        _numpy_device.send(src_var.array))
示例4: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self, size, decay=0.9, eps=2e-5, dtype=numpy.float32,
             use_gamma=True, use_beta=True,
             initial_gamma=None, initial_beta=None):
    """Batch normalization link (legacy add_param-style Chainer API).

    gamma defaults to ones and beta to zeros when no explicit
    initializer is given; running mean/var start at zero.
    """
    super(BatchNormalization, self).__init__()
    if use_gamma:
        self.add_param('gamma', size, dtype=dtype)
        gamma_init = (initializers.One()
                      if initial_gamma is None else initial_gamma)
        initializers.init_weight(self.gamma.data, gamma_init)
    if use_beta:
        self.add_param('beta', size, dtype=dtype)
        beta_init = (initializers.Zero()
                     if initial_beta is None else initial_beta)
        initializers.init_weight(self.beta.data, beta_init)
    # Running statistics and sample counter are persisted, not trained.
    self.add_persistent('avg_mean', numpy.zeros(size, dtype=dtype))
    self.add_persistent('avg_var', numpy.zeros(size, dtype=dtype))
    self.add_persistent('N', 0)
    self.decay = decay
    self.eps = eps
示例5: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self):
    """Extra feature layers of the SSD300 VGG16 extractor.

    Every added convolution shares a LeCunUniform weight initializer
    and a zero bias initializer.
    """
    shared = dict(
        initialW=initializers.LeCunUniform(),
        initial_bias=initializers.Zero(),
    )
    super(VGG16Extractor300, self).__init__()
    with self.init_scope():
        self.conv8_1 = L.Convolution2D(256, 1, **shared)
        self.conv8_2 = L.Convolution2D(512, 3, stride=2, pad=1, **shared)
        self.conv9_1 = L.Convolution2D(128, 1, **shared)
        self.conv9_2 = L.Convolution2D(256, 3, stride=2, pad=1, **shared)
        self.conv10_1 = L.Convolution2D(128, 1, **shared)
        self.conv10_2 = L.Convolution2D(256, 3, **shared)
        self.conv11_1 = L.Convolution2D(128, 1, **shared)
        self.conv11_2 = L.Convolution2D(256, 3, **shared)
示例6: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(
        self, n_class, aspect_ratios,
        initialW=None, initial_bias=None):
    """Multibox head: one loc and one conf convolution per feature map.

    initialW defaults to LeCunUniform and initial_bias to Zero when
    not supplied by the caller.
    """
    self.n_class = n_class
    self.aspect_ratios = aspect_ratios
    super(Multibox, self).__init__()
    with self.init_scope():
        self.loc = chainer.ChainList()
        self.conf = chainer.ChainList()
    initialW = (initializers.LeCunUniform()
                if initialW is None else initialW)
    initial_bias = (initializers.Zero()
                    if initial_bias is None else initial_bias)
    init = {'initialW': initialW, 'initial_bias': initial_bias}
    for ar in aspect_ratios:
        # (len(ar) + 1) * 2 default boxes per spatial location.
        n = (len(ar) + 1) * 2
        self.loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
        self.conf.add_link(L.Convolution2D(
            n * self.n_class, 3, pad=1, **init))
示例7: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self, inp=256, mid=128, sz=3):
    """Convolutional LSTM cell.

    Wx*/Wh* are the input/hidden convolutions of the i, f, c, o gates
    ("same" padding via sz // 2); Wci/Wcf/Wco are zero-initialized
    peephole parameters whose shape is deferred.
    """
    pad = sz // 2
    super(ConvLSTM, self).__init__(
        Wxi=L.Convolution2D(inp, mid, sz, pad=pad),
        Whi=L.Convolution2D(mid, mid, sz, pad=pad, nobias=True),
        Wxf=L.Convolution2D(inp, mid, sz, pad=pad),
        Whf=L.Convolution2D(mid, mid, sz, pad=pad, nobias=True),
        Wxc=L.Convolution2D(inp, mid, sz, pad=pad),
        Whc=L.Convolution2D(mid, mid, sz, pad=pad, nobias=True),
        Wxo=L.Convolution2D(inp, mid, sz, pad=pad),
        Who=L.Convolution2D(mid, mid, sz, pad=pad, nobias=True),
    )
    self.inp = inp
    self.mid = mid
    # Previous cell state and hidden state (None until the first step).
    self.pc = None
    self.ph = None
    with self.init_scope():
        self.Wci = variable.Parameter(initializers.Zero())
        self.Wcf = variable.Parameter(initializers.Zero())
        self.Wco = variable.Parameter(initializers.Zero())
示例8: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self, k=3, use_bn=True, residual=False):
    """PointNet transform module predicting a k x k transform matrix.

    The last layer starts with zero weights and an identity bias, so the
    initial output is the identity transform.
    """
    super(TransformModule, self).__init__()
    # Flattened k x k identity used as the bias of the final layer.
    identity_bias = numpy.identity(k, dtype=numpy.float32).ravel()
    with self.init_scope():
        self.conv_block1 = ConvBlock(k, 64, ksize=1, use_bn=use_bn,
                                     residual=residual)
        self.conv_block2 = ConvBlock(64, 128, ksize=1, use_bn=use_bn,
                                     residual=residual)
        self.conv_block3 = ConvBlock(128, 1024, ksize=1, use_bn=use_bn,
                                     residual=residual)
        # [Note]
        # Original paper uses BN for fc layer as well.
        # https://github.com/charlesq34/pointnet/blob/master/models/transform_nets.py#L34
        # This chainer impl. skips BN for the fc layers.
        self.fc4 = links.Linear(1024, 512)
        # self.bn4 = links.BatchNormalization(512)
        self.fc5 = links.Linear(512, 256)
        # self.bn5 = links.BatchNormalization(256)
        # Zero W + identity bias => initial output is the identity matrix.
        self.fc6 = links.Linear(
            256, k * k, initialW=initializers.Zero(dtype=numpy.float32),
            initial_bias=identity_bias)
    self.k = k
示例9: create_initializer
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def create_initializer(init_type, scale=None, fillvalue=None):
    """Create a chainer initializer by name.

    Args:
        init_type (str): one of 'identity', 'constant', 'zero', 'one',
            'normal', 'glorotNormal', 'heNormal', 'orthogonal',
            'uniform', 'leCunUniform', 'glorotUniform', 'heUniform'.
        scale: optional scale passed to the initializer; when None the
            initializer's own default is used.
        fillvalue: fill value for the 'constant' initializer.

    Returns:
        A chainer initializer instance.

    Raises:
        ValueError: if ``init_type`` is not a known name.
    """
    if init_type == 'identity':
        return (initializers.Identity() if scale is None
                else initializers.Identity(scale=scale))
    if init_type == 'constant':
        return initializers.Constant(fillvalue)
    if init_type == 'zero':
        return initializers.Zero()
    if init_type == 'one':
        return initializers.One()
    # Scale-taking initializers. BUGFIX: the original passed `scale` in
    # BOTH branches for orthogonal/uniform/leCunUniform/glorotUniform/
    # heUniform, so `scale is None` still forwarded None to the
    # constructor instead of using the class default.
    scaled_classes = {
        'normal': initializers.Normal,
        'glorotNormal': initializers.GlorotNormal,
        'heNormal': initializers.HeNormal,
        'orthogonal': initializers.Orthogonal,
        'uniform': initializers.Uniform,
        'leCunUniform': initializers.LeCunUniform,
        'glorotUniform': initializers.GlorotUniform,
        'heUniform': initializers.HeUniform,
    }
    if init_type in scaled_classes:
        cls = scaled_classes[init_type]
        return cls() if scale is None else cls(scale)
    raise ValueError("Unknown initializer type: {0}".format(init_type))
示例10: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self, size=None, eps=1e-6, initial_gamma=None,
             initial_beta=None):
    """Layer normalization link with deferred gamma/beta parameters.

    gamma defaults to ones, beta to zeros; when ``size`` is given the
    parameters are initialized immediately.
    """
    super(LayerNormalizationLink, self).__init__()
    self.add_uninitialized_param('gamma')
    self.add_uninitialized_param('beta')
    self._gamma_initializer = (
        initializers.One() if initial_gamma is None else initial_gamma)
    self._beta_initializer = (
        initializers.Zero() if initial_beta is None else initial_beta)
    self.eps = eps
    if size is not None:
        self._initialize_params(size)
示例11: _generate_array
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def _generate_array(self, xp, dtype=None, device=None):
    """Return a zero-filled scalar (0-d) array built via generate_array."""
    zero = initializers.Zero(dtype)
    return initializers.generate_array(zero, (), xp, device=device)
示例12: __init__
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def __init__(self, dtype=numpy.float32):
    """Tiny conv + linear network used for dtype tests.

    Both layers share a HeNormal weight initializer (scaled by
    1/sqrt(2)) and a zero bias initializer in the given dtype.
    """
    super(SimpleNet, self).__init__()
    self.dtype = dtype
    w_init = initializers.HeNormal(1 / numpy.sqrt(2), self.dtype)
    b_init = initializers.Zero(self.dtype)
    with self.init_scope():
        self.conv = chainer.links.Convolution2D(
            2, 2, 3, initialW=w_init, initial_bias=b_init)
        self.fc = chainer.links.Linear(
            18, 2, initialW=w_init, initial_bias=b_init)
    self.train = True
示例13: test_initialize_dtype
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def test_initialize_dtype(self):
    """The initializer's dtype propagates to both data and grad."""
    initializer = initializers.Zero(np.float64)
    x = chainer.Parameter(initializer=initializer)
    x.initialize((2, 3))
    # Deferred initialization must honor the initializer's float64 dtype.
    assert x.data.dtype == np.float64
    assert x.grad.dtype == np.float64
示例14: test_zerograd_dtype
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def test_zerograd_dtype(self):
    """zerograd on an uninitialized Parameter keeps grad dtype == data dtype."""
    x = chainer.Parameter(initializers.Zero(dtype=np.float16))
    # zerograd is deprecated; the warning is expected.
    with testing.assert_warns(DeprecationWarning):
        x.zerograd()
    x.initialize((3, 2))
    # The deferred zero-grad must adopt the data's (float16) dtype.
    assert x.grad.dtype == x.data.dtype
示例15: zerograd
# 需要導入模塊: from chainer import initializers [as 別名]
# 或者: from chainer.initializers import Zero [as 別名]
def zerograd(self):
    """Zero the gradient, deferring for uninitialized parameters.

    After the superclass zeroes any existing gradient, an uninitialized
    parameter records a Zero initializer so the gradient is zero-filled
    when the data is eventually initialized.
    """
    super(Parameter, self).zerograd()
    if not self.is_initialized:
        # Reuse the data initializer's dtype (if it has one) so the
        # deferred gradient matches the data's dtype.
        dtype = getattr(self.initializer, 'dtype', None)
        self._grad_initializer = initializers.Zero(dtype)