This article collects typical usage examples of the chainer.initializers.Uniform method in Python. If you are wondering what initializers.Uniform does, or how and where to use it, the curated examples below may help; you can also explore further usage examples from its parent module, chainer.initializers.
The following 8 code examples of initializers.Uniform are presented, ordered by popularity.
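Before the examples, here is a minimal, self-contained sketch (our own illustration, not taken from any of the repositories below) of what the initializer itself does: Uniform(scale) is a callable that fills a given array in place with samples drawn from U(-scale, scale), and it is most commonly passed as initialW or initial_bias when constructing a link.

import numpy as np
import chainer.links as L
from chainer import initializers

# Fill an array in place with samples from U(-0.05, 0.05).
w = np.empty((3, 4), dtype=np.float32)
initializers.Uniform(scale=0.05)(w)
assert (np.abs(w) <= 0.05).all()

# More typically, the initializer is handed to a link at construction time.
layer = L.Linear(4, 2, initialW=initializers.Uniform(0.05))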
Example 1: __init__
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio; zero indicates no dropout
    super(RNN, self).__init__()
    with self.init_scope():
        # n-gram token embedding; plus 2 rows for the OOV and end symbols
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))
        if 'lstm' in subword:
            self.mid = L.LSTM(n_units_char, n_units_char * 2)
            # feed-forward output layer
            self.out = L.Linear(n_units_char * 2, n_units_char)
        if 'bilstm' in subword:
            self.mid_b = L.LSTM(n_units_char, n_units_char * 2)
            self.out_b = L.Linear(n_units_char * 2, n_units_char)
        self.n_ngram = (vocab_ngram_tokens.metadata["max_gram"]
                        - vocab_ngram_tokens.metadata["min_gram"] + 1)
        self.final_out = L.Linear(n_units * self.n_ngram, n_units)
    self.dropout = dropout
    self.vocab = vocab
    self.vocab_ngram_tokens = vocab_ngram_tokens
    self.subword = subword
Example 2: __init__
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def __init__(self, n_codebooks, n_centroids, n_vocab, embed_dim, tau,
             embed_mat):
    """M: number of codebooks (subcodes); K: number of vectors per codebook."""
    super(EmbeddingCompressor, self).__init__()
    self.M = n_codebooks
    self.K = n_centroids
    self.n_vocab = n_vocab
    self.embed_dim = embed_dim
    self.tau = tau
    M = self.M
    K = self.K
    u_init = I.Uniform(scale=0.01)
    with self.init_scope():
        # pretrained embedding matrix to be compressed
        self.embed_mat = L.EmbedID(n_vocab, embed_dim, initialW=embed_mat)
        self.l1 = L.Linear(embed_dim, M * K // 2,
                           initialW=u_init, initial_bias=u_init)
        self.l2 = L.Linear(M * K // 2, M * K,
                           initialW=u_init, initial_bias=u_init)
        # shared codebook: M*K candidate vectors of size embed_dim
        self.codebook = chainer.Parameter(initializer=u_init,
                                          shape=(M * K, embed_dim))
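For context, this compressor follows the deep compositional code learning recipe: l1 and l2 map an embedding to M groups of K logits, a Gumbel-softmax at temperature tau softly selects one codebook row per group, and the sum of the selected rows reconstructs the vector. Below is a rough sketch of such a forward pass; the method name and the exact activations are our assumptions, not code from the original repository.

import chainer.functions as F

def encode(self, ids):
    e = self.embed_mat(ids)                      # (batch, embed_dim)
    h = F.tanh(self.l1(e))                       # (batch, M*K//2)
    logits = F.reshape(self.l2(h), (-1, self.M, self.K))
    # soft one-hot code per codebook, sharpened by the temperature tau
    codes = F.gumbel_softmax(logits, tau=self.tau, axis=2)
    codes = F.reshape(codes, (-1, self.M * self.K))
    return F.matmul(codes, self.codebook)        # (batch, embed_dim)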
Example 3: create_initializer
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def create_initializer(init_type, scale=None, fillvalue=None):
    # when scale is None, fall back to Chainer's default for each initializer
    if init_type == 'identity':
        return initializers.Identity() if scale is None else initializers.Identity(scale=scale)
    if init_type == 'constant':
        return initializers.Constant(fillvalue)
    if init_type == 'zero':
        return initializers.Zero()
    if init_type == 'one':
        return initializers.One()
    if init_type == 'normal':
        return initializers.Normal() if scale is None else initializers.Normal(scale)
    if init_type == 'glorotNormal':
        return initializers.GlorotNormal() if scale is None else initializers.GlorotNormal(scale)
    if init_type == 'heNormal':
        return initializers.HeNormal() if scale is None else initializers.HeNormal(scale)
    if init_type == 'orthogonal':
        return initializers.Orthogonal() if scale is None else initializers.Orthogonal(scale)
    if init_type == 'uniform':
        return initializers.Uniform() if scale is None else initializers.Uniform(scale)
    if init_type == 'leCunUniform':
        return initializers.LeCunUniform() if scale is None else initializers.LeCunUniform(scale)
    if init_type == 'glorotUniform':
        return initializers.GlorotUniform() if scale is None else initializers.GlorotUniform(scale)
    if init_type == 'heUniform':
        return initializers.HeUniform() if scale is None else initializers.HeUniform(scale)
    raise ValueError("Unknown initializer type: {0}".format(init_type))
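A quick check of the factory above (our own usage sketch, assuming numpy is imported as np; the values are arbitrary):

init = create_initializer('uniform', scale=0.08)
w = np.empty((2, 3), dtype=np.float32)
init(w)                                  # fills w with samples from U(-0.08, 0.08)

init = create_initializer('heNormal')    # scale=None uses Chainer's default scale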
Example 4: __call__
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def __call__(self, array):
    # uniform in [-1/sqrt(fan_in), 1/sqrt(fan_in)], with the last axis as fan-in
    scale = 1 / np.sqrt(array.shape[-1])
    initializers.Uniform(scale)(array)
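Because Chainer accepts any callable as initialW, the same logic also works as a standalone function. Here is a minimal sketch (the function name is ours, not from the original repository) that reproduces the 1/sqrt(fan_in) uniform rule and hands it to a link:

import numpy as np
import chainer.links as L
from chainer import initializers

def fan_in_uniform(array):
    # same rule as the __call__ above: U(-1/sqrt(fan_in), 1/sqrt(fan_in))
    scale = 1 / np.sqrt(array.shape[-1])
    initializers.Uniform(scale)(array)

layer = L.Linear(16, 8, initialW=fan_in_uniform)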
Example 5: __init__
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def __init__(self, n_vocab, n_units, loss_func):
    super(ContinuousBoW, self).__init__()
    with self.init_scope():
        # plus 2 rows for the OOV and end symbols
        self.embed = L.EmbedID(n_vocab + 2, n_units,
                               initialW=I.Uniform(1. / n_units))
        self.loss_func = loss_func
Example 6: __init__
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def __init__(self, n_vocab, n_units, loss_func):
    super(ContinuousBoW, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(
            n_vocab, n_units, initialW=I.Uniform(1. / n_units))
        self.loss_func = loss_func
Example 7: generate_params
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def generate_params(self):
    initial_bias = initializers.Uniform(scale=1., dtype=self.dtype)
    return initial_bias,
Example 8: get_initializers
# Required import: from chainer import initializers [as alias]
# Or alternatively: from chainer.initializers import Uniform [as alias]
def get_initializers(self):
    if self.initialW == 'zero':
        weight_initializer = initializers.constant.Zero()
    elif self.initialW == 'random':
        weight_initializer = initializers.GlorotUniform(
            rng=numpy.random.RandomState(seed=0))
    if self.initial_bias == 'zero':
        bias_initializer = initializers.constant.Zero()
    elif self.initial_bias == 'random':
        bias_initializer = initializers.Uniform(
            rng=numpy.random.RandomState(seed=0))
    return weight_initializer, bias_initializer
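A small standalone check (our own, not part of the original test code) that seeding the rng, as Example 8 does, makes Uniform reproducible across independent initializer instances:

import numpy
from chainer import initializers

a = numpy.empty((2, 2), dtype=numpy.float32)
b = numpy.empty((2, 2), dtype=numpy.float32)
initializers.Uniform(rng=numpy.random.RandomState(seed=0))(a)
initializers.Uniform(rng=numpy.random.RandomState(seed=0))(b)
assert (a == b).all()   # identical seeds, identical samples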