This article collects typical usage examples of the chainer.links.EmbedID method in Python. If you are wondering how to use links.EmbedID, or what it looks like in real code, the curated examples below should help. You can also explore the other members of the containing module, chainer.links.
The following presents 15 code examples of links.EmbedID, sorted by popularity by default.
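Before the examples, here is a minimal sketch (with illustrative sizes) of what L.EmbedID itself does: it is a lookup table mapping integer token ids to trainable embedding vectors.

import numpy as np
import chainer.links as L

# A toy table: vocabulary of 10 ids, 4-dimensional embedding vectors.
embed = L.EmbedID(10, 4)

# A (B, L) = (2, 3) batch of integer token ids.
ids = np.array([[1, 2, 3], [4, 5, 0]], dtype=np.int32)

# The lookup returns a (2, 3, 4) float Variable of embeddings.
vectors = embed(ids)
print(vectors.shape)  # (2, 3, 4)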
Example 1: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_actions, max_episode_steps):
    super().__init__()
    with self.init_scope():
        self.embed = L.EmbedID(max_episode_steps + 1, 3136)
        self.image2hidden = chainerrl.links.Sequence(
            L.Convolution2D(None, 32, 8, stride=4),
            F.relu,
            L.Convolution2D(None, 64, 4, stride=2),
            F.relu,
            L.Convolution2D(None, 64, 3, stride=1),
            functools.partial(F.reshape, shape=(-1, 3136)),
        )
        self.hidden2out = chainerrl.links.Sequence(
            L.Linear(None, 512),
            F.relu,
            L.Linear(None, n_actions),
            DiscreteActionValue,
        )
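The excerpt above only defines the links. How the step embedding is combined with the image features is not shown, so the forward pass below is an assumption, sketched for illustration:

# Hypothetical __call__ for the model above; gating the image features
# with the step embedding is an assumption, not part of the excerpt.
def __call__(self, x):
    image, steps = x
    # (B, 3136) image features modulated by a (B, 3136) step embedding.
    h = self.image2hidden(image) * F.sigmoid(self.embed(steps))
    return self.hidden2out(h)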
Example 2: block_embed
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def block_embed(embed, x, dropout=0.):
    """Embedding function followed by convolution

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    e = embed(x)
    e = F.dropout(e, ratio=dropout)
    e = F.transpose(e, (0, 2, 1))
    e = e[:, :, :, None]
    return e
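A quick shape check for block_embed, assuming a toy EmbedID (all sizes here are illustrative):

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 16)  # vocab of 100, N = 16 embedding dimensions
x = np.random.randint(0, 100, size=(8, 20)).astype(np.int32)  # B = 8, L = 20
e = block_embed(embed, x, dropout=0.1)
print(e.shape)  # (8, 16, 20, 1), i.e. (B, N, L, 1)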
Example 3: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, Vi, Ei, Hi, init_orth=False, use_bn_length=0, cell_type=rnn_cells.LSTMCell):
    gru_f = cell_type(Ei, Hi)
    gru_b = cell_type(Ei, Hi)

    log.info("constructing encoder [%s]" % (cell_type,))
    super(Encoder, self).__init__(
        emb=L.EmbedID(Vi, Ei),
        # gru_f = L.GRU(Hi, Ei),
        # gru_b = L.GRU(Hi, Ei)
        gru_f=gru_f,
        gru_b=gru_b
    )
    self.Hi = Hi

    if use_bn_length > 0:
        self.add_link("bn_f", BNList(Hi, use_bn_length))
        # self.add_link("bn_b", BNList(Hi, use_bn_length))  # TODO
    self.use_bn_length = use_bn_length

    if init_orth:
        ortho_init(self.gru_f)
        ortho_init(self.gru_b)
Example 4: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, V, d_model=512, n_heads=8, d_ff=2048, experimental_relu=False, dropout=None,
             nb_layers=6, residual_mode="normal", no_normalize=False):
    super(Decoder, self).__init__(
        emb=L.EmbedID(V, d_model),
        encoding_layers=DecoderMultiLayer(d_model, n_heads, d_ff=d_ff,
                                          experimental_relu=experimental_relu,
                                          dropout=dropout, nb_layers=nb_layers,
                                          residual_mode=residual_mode,
                                          no_normalize=no_normalize),
        logits_layer=L.Linear(d_model, V + 1)
    )
    self.dropout = dropout
    self.n_heads = n_heads
    self.d_model = d_model
    self.cached_pos_vect = None

    self.add_param("bos_encoding", (1, 1, d_model))
    self.bos_encoding.data[...] = np.random.randn(d_model)

    self.V = V
    self.eos_idx = V
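Note that logits_layer has V + 1 outputs because index V is reserved for the end-of-sequence symbol (self.eos_idx = V). A small sketch of that convention, with made-up ids:

import numpy as np

V = 8                      # vocabulary size; id V itself is EOS
eos_idx = V
sentence = [3, 1, 4, 1]    # token ids, each in [0, V)
target = np.array(sentence + [eos_idx], dtype=np.int32)
print(target)  # [3 1 4 1 8]; logits_layer scores V + 1 = 9 classes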
Example 5: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size, proj_size, dropout=0.5):
    """Initialize the decoder with structure parameters.

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embedding.
        hidden_size (int): Dimensionality of hidden vectors.
        proj_size (int): Dimensionality of projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed=L.EmbedID(in_size, embed_size),
        lstm=L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj=L.Linear(hidden_size, proj_size),
        out=L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape)
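The class's own forward pass is not shown; the sketch below rebuilds the same links standalone and runs one id sequence through them, just to make the shapes concrete. The tanh between proj and out is an assumption.

import numpy as np
import chainer.functions as F
import chainer.links as L

# Standalone re-creation of the decoder's links (sizes are illustrative).
embed = L.EmbedID(50, 16)
lstm = L.NStepLSTM(1, 16, 32, 0.5)
proj = L.Linear(32, 24)
out = L.Linear(24, 50)

xs = [np.array([1, 2, 3], dtype=np.int32)]  # one id sequence in the batch
exs = [embed(x) for x in xs]                # list of (L_i, 16) embeddings
hy, cy, ys = lstm(None, None, exs)          # NStepLSTM over the whole list
logits = out(F.tanh(proj(ys[0])))           # (3, 50) scores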
Example 6: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
    out_units = n_units // 3
    super(CNNEncoder, self).__init__(
        embed=L.EmbedID(n_vocab, n_units, ignore_label=-1,
                        initialW=embed_init),
        cnn_w3=L.Convolution2D(
            n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True),
        cnn_w4=L.Convolution2D(
            n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True),
        cnn_w5=L.Convolution2D(
            n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True),
        mlp=MLP(n_layers, out_units * 3, dropout)
    )
    self.out_units = out_units * 3
    self.dropout = dropout
    self.use_predict_embed = False
Example 7: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
             dropout, subword):  # dropout ratio; zero indicates no dropout
    super(RNN, self).__init__()
    with self.init_scope():
        # n-gram token embedding, plus 2 rows for OOV and the end symbol.
        self.embed = L.EmbedID(
            len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
            initialW=I.Uniform(1. / n_units_char))
        if 'lstm' in subword:
            self.mid = L.LSTM(n_units_char, n_units_char * 2)
            self.out = L.Linear(n_units_char * 2, n_units_char)  # feed-forward output layer
        if 'bilstm' in subword:
            self.mid_b = L.LSTM(n_units_char, n_units_char * 2)
            self.out_b = L.Linear(n_units_char * 2, n_units_char)

        self.n_ngram = (vocab_ngram_tokens.metadata["max_gram"]
                        - vocab_ngram_tokens.metadata["min_gram"] + 1)
        self.final_out = L.Linear(n_units * self.n_ngram, n_units)

    self.dropout = dropout
    self.vocab = vocab
    self.vocab_ngram_tokens = vocab_ngram_tokens
    self.subword = subword
Example 8: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1, wv=None):
    super(RNNEncoder, self).__init__()
    with self.init_scope():
        if wv is None:
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=embed_init)
        else:
            # TODO: this implementation was allowing for dynamic embeddings;
            # think about how to support both continuous embeddings
            # and function pointers
            # self.embed = self.get_embed_from_wv
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=wv)
        self.encoder = L.NStepLSTM(n_layers, n_units, n_units, dropout)

    self.n_layers = n_layers
    self.out_units = n_units
    self.dropout = dropout
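The wv branch is the standard way to start from pretrained word vectors: pass the (n_vocab, n_units) matrix as initialW. A minimal sketch with random stand-in vectors:

import numpy as np
import chainer.links as L

n_vocab, n_units = 1000, 64
wv = np.random.randn(n_vocab, n_units).astype(np.float32)  # stand-in for real vectors
embed = L.EmbedID(n_vocab, n_units, ignore_label=-1, initialW=wv)

# Row i of wv is now the initial embedding of token id i.
assert np.allclose(embed.W.data[5], wv[5])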
Example 9: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(
        self,
        embed_dim: int,
        n_units: int = 1000,
        gpu: int = -1,
):
    super(LSTM, self).__init__(
        embed=L.EmbedID(embed_dim, n_units),  # word embedding
        l1=L.Linear(n_units, n_units * 4),
        h1=L.Linear(n_units, n_units * 4),
        l2=L.Linear(n_units, n_units * 4),
        h2=L.Linear(n_units, n_units * 4),
        l3=L.Linear(n_units, embed_dim),
    )
    self.embed_dim = embed_dim
    self.n_units = n_units
    self.gpu = gpu
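Each l/h pair emits 4 * n_units pre-activations, which is exactly the gate layout expected by chainer's stateless F.lstm. The one-step wiring below is an assumption for illustration, since the class's forward pass is not shown:

import chainer.functions as F

# One hypothetical time step through the two LSTM layers above.
# x: (B,) int32 token ids; c1, h1, c2, h2: (B, n_units) previous states.
def step(model, x, c1, h1, c2, h2):
    e = model.embed(x)                                # (B, n_units)
    c1, h1 = F.lstm(c1, model.l1(e) + model.h1(h1))   # layer 1 gates
    c2, h2 = F.lstm(c2, model.l2(h1) + model.h2(h2))  # layer 2 gates
    return model.l3(h2), c1, h1, c2, h2               # (B, embed_dim) output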
Example 10: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(LSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size,
            hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size,
            initialW=initializers.Normal(0.01)
        )
        self.lstm = L.LSTM(hidden_size, hidden_size)
        self.out_word = L.Linear(
            hidden_size,
            vocab_size,
            initialW=initializers.Normal(0.01)
        )
    self.dropout_ratio = dropout_ratio
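embed_img projects an image feature into the same hidden space as the word embeddings, which suggests the usual captioning pattern: prime the LSTM with the image, then feed words. The helper below is a hedged sketch of that pattern, not the class's actual method:

import chainer.functions as F

def prime_and_score(model, img_feature, word_ids):
    # Condition the LSTM on the projected image feature, then feed word ids.
    model.lstm.reset_state()
    model.lstm(model.embed_img(img_feature))
    h = model.lstm(model.embed_word(word_ids))
    return model.out_word(F.dropout(h, ratio=model.dropout_ratio))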
Example 11: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
    out_units = n_units // 3
    super(CNNEncoder, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                               initialW=embed_init)
        self.cnn_w3 = L.Convolution2D(
            n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
            nobias=True)
        self.cnn_w4 = L.Convolution2D(
            n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
            nobias=True)
        self.cnn_w5 = L.Convolution2D(
            n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
            nobias=True)
        self.mlp = MLP(n_layers, out_units * 3, dropout)

    self.out_units = out_units * 3
    self.dropout = dropout
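The three convolutions slide 3-, 4-, and 5-gram windows over a (B, n_units, L, 1) embedding block (cf. block_embed in Example 2) and are max-pooled over time before concatenation. A hedged forward sketch along those lines:

import chainer.functions as F

def cnn_forward(encoder, x):
    # x: (B, L) int ids -> (B, n_units, L, 1), as produced by block_embed.
    e = block_embed(encoder.embed, x, encoder.dropout)
    h3 = F.max(encoder.cnn_w3(e), axis=2)   # (B, out_units, 1)
    h4 = F.max(encoder.cnn_w4(e), axis=2)
    h5 = F.max(encoder.cnn_w5(e), axis=2)
    h = F.concat([h3, h4, h5], axis=1)      # (B, out_units * 3, 1)
    return encoder.mlp(h)                   # Linear layers flatten trailing dims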
Example 12: setUp
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def setUp(self):
    class Model(chainer.Chain):
        def __init__(self, link, args, kwargs):
            super(Model, self).__init__()
            with self.init_scope():
                self.l1 = link(*args, **kwargs)

        def __call__(self, x):
            return self.l1(x)

    self.model = Model(self.link, self.args, self.kwargs)

    if self.link is L.EmbedID:
        self.x = np.random.randint(0, self.args[0], size=self.in_shape)
        self.x = self.x.astype(self.in_type)
    else:
        self.x = input_generator.increasing(
            *self.in_shape, dtype=self.in_type)
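The branch matters because EmbedID consumes integer ids bounded by its vocabulary size (args[0]), while the other links under test take float arrays. A standalone illustration of the EmbedID branch, with made-up sizes:

import numpy as np
import chainer.links as L

args, in_shape, in_type = (7, 4), (2, 3), np.int32
link = L.EmbedID(*args)

# Ids must lie in [0, args[0]), or the lookup would be out of range.
x = np.random.randint(0, args[0], size=in_shape).astype(in_type)
print(link(x).shape)  # (2, 3, 4)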
Example 13: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
             max_length=50, dropout=0.2, width=3):
    init_emb = chainer.initializers.Normal(0.1)
    init_out = VarInNormal(1.)
    super(Seq2seq, self).__init__(
        embed_x=L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        embed_y=L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
                          initialW=init_emb),
        embed_position_x=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        embed_position_y=L.EmbedID(max_length, n_units,
                                   initialW=init_emb),
        encoder=ConvGLUEncoder(n_layers, n_units, width, dropout),
        decoder=ConvGLUDecoder(n_layers, n_units, width, dropout),
        W=L.Linear(n_units, n_target_vocab, initialW=init_out),
    )
    self.n_layers = n_layers
    self.n_units = n_units
    self.n_target_vocab = n_target_vocab
    self.max_length = max_length
    self.width = width
    self.dropout = dropout
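embed_position_x and embed_position_y are ordinary EmbedID tables indexed by position rather than token id, so position ids beyond max_length must be clipped before lookup. A hedged sketch of building such ids for a batch:

import numpy as np

max_length, batch_size, seq_len = 50, 4, 60
positions = np.broadcast_to(
    np.arange(seq_len, dtype=np.int32), (batch_size, seq_len))

# Clip so every id is a valid row of the (max_length, n_units) table.
positions = np.minimum(positions, max_length - 1)
# e.g. ex = model.embed_x(tokens) + model.embed_position_x(positions)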
Example 14: __init__
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def __init__(self, vocab_size, embed_size, hidden_size, output_size):
    super(RNNModel, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(vocab_size, embed_size)
        self.rnn = L.LSTM(embed_size, hidden_size)
        self.linear = L.Linear(hidden_size, output_size)
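Only the constructor is shown here; a plausible forward pass (an assumption) feeds each time step through the stateful LSTM and classifies from the final hidden state:

def forward(model, xs):
    # xs: (B, L) int32 ids; returns (B, output_size) scores from the last step.
    model.rnn.reset_state()
    for t in range(xs.shape[1]):
        h = model.rnn(model.embed(xs[:, t]))
    return model.linear(h)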
Example 15: sequence_embed
# Required import: from chainer import links [as alias]
# Or: from chainer.links import EmbedID [as alias]
def sequence_embed(embed, xs, dropout=0.):
    """Efficient embedding function for variable-length sequences.

    The output is equivalent to
    "return [F.dropout(embed(x), ratio=dropout) for x in xs]".
    However, this version calls the embedding and dropout functions only
    once over the concatenated batch, which is faster.

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        xs (list of :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): i-th element in the list is an input variable,
            which is a :math:`(L_i, )`-shaped int array.
        dropout (float): Dropout ratio.

    Returns:
        list of ~chainer.Variable: Output variables. i-th element in the
        list is an output variable, which is a :math:`(L_i, N)`-shaped
        float array. :math:`(N)` is the number of dimensions of word embedding.

    """
    x_len = [len(x) for x in xs]
    x_section = np.cumsum(x_len[:-1])
    ex = embed(F.concat(xs, axis=0))
    ex = F.dropout(ex, ratio=dropout)
    exs = F.split_axis(ex, x_section, 0)
    return exs
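A quick check with ragged inputs, assuming a toy EmbedID (sizes illustrative):

import numpy as np
import chainer.links as L

embed = L.EmbedID(100, 8)
xs = [np.array([1, 2, 3], dtype=np.int32),
      np.array([4, 5], dtype=np.int32)]
exs = sequence_embed(embed, xs, dropout=0.1)
print([e.shape for e in exs])  # [(3, 8), (2, 8)]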