This page collects typical usage examples of the Python method blocks.bricks.lookup.LookupTable. If you are unsure how to use lookup.LookupTable in practice, the curated code samples below may help. You can also explore the module it belongs to, blocks.bricks.lookup, for further details.
Seven code examples of lookup.LookupTable are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code samples.
Example 1: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self, vocab_size, embedding_dim, state_dim, **kwargs):
    super(BidirectionalEncoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim

    self.lookup = LookupTable(name='embeddings')
    self.bidir = BidirectionalWMT15(
        GatedRecurrent(activation=Tanh(), dim=state_dim))
    self.fwd_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='fwd_fork')
    self.back_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='back_fork')

    self.children = [self.lookup, self.bidir,
                     self.fwd_fork, self.back_fork]
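Note that the constructor above creates the LookupTable with only a name; in this pattern its length and dim are typically filled in later (for example in a _push_allocation_config method) from vocab_size and embedding_dim. As a quick reference, here is a minimal, self-contained sketch, not taken from this project, of what LookupTable does on its own; the sizes and initializer are illustrative assumptions:

import numpy
import theano
from theano import tensor

from blocks.bricks.lookup import LookupTable
from blocks.initialization import IsotropicGaussian

# Illustrative sizes: vocabulary of 100 tokens, 8-dimensional embeddings.
lookup = LookupTable(length=100, dim=8, name='embeddings',
                     weights_init=IsotropicGaussian(0.01))
lookup.initialize()

indices = tensor.lmatrix('indices')   # (sequence_length, batch)
embeddings = lookup.apply(indices)    # (sequence_length, batch, dim)

f = theano.function([indices], embeddings)
print(f(numpy.array([[0, 1], [2, 3]], dtype='int64')).shape)  # (2, 2, 8)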
Example 2: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self, vocab_size, embedding_dim, dgru_state_dim, dgru_depth, **kwargs):
    super(Decimator, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.dgru_state_dim = dgru_state_dim
    self.lookup = LookupTable(name='embeddings')
    self.dgru_depth = dgru_depth
    # representation
    self.dgru = RecurrentStack(
        [DGRU(activation=Tanh(), dim=self.dgru_state_dim)
         for _ in range(dgru_depth)],
        skip_connections=True)
    # importance of this representation
    self.bidir_w = Bidirectional(
        RecurrentWithFork(DGRU(activation=Tanh(), dim=self.dgru_state_dim // 2),
                          self.embedding_dim, name='src_word_with_fork'),
        name='bidir_src_word_encoder')
    self.gru_fork = Fork([name for name in self.dgru.apply.sequences
                          if name != 'mask'], prototype=Linear(), name='gru_fork')
    # map to an energy scalar
    self.wl = Linear(input_dim=dgru_state_dim, output_dim=1)
    self.children = [self.lookup, self.dgru, self.gru_fork, self.bidir_w, self.wl]
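The list comprehensions over apply.sequences are a recurring Blocks idiom: a recurrent brick advertises its per-step inputs, and 'mask' is filtered out because it needs no learned projection. A small sketch showing where those names come from (the exact list depends on the transition and Blocks version):

from blocks.bricks import Tanh
from blocks.bricks.recurrent import GatedRecurrent

rnn = GatedRecurrent(activation=Tanh(), dim=4)
# Each name below becomes one output of the Fork, except 'mask'.
print(rnn.apply.sequences)   # typically ['mask', 'inputs', 'gate_inputs']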
Example 3: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self, embedding_dim, state_dim, **kwargs):
    super(BidirectionalEncoder, self).__init__(**kwargs)
    # self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.state_dim = state_dim

    # self.lookup = LookupTable(name='embeddings')
    self.bidir = BidirectionalWMT15(
        GatedRecurrent(activation=Tanh(), dim=state_dim))
    self.fwd_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='fwd_fork')
    self.back_fork = Fork(
        [name for name in self.bidir.prototype.apply.sequences
         if name != 'mask'], prototype=Linear(), name='back_fork')

    self.children = [self.bidir,
                     self.fwd_fork, self.back_fork]
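This variant consumes precomputed embeddings, so the LookupTable lines are commented out and only the forks and the bidirectional transition remain. For reference, a sketch (the dimensions are assumptions) of how such forks are usually sized, normally inside _push_allocation_config, so that one embedding stream feeds every non-mask input of the transition:

from blocks.bricks import Linear, Tanh
from blocks.bricks.parallel import Fork
from blocks.bricks.recurrent import GatedRecurrent

state_dim, embedding_dim = 4, 6        # illustrative assumptions
rnn = GatedRecurrent(activation=Tanh(), dim=state_dim)
fork = Fork([name for name in rnn.apply.sequences if name != 'mask'],
            prototype=Linear(), name='fwd_fork')
# Sizing that the encoder brick would normally push down to its children:
fork.input_dim = embedding_dim
fork.output_dims = [rnn.get_dim(name) for name in fork.output_names]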
Example 4: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self,
             vocab_size,
             embedding_dim,
             n_layers,
             skip_connections,
             state_dim,
             **kwargs):
    """Sole constructor.

    Args:
        vocab_size (int): Source vocabulary size
        embedding_dim (int): Dimension of the embedding layer
        n_layers (int): Number of layers. Layers share the same
                        weight matrices.
        skip_connections (bool): Skip connections connect the
                                 source word embeddings directly
                                 with deeper layers to propagate
                                 the gradient more efficiently
        state_dim (int): Number of hidden units in the recurrent
                         layers.
    """
    super(BidirectionalEncoder, self).__init__(**kwargs)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.n_layers = n_layers
    self.state_dim = state_dim
    self.skip_connections = skip_connections

    self.lookup = LookupTable(name='embeddings')
    if self.n_layers >= 1:
        self.bidir = BidirectionalWMT15(
            GatedRecurrent(activation=Tanh(), dim=state_dim))
        self.fwd_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='fwd_fork')
        self.back_fork = Fork(
            [name for name in self.bidir.prototype.apply.sequences
             if name != 'mask'], prototype=Linear(), name='back_fork')
        self.children = [self.lookup, self.bidir,
                         self.fwd_fork, self.back_fork]
        if self.n_layers > 1:  # Deep encoder
            self.mid_fwd_fork = Fork(
                [name for name in self.bidir.prototype.apply.sequences
                 if name != 'mask'], prototype=Linear(), name='mid_fwd_fork')
            self.mid_back_fork = Fork(
                [name for name in self.bidir.prototype.apply.sequences
                 if name != 'mask'], prototype=Linear(), name='mid_back_fork')
            self.children.append(self.mid_fwd_fork)
            self.children.append(self.mid_back_fork)
    elif self.n_layers == 0:
        self.embedding_dim = state_dim * 2
        self.children = [self.lookup]
    else:
        logging.fatal("Number of encoder layers must be non-negative")
Example 5: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self, num_input_words, emb_dim, dim, vocab, lookup=None,
             fork_and_rnn=None, cache=None, **kwargs):
    self._vocab = vocab
    self._cache = cache

    children = []

    if num_input_words > 0:
        logger.info("Restricting def vocab to " + str(num_input_words))
        self._num_input_words = num_input_words
    else:
        self._num_input_words = vocab.size()

    if lookup is None:
        self._def_lookup = LookupTable(self._num_input_words, emb_dim,
                                       name='def_lookup')
    else:
        self._def_lookup = lookup

    if fork_and_rnn is None:
        self._def_fork = Linear(emb_dim, 4 * dim, name='def_fork')
        self._def_rnn = LSTM(dim, name='def_rnn')
    else:
        self._def_fork, self._def_rnn = fork_and_rnn

    children.extend([self._def_lookup, self._def_fork, self._def_rnn])

    super(LSTMReadDefinitions, self).__init__(children=children, **kwargs)
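The emb_dim -> 4 * dim fork is not arbitrary: a Blocks LSTM expects its per-step input to be four times its state dimension, one slice per gate (input, forget, cell, output). A minimal sketch of the fork-plus-LSTM pairing used above; the sizes are assumptions:

from theano import tensor
from blocks.bricks import Linear
from blocks.bricks.recurrent import LSTM

emb_dim, dim = 8, 16                           # illustrative assumptions
fork = Linear(emb_dim, 4 * dim, name='def_fork')
rnn = LSTM(dim, name='def_rnn')

embeddings = tensor.tensor3('embeddings')      # (time, batch, emb_dim)
# The fork widens each timestep to 4 * dim so the LSTM can split it
# into its four gate pre-activations.
states, cells = rnn.apply(inputs=fork.apply(embeddings))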
Example 6: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(self, emb_dim, dim, num_input_words,
             num_output_words, vocab,
             **kwargs):
    if emb_dim == 0:
        emb_dim = dim
    if num_input_words == 0:
        num_input_words = vocab.size()
    if num_output_words == 0:
        num_output_words = vocab.size()

    self._num_input_words = num_input_words
    self._num_output_words = num_output_words
    self._vocab = vocab

    self._word_to_id = WordToIdOp(self._vocab)

    children = []

    self._main_lookup = LookupTable(self._num_input_words, emb_dim,
                                    name='main_lookup')
    self._encoder_fork = Linear(emb_dim, 4 * dim, name='encoder_fork')
    self._encoder_rnn = LSTM(dim, name='encoder_rnn')
    self._decoder_fork = Linear(emb_dim, 4 * dim, name='decoder_fork')
    self._decoder_rnn = LSTM(dim, name='decoder_rnn')
    children.extend([self._main_lookup,
                     self._encoder_fork, self._encoder_rnn,
                     self._decoder_fork, self._decoder_rnn])
    self._pre_softmax = Linear(dim, self._num_output_words)
    self._softmax = NDimensionalSoftmax()
    children.extend([self._pre_softmax, self._softmax])

    super(LanguageModel, self).__init__(children=children, **kwargs)
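The NDimensionalSoftmax brick lets this language model apply a softmax over the last axis of a 3-D (time, batch, vocab) energy tensor instead of the 2-D input a plain Softmax expects. A small sketch, with shapes assumed for illustration:

from theano import tensor
from blocks.bricks import NDimensionalSoftmax

softmax = NDimensionalSoftmax()
energies = tensor.tensor3('energies')   # (time, batch, vocab)
# extra_ndim=1 tells the brick to flatten one leading axis before the
# usual 2-D softmax and to restore it afterwards.
probs = softmax.apply(energies, extra_ndim=1)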
Example 7: __init__
# Required imports: from blocks.bricks import lookup [as alias]
# or: from blocks.bricks.lookup import LookupTable [as alias]
def __init__(
        self,
        encoder_type,
        num_characters,
        input_dim,
        encoder_dim,
        **kwargs):
    # 'lookup' is accepted here as well, since the branch below
    # explicitly handles it.
    assert encoder_type in [None, 'lookup', 'bidirectional']
    self.encoder_type = encoder_type
    super(Encoder, self).__init__(**kwargs)
    self.children = []

    if encoder_type in ['lookup', 'bidirectional']:
        self.embed_label = LookupTable(
            num_characters,
            input_dim,
            name='embed_label')
        self.children += [
            self.embed_label]
    else:
        # If there is no encoder.
        assert num_characters == input_dim

    if encoder_type == 'bidirectional':
        transition = RecurrentWithFork(
            GatedRecurrent(dim=encoder_dim).apply,
            input_dim, name='encoder_transition')
        self.encoder = Bidirectional(transition, name='encoder')
        self.children.append(self.encoder)
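Hypothetical usage of this Encoder; the sizes below are illustrative assumptions, not values from the original project:

encoder = Encoder(encoder_type='bidirectional', num_characters=50,
                  input_dim=32, encoder_dim=64, name='encoder')

With encoder_type='lookup' the labels are only embedded; with 'bidirectional' a GatedRecurrent additionally runs over the embedded sequence in both directions.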