This article collects typical usage examples of `allennlp.modules.Attention` in Python. If you are unsure what `modules.Attention` does or how to use it, the curated examples below should help; you can also explore the other members of the `allennlp.modules` package.
The following shows 9 code examples of `modules.Attention`, ordered by popularity by default.
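For orientation: in AllenNLP, `Attention` is an abstract `torch.nn.Module` whose `forward` takes a query vector, a matrix of candidate vectors, and an optional mask, and returns attention weights over the matrix rows (normalized by default). A minimal sketch using the concrete `DotProductAttention` subclass; the tensor shapes are illustrative assumptions, and the boolean mask dtype follows allennlp 1.x:

import torch
from allennlp.modules.attention import DotProductAttention

attention = DotProductAttention()         # normalize=True by default
query = torch.randn(2, 8)                 # (batch_size, embedding_dim)
matrix = torch.randn(2, 5, 8)             # (batch_size, num_rows, embedding_dim)
mask = torch.ones(2, 5, dtype=torch.bool) # optional padding mask

weights = attention(query, matrix, mask)  # (batch_size, num_rows); each row sums to 1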
Example 1: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    dropout: float = 0.0,
) -> None:
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        activation=activation,
        add_action_bias=add_action_bias,
        dropout=dropout,
    )
    # See the class docstring for a description of what this does.
    self._checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
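The learned scalar above typically rescales a checklist-balance term before it enters the action scores. A heavily hedged sketch of how such a multiplier is usually applied; `action_logits` and `checklist_balance` are illustrative names, not taken from the source:

# Hypothetical scoring step: bias the logits toward still-unused agenda actions,
# with the strength of the bias learned through the scalar parameter.
adjusted_logits = action_logits + self._checklist_multiplier * checklist_balance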
Example 2: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(self, vocab_size, max_len, embed_size, hidden_size, sos_id=2, eos_id=3, n_layers=1,
             rnn_cell='GRU', input_dropout_p=0, dropout_p=0, use_attention=False):
    super(Decoder, self).__init__()
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.n_layers = n_layers
    self.input_dropout = nn.Dropout(p=input_dropout_p)
    if rnn_cell == 'LSTM':
        self.rnn_cell = nn.LSTM
    elif rnn_cell == 'GRU':
        self.rnn_cell = nn.GRU
    else:
        raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
    self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)
    self.output_size = vocab_size
    self.max_length = max_len
    self.use_attention = use_attention
    self.eos_id = eos_id
    self.sos_id = sos_id
    self.init_input = None
    self.embedding = nn.Embedding(self.output_size, embed_size)
    if use_attention:
        self.attention = Attention(self.hidden_size)
    self.out = nn.Linear(self.hidden_size, self.output_size)
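Note that `Attention(self.hidden_size)` does not match AllenNLP's `Attention`, which is abstract and is constructed without a dimension argument; the signature here looks like the Luong-style `Attention(dim)` found in seq2seq toolkits such as IBM's pytorch-seq2seq. A hedged instantiation sketch for this Decoder, with illustrative sizes:

# Illustrative sizes; sos_id/eos_id keep the defaults (2 and 3) from the signature.
decoder = Decoder(vocab_size=10_000, max_len=50, embed_size=128,
                  hidden_size=256, rnn_cell='GRU', use_attention=True)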
Example 3: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             seq2seq_encoder: Seq2SeqEncoder,
             initializer: InitializerApplicator) -> None:
    super(ProLocalModel, self).__init__(vocab)
    self.text_field_embedder = text_field_embedder
    self.seq2seq_encoder = seq2seq_encoder
    self.attention_layer = Attention(
        similarity_function=BilinearSimilarity(2 * seq2seq_encoder.get_output_dim(),
                                               seq2seq_encoder.get_output_dim()),
        normalize=True)
    self.num_types = self.vocab.get_vocab_size("state_change_type_labels")
    self.aggregate_feedforward = Linear(seq2seq_encoder.get_output_dim(), self.num_types)
    # By default "O" is ignored in the metric computation.
    self.span_metric = SpanBasedF1Measure(vocab, tag_namespace="state_change_tags")
    self.num_tags = self.vocab.get_vocab_size("state_change_tags")
    self.tag_projection_layer = TimeDistributed(
        Linear(self.seq2seq_encoder.get_output_dim() + 2, self.num_tags))
    self._type_accuracy = CategoricalAccuracy()
    self.type_f1_metrics = {}
    self.type_labels_vocab = self.vocab.get_index_to_token_vocabulary("state_change_type_labels")
    for type_label in self.type_labels_vocab.values():
        self.type_f1_metrics["type_" + type_label] = F1Measure(
            self.vocab.get_token_index(type_label, "state_change_type_labels"))
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
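The `Attention(similarity_function=...)` call above is the legacy (pre-1.0) AllenNLP API. Later releases express the same bilinear scoring with a dedicated subclass; a rough modern equivalent, assuming the same dimensions as above:

from allennlp.modules.attention import BilinearAttention

attention_layer = BilinearAttention(
    vector_dim=2 * seq2seq_encoder.get_output_dim(),  # query: two concatenated states
    matrix_dim=seq2seq_encoder.get_output_dim(),      # keys: encoder outputs
    normalize=True,
)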
Example 4: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    mixture_feedforward: FeedForward = None,
    dropout: float = 0.0,
    num_layers: int = 1,
) -> None:
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        activation=activation,
        add_action_bias=add_action_bias,
        dropout=dropout,
        num_layers=num_layers,
    )
    self._mixture_feedforward = mixture_feedforward
    if mixture_feedforward is not None:
        check_dimensions_match(
            encoder_output_dim,
            mixture_feedforward.get_input_dim(),
            "hidden state embedding dim",
            "mixture feedforward input dim",
        )
        check_dimensions_match(
            mixture_feedforward.get_output_dim(),
            1,
            "mixture feedforward output dim",
            "dimension for scalar value",
        )
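`check_dimensions_match` is a small guard from `allennlp.common.checks`: it raises a `ConfigurationError` naming both dimensions when they differ, which makes configuration bugs easy to trace. A minimal self-contained sketch:

from allennlp.common.checks import ConfigurationError, check_dimensions_match

try:
    check_dimensions_match(256, 128,
                           "hidden state embedding dim",
                           "mixture feedforward input dim")
except ConfigurationError as error:
    print(error)  # the message names both mismatched dimensions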
Example 5: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    mixture_feedforward: FeedForward = None,
    dropout: float = 0.0,
) -> None:
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        activation=activation,
        add_action_bias=add_action_bias,
        dropout=dropout,
    )
    self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
    self._mixture_feedforward = mixture_feedforward
    if mixture_feedforward is not None:
        check_dimensions_match(
            encoder_output_dim,
            mixture_feedforward.get_input_dim(),
            "hidden state embedding dim",
            "mixture feedforward input dim",
        )
        check_dimensions_match(
            mixture_feedforward.get_output_dim(),
            1,
            "mixture feedforward output dim",
            "dimension for scalar value",
        )
Example 6: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(
    self,
    vocab: Vocabulary,
    sentence_embedder: TextFieldEmbedder,
    action_embedding_dim: int,
    encoder: Seq2SeqEncoder,
    attention: Attention,
    decoder_beam_search: BeamSearch,
    max_decoding_steps: int,
    dropout: float = 0.0,
) -> None:
    super(NlvrDirectSemanticParser, self).__init__(
        vocab=vocab,
        sentence_embedder=sentence_embedder,
        action_embedding_dim=action_embedding_dim,
        encoder=encoder,
        dropout=dropout,
    )
    self._decoder_trainer = MaximumMarginalLikelihood()
    self._decoder_step = BasicTransitionFunction(
        encoder_output_dim=self._encoder.get_output_dim(),
        action_embedding_dim=action_embedding_dim,
        input_attention=attention,
        activation=Activation.by_name("tanh")(),
        add_action_bias=False,
        dropout=dropout,
    )
    self._decoder_beam_search = decoder_beam_search
    self._max_decoding_steps = max_decoding_steps
    self._action_padding_index = -1
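The `attention` argument can be any concrete `Attention` subclass; it becomes the `input_attention` that `BasicTransitionFunction` uses to attend over the encoded sentence at each decoding step. A hedged construction sketch with illustrative dimensions:

from allennlp.modules.attention import AdditiveAttention

# Assumes the decoder queries and the encoder outputs share a size of 200.
attention = AdditiveAttention(vector_dim=200, matrix_dim=200)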
Example 7: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(self,
             vocab: Vocabulary,
             source_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             attention: Attention,
             beam_size: int,
             max_decoding_steps: int,
             target_embedding_dim: int = None,
             copy_token: str = "@COPY@",
             source_namespace: str = "tokens",
             target_namespace: str = "target_tokens",
             tensor_based_metric: Metric = None,
             token_based_metric: Metric = None,
             tie_embeddings: bool = False) -> None:
    target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
    CopyNetSeq2Seq.__init__(
        self,
        vocab,
        source_embedder,
        encoder,
        attention,
        beam_size,
        max_decoding_steps,
        target_embedding_dim,
        copy_token,
        source_namespace,
        target_namespace,
        tensor_based_metric,
        token_based_metric,
    )
    self._tie_embeddings = tie_embeddings
    if self._tie_embeddings:
        assert source_namespace == target_namespace
        assert "token_embedder_tokens" in dict(self._source_embedder.named_children())
        source_token_embedder = dict(self._source_embedder.named_children())["token_embedder_tokens"]
        self._target_embedder.weight = source_token_embedder.weight
    if tensor_based_metric is None:
        # Override the default tensor-based metric the parent sets when none is given.
        self._tensor_based_metric = None
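Tying embeddings makes the target embedder reuse the source embedding tensor, so both directions share one parameter matrix and its gradients. The mechanism in isolation (a standalone sketch, not the model's code):

import torch.nn as nn

source = nn.Embedding(num_embeddings=10_000, embedding_dim=300)
target = nn.Embedding(num_embeddings=10_000, embedding_dim=300)
target.weight = source.weight            # one shared Parameter object
assert target.weight is source.weight    # an update to either is seen by both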
Example 8: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    dropout: float = 0.0,
    num_layers: int = 1,
) -> None:
    super().__init__()
    self._input_attention = input_attention
    self._add_action_bias = add_action_bias
    self._activation = activation
    self._num_layers = num_layers
    # The decoder output dim needs to be the same as the encoder output dim, since we
    # initialize the hidden state of the decoder with the final hidden state of the encoder.
    output_dim = encoder_output_dim
    input_dim = output_dim
    # Our decoder input will be the concatenation of the attended encoder hidden state
    # (i.e., the attended question encoding) and the previous action embedding, and we'll
    # project that down to the decoder's `input_dim`, which we arbitrarily set to be the
    # same as `output_dim`.
    self._input_projection_layer = Linear(encoder_output_dim + action_embedding_dim, input_dim)
    # Before making a prediction, we compute an attention over the input given our updated
    # hidden state. Then we concatenate that attended input with the decoder state and
    # project to `action_embedding_dim` to make a prediction.
    self._output_projection_layer = Linear(output_dim + encoder_output_dim, action_embedding_dim)
    if self._num_layers > 1:
        self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
    else:
        # We use an ``LSTMCell`` if we just have one layer because it is slightly faster,
        # since we are only running the LSTM for one step each time.
        self._decoder_cell = LSTMCell(input_dim, output_dim)
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x
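Putting the layers above together, a single decoding step of such a transition function looks roughly like the sketch below. It is simplified (no grammar masking, single-layer case only), the argument names are illustrative, and `util` is `allennlp.nn.util`:

import torch
from allennlp.nn import util

def _one_decoder_step(self, hidden_state, memory_cell,
                      previous_action_embedding, encoder_outputs, encoder_mask):
    # Attend over the encoded input, using the current hidden state as the query.
    attention_weights = self._input_attention(hidden_state, encoder_outputs, encoder_mask)
    attended_question = util.weighted_sum(encoder_outputs, attention_weights)
    # Concatenate the attended input with the previous action embedding, project down to
    # the decoder input size, then advance the LSTM cell by one step.
    decoder_input = self._activation(self._input_projection_layer(
        torch.cat([attended_question, previous_action_embedding], dim=-1)))
    hidden_state, memory_cell = self._decoder_cell(decoder_input, (hidden_state, memory_cell))
    # Predict the next action embedding from the new state plus the attended input.
    predicted_action_embedding = self._output_projection_layer(
        torch.cat([hidden_state, attended_question], dim=-1))
    return hidden_state, memory_cell, predicted_action_embedding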
Example 9: __init__
# Required import: from allennlp import modules [as alias]
# Or alternatively: from allennlp.modules import Attention [as alias]
def __init__(self,
             vocab: Vocabulary,
             source_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             max_decoding_steps: int,
             attention: Attention = None,
             beam_size: int = None,
             target_namespace: str = "tokens",
             target_embedding_dim: int = None,
             scheduled_sampling_ratio: float = 0.,
             use_projection: bool = False,
             projection_dim: int = None,
             tie_embeddings: bool = False) -> None:
    super(Seq2Seq, self).__init__(
        vocab,
        source_embedder,
        encoder,
        max_decoding_steps,
        attention,
        None,
        beam_size,
        target_namespace,
        target_embedding_dim,
        scheduled_sampling_ratio,
    )
    use_projection = use_projection or projection_dim is not None
    self._tie_embeddings = tie_embeddings
    if self._tie_embeddings:
        assert "token_embedder_tokens" in dict(self._source_embedder.named_children())
        source_token_embedder = dict(self._source_embedder.named_children())["token_embedder_tokens"]
        self._target_embedder.weight = source_token_embedder.weight
    num_classes = self.vocab.get_vocab_size(self._target_namespace)
    self._use_projection = use_projection
    if self._use_projection:
        self._projection_dim = projection_dim or self._source_embedder.get_output_dim()
        self._hidden_projection_layer = Linear(self._decoder_output_dim, self._projection_dim)
        self._output_projection_layer = Linear(self._projection_dim, num_classes)
    else:
        self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
    # Disable the BLEU metric that the parent class may have set up.
    self._bleu = False
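The optional projection inserts a low-rank bottleneck between the decoder state and the vocabulary softmax, which can shrink the output layer considerably when the decoder state is much wider than the embedding size. A rough weight-count comparison under illustrative sizes:

decoder_output_dim, projection_dim, num_classes = 1024, 300, 50_000

direct = decoder_output_dim * num_classes            # 51,200,000 weights
factored = (decoder_output_dim * projection_dim      # 307,200
            + projection_dim * num_classes)          # + 15,000,000 = 15,307,200
print(direct, factored)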