

Python modules.Attention Usage Examples

This article collects typical usage examples of allennlp.modules.Attention in Python. If you are unsure what allennlp.modules.Attention does, how to use it, or want to see it in real code, the curated examples below may help. You can also browse further usage examples from the containing module, allennlp.modules.


The sections below present 9 code examples of modules.Attention, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Python code examples.
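Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of how an allennlp Attention module is typically constructed and called. It assumes the DotProductAttention subclass and the (vector, matrix) calling convention of recent allennlp releases:

import torch
from allennlp.modules.attention import DotProductAttention

attention = DotProductAttention()    # normalizes scores with a softmax by default
vector = torch.randn(2, 8)           # (batch_size, embedding_dim), e.g. a decoder state
matrix = torch.randn(2, 5, 8)        # (batch_size, num_tokens, embedding_dim), encoder outputs
weights = attention(vector, matrix)  # (batch_size, num_tokens); each row sums to 1

Other subclasses such as BilinearAttention(vector_dim, matrix_dim) follow the same calling convention and are what the constructors below expect for their attention arguments.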

Example 1: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        dropout: float = 0.0,
    ) -> None:
        super().__init__(
            encoder_output_dim=encoder_output_dim,
            action_embedding_dim=action_embedding_dim,
            input_attention=input_attention,
            activation=activation,
            add_action_bias=add_action_bias,
            dropout=dropout,
        )
        # See the class docstring for a description of what this does.
        self._checklist_multiplier = Parameter(torch.FloatTensor([1.0])) 
Author: allenai, Project: allennlp-semparse, Lines: 21, Source: coverage_transition_function.py
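The learned scalar above rescales the checklist balance before it is mixed into the action scores (see the class docstring in allennlp-semparse for the full description). A generic, hypothetical sketch of that pattern, with made-up tensor shapes:

import torch
from torch.nn import Parameter

checklist_multiplier = Parameter(torch.FloatTensor([1.0]))  # learned scalar, as in Example 1
action_logits = torch.randn(4, 10)     # (group_size, num_actions); illustrative only
checklist_balance = torch.rand(4, 10)  # how much each action is still "unchecked"
adjusted_logits = action_logits + checklist_multiplier * checklist_balance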

Example 2: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(self, vocab_size, max_len, embed_size, hidden_size, sos_id=2, eos_id=3, n_layers=1, rnn_cell='GRU',
             input_dropout_p=0, dropout_p=0, use_attention=False):
        super(Decoder, self).__init__()

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        if rnn_cell == 'LSTM':
            self.rnn_cell = nn.LSTM
        elif rnn_cell == 'GRU':
            self.rnn_cell = nn.GRU
        else:
            raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
        self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)

        self.output_size = vocab_size
        self.max_length = max_len
        self.use_attention = use_attention
        self.eos_id = eos_id
        self.sos_id = sos_id

        self.init_input = None

        self.embedding = nn.Embedding(self.output_size, embed_size)
        if use_attention:
            self.attention = Attention(self.hidden_size)

        self.out = nn.Linear(self.hidden_size, self.output_size) 
Author: ConvLab, Project: ConvLab, Lines: 31, Source: usermodule.py
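A hypothetical construction of this Decoder, with purely illustrative hyperparameters. Note that despite the import comment above, the Attention used here is ConvLab's own class, so use_attention=True requires that module (and torch.nn as nn) to be importable:

decoder = Decoder(
    vocab_size=500,    # illustrative values only
    max_len=30,
    embed_size=64,
    hidden_size=128,
    rnn_cell='GRU',    # 'LSTM' is the only other supported value
    input_dropout_p=0.1,
    use_attention=True,
)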

Example 3: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 seq2seq_encoder: Seq2SeqEncoder,
                 initializer: InitializerApplicator) -> None:
        super(ProLocalModel, self).__init__(vocab)

        self.text_field_embedder = text_field_embedder
        self.seq2seq_encoder = seq2seq_encoder

        self.attention_layer = \
            Attention(similarity_function=BilinearSimilarity(2 * seq2seq_encoder.get_output_dim(),
                                                             seq2seq_encoder.get_output_dim()), normalize=True)

        self.num_types = self.vocab.get_vocab_size("state_change_type_labels")
        self.aggregate_feedforward = Linear(seq2seq_encoder.get_output_dim(),
                                            self.num_types)

        self.span_metric = SpanBasedF1Measure(vocab,
                                              tag_namespace="state_change_tags")  # by default "O" is ignored in metric computation
        self.num_tags = self.vocab.get_vocab_size("state_change_tags")

        self.tag_projection_layer = TimeDistributed(Linear(self.seq2seq_encoder.get_output_dim() + 2,
                                                           self.num_tags))
        self._type_accuracy = CategoricalAccuracy()

        self.type_f1_metrics = {}
        self.type_labels_vocab = self.vocab.get_index_to_token_vocabulary("state_change_type_labels")
        for type_label in self.type_labels_vocab.values():
            self.type_f1_metrics["type_" + type_label] = F1Measure(self.vocab.get_token_index(type_label, "state_change_type_labels"))

        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Author: allenai, Project: propara, Lines: 35, Source: prolocal_model.py

Example 4: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        mixture_feedforward: FeedForward = None,
        dropout: float = 0.0,
        num_layers: int = 1,
    ) -> None:
        super().__init__(
            encoder_output_dim=encoder_output_dim,
            action_embedding_dim=action_embedding_dim,
            input_attention=input_attention,
            activation=activation,
            add_action_bias=add_action_bias,
            dropout=dropout,
            num_layers=num_layers,
        )
        self._mixture_feedforward = mixture_feedforward

        if mixture_feedforward is not None:
            check_dimensions_match(
                encoder_output_dim,
                mixture_feedforward.get_input_dim(),
                "hidden state embedding dim",
                "mixture feedforward input dim",
            )
            check_dimensions_match(
                mixture_feedforward.get_output_dim(),
                1,
                "mixture feedforward output dim",
                "dimension for scalar value",
            ) 
Author: allenai, Project: allennlp-semparse, Lines: 37, Source: linking_transition_function.py
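Examples 4 and 5 both validate the optional mixture feedforward with check_dimensions_match. A minimal sketch of what that helper does (the exact error wording comes from allennlp and may differ between versions):

from allennlp.common.checks import ConfigurationError, check_dimensions_match

try:
    check_dimensions_match(256, 128,
                           "hidden state embedding dim",
                           "mixture feedforward input dim")
except ConfigurationError as error:
    print(error)  # reports both names and the mismatched dimensions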

Example 5: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        mixture_feedforward: FeedForward = None,
        dropout: float = 0.0,
    ) -> None:
        super().__init__(
            encoder_output_dim=encoder_output_dim,
            action_embedding_dim=action_embedding_dim,
            input_attention=input_attention,
            activation=activation,
            add_action_bias=add_action_bias,
            dropout=dropout,
        )
        self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
        self._mixture_feedforward = mixture_feedforward

        if mixture_feedforward is not None:
            check_dimensions_match(
                encoder_output_dim,
                mixture_feedforward.get_input_dim(),
                "hidden state embedding dim",
                "mixture feedforward input dim",
            )
            check_dimensions_match(
                mixture_feedforward.get_output_dim(),
                1,
                "mixture feedforward output dim",
                "dimension for scalar value",
            ) 
Author: allenai, Project: allennlp-semparse, Lines: 36, Source: linking_coverage_transition_function.py

Example 6: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(
        self,
        vocab: Vocabulary,
        sentence_embedder: TextFieldEmbedder,
        action_embedding_dim: int,
        encoder: Seq2SeqEncoder,
        attention: Attention,
        decoder_beam_search: BeamSearch,
        max_decoding_steps: int,
        dropout: float = 0.0,
    ) -> None:
        super(NlvrDirectSemanticParser, self).__init__(
            vocab=vocab,
            sentence_embedder=sentence_embedder,
            action_embedding_dim=action_embedding_dim,
            encoder=encoder,
            dropout=dropout,
        )
        self._decoder_trainer = MaximumMarginalLikelihood()
        self._decoder_step = BasicTransitionFunction(
            encoder_output_dim=self._encoder.get_output_dim(),
            action_embedding_dim=action_embedding_dim,
            input_attention=attention,
            activation=Activation.by_name("tanh")(),
            add_action_bias=False,
            dropout=dropout,
        )
        self._decoder_beam_search = decoder_beam_search
        self._max_decoding_steps = max_decoding_steps
        self._action_padding_index = -1 
Author: allenai, Project: allennlp-semparse, Lines: 32, Source: nlvr_direct_semantic_parser.py

Example 7: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 attention: Attention,
                 beam_size: int,
                 max_decoding_steps: int,
                 target_embedding_dim: int = None,
                 copy_token: str = "@COPY@",
                 source_namespace: str = "tokens",
                 target_namespace: str = "target_tokens",
                 tensor_based_metric: Metric = None,
                 token_based_metric: Metric = None,
                 tie_embeddings: bool = False) -> None:
        target_embedding_dim = target_embedding_dim or source_embedder.get_output_dim()
        CopyNetSeq2Seq.__init__(
            self,
            vocab,
            source_embedder,
            encoder,
            attention,
            beam_size,
            max_decoding_steps,
            target_embedding_dim,
            copy_token,
            source_namespace,
            target_namespace,
            tensor_based_metric,
            token_based_metric
        )
        self._tie_embeddings = tie_embeddings

        if self._tie_embeddings:
            assert source_namespace == target_namespace
            assert "token_embedder_tokens" in dict(self._source_embedder.named_children())
            source_token_embedder = dict(self._source_embedder.named_children())["token_embedder_tokens"]
            self._target_embedder.weight = source_token_embedder.weight

        if tensor_based_metric is None:
            # Explicitly disable the tensor-based metric that the parent class
            # would otherwise create by default (BLEU in allennlp's CopyNetSeq2Seq).
            self._tensor_based_metric = None
Author: IlyaGusev, Project: summarus, Lines: 42, Source: copynet.py
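The tie_embeddings branch above shares one weight tensor between the source and target embedders. A generic PyTorch sketch of that idea, independent of allennlp:

import torch.nn as nn

source_embedder = nn.Embedding(num_embeddings=1000, embedding_dim=128)
target_embedder = nn.Embedding(num_embeddings=1000, embedding_dim=128)
target_embedder.weight = source_embedder.weight  # one shared Parameter from now on
assert target_embedder.weight is source_embedder.weight

Tying only works when both sides index the same vocabulary, which is exactly what the asserts in Example 7 enforce.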

Example 8: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(
        self,
        encoder_output_dim: int,
        action_embedding_dim: int,
        input_attention: Attention,
        activation: Activation = Activation.by_name("relu")(),
        add_action_bias: bool = True,
        dropout: float = 0.0,
        num_layers: int = 1,
    ) -> None:
        super().__init__()
        self._input_attention = input_attention
        self._add_action_bias = add_action_bias
        self._activation = activation
        self._num_layers = num_layers

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the attended encoder hidden state (i.e.,
        # the attended question encoding) and the previous action embedding, and we'll project that
        # down to the decoder's `input_dim`, which we arbitrarily set to be the same as
        # `output_dim`.
        self._input_projection_layer = Linear(encoder_output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(
            output_dim + encoder_output_dim, action_embedding_dim
        )
        if self._num_layers > 1:
            self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
        else:
            # We use an ``LSTMCell`` if we just have one layer because it is slightly
            # faster: we are only running the LSTM for one step at a time anyway.
            self._decoder_cell = LSTMCell(input_dim, output_dim)

        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x 
Author: allenai, Project: allennlp-semparse, Lines: 44, Source: basic_transition_function.py
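A generic sketch of the single-layer fast path described in the comments above: an LSTMCell is advanced one step at a time instead of unrolling a full nn.LSTM (shapes are illustrative):

import torch
from torch.nn import LSTMCell

cell = LSTMCell(input_size=32, hidden_size=32)
hidden = torch.zeros(4, 32)       # (group_size, output_dim)
memory = torch.zeros(4, 32)
step_input = torch.randn(4, 32)   # projected [attended encoding; action embedding]
hidden, memory = cell(step_input, (hidden, memory))  # one decoding step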

Example 9: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import Attention [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 source_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 max_decoding_steps: int,
                 attention: Attention = None,
                 beam_size: int = None,
                 target_namespace: str = "tokens",
                 target_embedding_dim: int = None,
                 scheduled_sampling_ratio: float = 0.,
                 use_projection: bool = False,
                 projection_dim: int = None,
                 tie_embeddings: bool = False) -> None:
        super(Seq2Seq, self).__init__(
            vocab,
            source_embedder,
            encoder,
            max_decoding_steps,
            attention,
            None,  # attention_function, a legacy argument of older allennlp SimpleSeq2Seq
            beam_size,
            target_namespace,
            target_embedding_dim,
            scheduled_sampling_ratio
        )
        use_projection = use_projection or projection_dim is not None

        self._tie_embeddings = tie_embeddings

        if self._tie_embeddings:
            assert "token_embedder_tokens" in dict(self._source_embedder.named_children())
            source_token_embedder = dict(self._source_embedder.named_children())["token_embedder_tokens"]
            self._target_embedder.weight = source_token_embedder.weight

        num_classes = self.vocab.get_vocab_size(self._target_namespace)
        self._use_projection = use_projection
        if self._use_projection:
            self._projection_dim = projection_dim or self._source_embedder.get_output_dim()
            self._hidden_projection_layer = Linear(self._decoder_output_dim, self._projection_dim)
            self._output_projection_layer = Linear(self._projection_dim, num_classes)
        else:
            self._output_projection_layer = Linear(self._decoder_output_dim, num_classes)
        self._bleu = False 
Author: IlyaGusev, Project: summarus, Lines: 45, Source: seq2seq.py
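Example 9's optional projection is a factorized output layer: decoder states are first squeezed through a smaller projection_dim before being mapped to the vocabulary, which cuts parameters when the vocabulary is large. A generic sketch, with made-up dimensions:

import torch
from torch.nn import Linear

decoder_output_dim, projection_dim, num_classes = 512, 128, 30000  # illustrative
hidden_projection_layer = Linear(decoder_output_dim, projection_dim)
output_projection_layer = Linear(projection_dim, num_classes)

decoder_state = torch.randn(4, decoder_output_dim)
logits = output_projection_layer(hidden_projection_layer(decoder_state))  # (4, num_classes)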


Note: The allennlp.modules.Attention examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.