

Python modules.TimeDistributed Method Code Examples

This article collects typical usage examples of the Python method allennlp.modules.TimeDistributed from open-source projects. If you are wondering what modules.TimeDistributed does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the containing package, allennlp.modules, for further usage examples.


The following presents 15 code examples of the modules.TimeDistributed method, sorted by popularity by default.
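
For orientation before the examples: TimeDistributed wraps another Module so that an input with an extra time dimension, shaped (batch_size, time_steps, ...), is collapsed to (batch_size * time_steps, ...), passed through the wrapped module, and reshaped back afterwards. Below is a minimal sketch of that behavior using a plain torch.nn.Linear; the layer sizes and tensor shapes are illustrative assumptions, not taken from any example on this page.

import torch
from allennlp.modules import TimeDistributed

# Wrap a Linear layer so it is applied identically at every timestep.
linear = torch.nn.Linear(4, 3)
distributed_linear = TimeDistributed(linear)

inputs = torch.randn(2, 5, 4)         # (batch_size, time_steps, input_dim)
outputs = distributed_linear(inputs)  # reshaped to (10, 4) internally, then back
print(outputs.shape)                  # torch.Size([2, 5, 3])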

Example 1: test_time_distributed_reshapes_multiple_inputs_with_pass_through_non_tensor_correctly

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def test_time_distributed_reshapes_multiple_inputs_with_pass_through_non_tensor_correctly(self):
        class FakeModule(Module):
            @overrides
            def forward(self, input_tensor, number=0, another_tensor=None):
                return input_tensor + number + another_tensor

        module = FakeModule()
        distributed_module = TimeDistributed(module)

        input_tensor1 = torch.LongTensor([[[1, 2], [3, 4]]])
        input_number = 5
        input_tensor2 = torch.LongTensor([[[4, 2], [9, 1]]])

        output = distributed_module(
            input_tensor1,
            number=input_number,
            another_tensor=input_tensor2,
            pass_through=["number"],
        )
        assert_almost_equal(output.data.numpy(), [[[10, 9], [17, 10]]]) 
Developer: allenai, Project: allennlp, Lines: 23, Source: time_distributed_test.py

Example 2: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SimpleTagger, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(u"labels")
        self.encoder = encoder
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               u"text field embedding dim", u"encoder input dim")
        self.metrics = {
                u"accuracy": CategoricalAccuracy(),
                u"accuracy3": CategoricalAccuracy(top_k=3)
        }

        initializer(self)

Developer: plasticityai, Project: magnitude, Lines: 25, Source: simple_tagger.py

Example 3: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 include_start_end_transitions: bool = True,
                 dropout: Optional[float] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.label_namespace = 'labels'
        self.num_tags = self.vocab.get_vocab_size(self.label_namespace)

        # encode text
        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        self.dropout = torch.nn.Dropout(dropout) if dropout else None

        # crf
        output_dim = self.encoder.get_output_dim()
        self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
        self.crf = ConditionalRandomField(self.num_tags, constraints=None, include_start_end_transitions=include_start_end_transitions)

        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }
        for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
            self.metrics['F1_' + label] = F1Measure(positive_label=index)

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 32, Source: pico_crf_tagger.py

Example 4: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 label_namespace: str = "labels",
                 constraint_type: str = None,
                 include_start_end_transitions: bool = True,
                 dropout: float = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_tags = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_tags))

        if constraint_type is not None:
            labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
            constraints = allowed_transitions(constraint_type, labels)
        else:
            constraints = None

        self.crf = ConditionalRandomField(
                self.num_tags, constraints,
                include_start_end_transitions=include_start_end_transitions
        )

        self.span_metric = SpanBasedF1Measure(vocab,
                                              tag_namespace=label_namespace,
                                              label_encoding=constraint_type or "BIO")

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")
        initializer(self) 
Developer: arthurmensch, Project: didyprog, Lines: 42, Source: crf_tagger.py

Example 5: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 predictor_dropout=0.0,
                 labels_namespace: str = "labels",
                 detect_namespace: str = "d_tags",
                 verbose_metrics: bool = False,
                 label_smoothing: float = 0.0,
                 confidence: float = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(Seq2Labels, self).__init__(vocab, regularizer)

        self.label_namespaces = [labels_namespace,
                                 detect_namespace]
        self.text_field_embedder = text_field_embedder
        self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
        self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
        self.label_smoothing = label_smoothing
        self.confidence = confidence
        self.incorr_index = self.vocab.get_token_index("INCORRECT",
                                                       namespace=detect_namespace)

        self._verbose_metrics = verbose_metrics
        self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))

        self.tag_labels_projection_layer = TimeDistributed(
            Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))

        self.tag_detect_projection_layer = TimeDistributed(
            Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))

        self.metrics = {"accuracy": CategoricalAccuracy()}

        initializer(self) 
Developer: plkmo, Project: NLP_Toolkit, Lines: 36, Source: seq2labels_model.py

Example 6: forward

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def forward(
        self, tokens: torch.Tensor, mask: torch.BoolTensor = None, num_wrapping_dims: int = 0
    ):
        pooler = self.pooler
        for _ in range(num_wrapping_dims):
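            # Imported locally to avoid a circular import at module load time.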
            from allennlp.modules import TimeDistributed

            pooler = TimeDistributed(pooler)
        pooled = pooler(tokens)
        pooled = self._dropout(pooled)
        return pooled 
Developer: allenai, Project: allennlp, Lines: 13, Source: bert_pooler.py
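
A note on the num_wrapping_dims loop in Example 6: each wrap folds one extra leading dimension into the batch, so wrapping twice handles inputs with two dimensions between the batch and whatever shape the inner module expects. A minimal sketch of double wrapping, with hypothetical shapes chosen only for illustration:

import torch
from allennlp.modules import TimeDistributed

linear = torch.nn.Linear(4, 3)
double_wrapped = TimeDistributed(TimeDistributed(linear))

# (batch, passages, tokens, features): the outer wrapper folds `passages`
# into the batch, the inner one folds `tokens`, then both restore the shape.
x = torch.randn(2, 5, 7, 4)
y = double_wrapped(x)
print(y.shape)  # torch.Size([2, 5, 7, 3])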

Example 7: test_time_distributed_reshapes_named_arg_correctly

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def test_time_distributed_reshapes_named_arg_correctly(self):
        char_embedding = Embedding(2, 2)
        char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
        distributed_embedding = TimeDistributed(char_embedding)
        char_input = torch.LongTensor([[[1, 0], [1, 1]]])
        output = distributed_embedding(char_input)
        assert_almost_equal(
            output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
        ) 
Developer: allenai, Project: allennlp, Lines: 11, Source: time_distributed_test.py

Example 8: test_time_distributed_reshapes_positional_kwarg_correctly

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def test_time_distributed_reshapes_positional_kwarg_correctly(self):
        char_embedding = Embedding(2, 2)
        char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
        distributed_embedding = TimeDistributed(char_embedding)
        char_input = torch.LongTensor([[[1, 0], [1, 1]]])
        output = distributed_embedding(input=char_input)
        assert_almost_equal(
            output.data.numpy(), [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]]
        ) 
Developer: allenai, Project: allennlp, Lines: 11, Source: time_distributed_test.py

Example 9: test_time_distributed_works_with_multiple_inputs

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def test_time_distributed_works_with_multiple_inputs(self):
        module = lambda x, y: x + y
        distributed = TimeDistributed(module)
        x_input = torch.LongTensor([[[1, 2], [3, 4]]])
        y_input = torch.LongTensor([[[4, 2], [9, 1]]])
        output = distributed(x_input, y_input)
        assert_almost_equal(output.data.numpy(), [[[5, 4], [12, 5]]]) 
Developer: allenai, Project: allennlp, Lines: 9, Source: time_distributed_test.py

Example 10: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 attend_feedforward: FeedForward,
                 similarity_function: SimilarityFunction,
                 compare_feedforward: FeedForward,
                 aggregate_feedforward: FeedForward,
                 premise_encoder: Optional[Seq2SeqEncoder] = None,
                 hypothesis_encoder: Optional[Seq2SeqEncoder] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(DecomposableAttention, self).__init__(vocab, regularizer)

        self._text_field_embedder = text_field_embedder
        self._attend_feedforward = TimeDistributed(attend_feedforward)
        self._matrix_attention = LegacyMatrixAttention(similarity_function)
        self._compare_feedforward = TimeDistributed(compare_feedforward)
        self._aggregate_feedforward = aggregate_feedforward
        self._premise_encoder = premise_encoder
        self._hypothesis_encoder = hypothesis_encoder or premise_encoder

        self._num_labels = vocab.get_vocab_size(namespace=u"labels")

        check_dimensions_match(text_field_embedder.get_output_dim(), attend_feedforward.get_input_dim(),
                               u"text field embedding dim", u"attend feedforward input dim")
        check_dimensions_match(aggregate_feedforward.get_output_dim(), self._num_labels,
                               u"final output dimension", u"number of labels")

        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: plasticityai, Project: magnitude, Lines: 33, Source: decomposable_attention.py

Example 11: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 binary_feature_dim: int,
                 embedding_dropout: float = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 label_smoothing: float = None) -> None:
        super(SemanticRoleLabeler, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(u"labels")

        # For the span based evaluation, we don't want to consider labels
        # for verb, because the verb index is provided to the model.
        self.span_metric = SpanBasedF1Measure(vocab, tag_namespace=u"labels", ignore_classes=[u"V"])

        self.encoder = encoder
        # There are exactly 2 binary features for the verb predicate embedding.
        self.binary_feature_embedding = Embedding(2, binary_feature_dim)
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))
        self.embedding_dropout = Dropout(p=embedding_dropout)
        self._label_smoothing = label_smoothing

        check_dimensions_match(text_field_embedder.get_output_dim() + binary_feature_dim,
                               encoder.get_input_dim(),
                               u"text embedding dim + verb indicator embedding dim",
                               u"encoder input dim")
        initializer(self) 
Developer: plasticityai, Project: magnitude, Lines: 32, Source: semantic_role_labeler.py

Example 12: test_time_distributed_reshapes_correctly

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def test_time_distributed_reshapes_correctly(self):
        char_embedding = Embedding(2, 2)
        char_embedding.weight = Parameter(torch.FloatTensor([[0.4, 0.4], [0.5, 0.5]]))
        distributed_embedding = TimeDistributed(char_embedding)
        char_input = torch.LongTensor([[[1, 0], [1, 1]]])
        output = distributed_embedding(char_input)
        assert_almost_equal(output.data.numpy(),
                            [[[[0.5, 0.5], [0.4, 0.4]], [[0.5, 0.5], [0.5, 0.5]]]])
Developer: plasticityai, Project: magnitude, Lines: 10, Source: time_distributed_test.py

Example 13: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 seq2seq_encoder: Seq2SeqEncoder,
                 initializer: InitializerApplicator) -> None:
        super(ProLocalModel, self).__init__(vocab)

        self.text_field_embedder = text_field_embedder
        self.seq2seq_encoder = seq2seq_encoder

        self.attention_layer = \
            Attention(similarity_function=BilinearSimilarity(2 * seq2seq_encoder.get_output_dim(),
                                                             seq2seq_encoder.get_output_dim()), normalize=True)

        self.num_types = self.vocab.get_vocab_size("state_change_type_labels")
        self.aggregate_feedforward = Linear(seq2seq_encoder.get_output_dim(),
                                            self.num_types)

        self.span_metric = SpanBasedF1Measure(vocab,
                                              tag_namespace="state_change_tags")  # by default "O" is ignored in metric computation
        self.num_tags = self.vocab.get_vocab_size("state_change_tags")

        self.tag_projection_layer = TimeDistributed(Linear(self.seq2seq_encoder.get_output_dim() + 2,
                                                           self.num_tags))
        self._type_accuracy = CategoricalAccuracy()

        self.type_f1_metrics = {}
        self.type_labels_vocab = self.vocab.get_index_to_token_vocabulary("state_change_type_labels")
        for type_label in self.type_labels_vocab.values():
            self.type_f1_metrics["type_" + type_label] = F1Measure(self.vocab.get_token_index(type_label, "state_change_type_labels"))

        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: allenai, Project: propara, Lines: 35, Source: prolocal_model.py

Example 14: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 calculate_span_f1: bool = None,
                 label_encoding: Optional[str] = None,
                 label_namespace: str = "labels",
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(SimpleTagger, self).__init__(vocab, regularizer)

        self.label_namespace = label_namespace
        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(label_namespace)
        self.encoder = encoder
        self._verbose_metrics = verbose_metrics
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               "text field embedding dim", "encoder input dim")

        # We keep calculate_span_f1 as a constructor argument for API consistency with
        # the CrfTagger, even though it is redundant in this class
        # (label_encoding serves the same purpose).
        if calculate_span_f1 and not label_encoding:
            raise ConfigurationError("calculate_span_f1 is True, but "
                                     "no label_encoding was specified.")
        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }

        if calculate_span_f1 or label_encoding:
            self._f1_metric = SpanBasedF1Measure(vocab,
                                                 tag_namespace=label_namespace,
                                                 label_encoding=label_encoding)
        else:
            self._f1_metric = None

        initializer(self) 
Developer: DreamerDeo, Project: HIT-SCIR-CoNLL2019, Lines: 43, Source: simple_tagger.py

Example 15: __init__

# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import TimeDistributed [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 span_encoder: Seq2SeqEncoder,
                 reasoning_encoder: Seq2SeqEncoder,
                 input_dropout: float = 0.3,
                 hidden_dim_maxpool: int = 1024,
                 class_embs: bool = True,
                 reasoning_use_obj: bool = True,
                 reasoning_use_answer: bool = True,
                 reasoning_use_question: bool = True,
                 pool_reasoning: bool = True,
                 pool_answer: bool = True,
                 pool_question: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 ):
        super(HGL_Model, self).__init__(vocab)

        self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=class_embs, final_dim=512)
        ###################################################################################################
        self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
        self.span_encoder = TimeDistributed(span_encoder)
        self.reasoning_encoder = TimeDistributed(reasoning_encoder)

        self.Graph_reasoning = Graph_reasoning(512)

        self.QAHG = BilinearMatrixAttention(
            matrix_1_dim=span_encoder.get_output_dim(),
            matrix_2_dim=span_encoder.get_output_dim(),
        )

        self.VAHG = BilinearMatrixAttention(
            matrix_1_dim=span_encoder.get_output_dim(),
            matrix_2_dim=self.detector.final_dim,
        )

        self.reasoning_use_obj = reasoning_use_obj
        self.reasoning_use_answer = reasoning_use_answer
        self.reasoning_use_question = reasoning_use_question
        self.pool_reasoning = pool_reasoning
        self.pool_answer = pool_answer
        self.pool_question = pool_question
        dim = sum([d for d, to_pool in [(reasoning_encoder.get_output_dim(), self.pool_reasoning),
                                        (span_encoder.get_output_dim(), self.pool_answer),
                                        (span_encoder.get_output_dim(), self.pool_question)] if to_pool])

        self.final_mlp = torch.nn.Sequential(
            torch.nn.Dropout(input_dropout, inplace=False),
            torch.nn.Linear(dim, hidden_dim_maxpool),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(input_dropout, inplace=False),
            torch.nn.Linear(hidden_dim_maxpool, 1),
        )
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        initializer(self) 
Developer: yuweijiang, Project: HGL-pytorch, Lines: 57, Source: model.py


Note: The allennlp.modules.TimeDistributed method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's License. Please do not reproduce without permission.