

Python nn.InitializerApplicator Method Code Examples

This article collects typical usage examples of the Python method allennlp.nn.InitializerApplicator, gathered from open-source projects. If you are unsure what nn.InitializerApplicator does or how to use it, the curated examples below should help; you can also explore further usage examples from the allennlp.nn module.


The 15 code examples of nn.InitializerApplicator below are ordered by popularity.
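Before the individual examples, here is a minimal sketch of the pattern they all share: build an Initializer, pair it with a regex that selects parameters by name, and apply the resulting InitializerApplicator to a module. The constant initializer and the ".*" regex follow the test examples later in this article; the Linear model is just a stand-in for any torch.nn.Module.

import torch
from allennlp.common import Params
from allennlp.nn import Initializer, InitializerApplicator

# An initializer that sets every selected tensor to a constant value.
constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
# ".*" matches every parameter name, so the whole module is initialized.
initializer = InitializerApplicator([(".*", constant_init)])

model = torch.nn.Linear(5, 10)  # any torch.nn.Module works
initializer(model)              # model.weight and model.bias are now all 1.0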

Example 1: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 verbose_metrics: bool = False,
                 dropout: float = 0.2,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 ) -> None:
        super(TextClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.dropout = torch.nn.Dropout(dropout)
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim(), self.num_classes)

        self.label_accuracy = CategoricalAccuracy()
        self.label_f1_metrics = {}

        self.verbose_metrics = verbose_metrics

        for i in range(self.num_classes):
            self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
        self.loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 26, Source: bert_text_classifier.py

Example 2: test_forward_gives_correct_output

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_forward_gives_correct_output(self):
        params = Params(
            {"input_dim": 2, "output_dims": 3, "pool_sizes": 4, "dropout": 0.0, "num_layers": 2}
        )
        maxout = Maxout.from_params(params)

        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(maxout)

        input_tensor = torch.FloatTensor([[-3, 1]])
        output = maxout(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand
        # The output of the first maxout layer is [-1, -1, -1], since the
        # matrix multiply gives us [-2]*12. Reshaping and maxing
        # produces [-2, -2, -2] and the bias increments these values.
        # The second layer output is [-2, -2, -2], since the matrix
        # multiply gives us [-3]*12. Reshaping and maxing
        # produces [-3, -3, -3] and the bias increments these values.
        assert_almost_equal(output, [[-2, -2, -2]]) 
Developer: allenai, Project: allennlp, Lines: 23, Source: maxout_test.py
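As a quick sanity check on the hand-computed values in the comments above, the same arithmetic can be reproduced with plain numpy. This is a minimal sketch, independent of allennlp; it assumes the all-ones weights and biases set by the constant initializer (Linear adds the bias before pooling, but with a constant bias the order does not change the result).

import numpy as np

x = np.array([-3.0, 1.0])
# Layer 1: an all-ones Linear maps 2 -> 12, so every pre-activation is sum(x) + 1.
h1 = np.full(12, x.sum() + 1.0).reshape(3, 4).max(axis=1)    # [-1, -1, -1]
# Layer 2: same pattern, 3 -> 12.
out = np.full(12, h1.sum() + 1.0).reshape(3, 4).max(axis=1)  # [-2, -2, -2]
print(out)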

Example 3: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab,
                 text_field_embedder,
                 encoder,
                 initializer=InitializerApplicator(),
                 regularizer=None):
        super(SimpleTagger, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size(u"labels")
        self.encoder = encoder
        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                           self.num_classes))

        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                               u"text field embedding dim", u"encoder input dim")
        self.metrics = {
                u"accuracy": CategoricalAccuracy(),
                u"accuracy3": CategoricalAccuracy(top_k=3)
        }

        initializer(self)

    # overrides
Developer: plasticityai, Project: magnitude, Lines: 25, Source: simple_tagger.py

Example 4: test_forward_gives_correct_output

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_forward_gives_correct_output(self):
        params = Params({
                u'input_dim': 2,
                u'output_dims': 3,
                u'pool_sizes': 4,
                u'dropout': 0.0,
                u'num_layers': 2
                })
        maxout = Maxout.from_params(params)

        constant_init = lambda tensor: torch.nn.init.constant_(tensor, 1.)
        initializer = InitializerApplicator([(u".*", constant_init)])
        initializer(maxout)

        input_tensor = torch.FloatTensor([[-3, 1]])
        output = maxout(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand
        # The output of the first maxout layer is [-1, -1, -1], since the
        # matrix multiply gives us [-2]*12. Reshaping and maxing
        # produces [-2, -2, -2] and the bias increments these values.
        # The second layer output is [-2, -2, -2], since the matrix
        # multiply gives us [-3]*12. Reshaping and maxing
        # produces [-3, -3, -3] and the bias increments these values.
        assert_almost_equal(output, [[-2, -2, -2]]) 
Developer: plasticityai, Project: magnitude, Lines: 27, Source: maxout_test.py

Example 5: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
                 input_dim: int,
                 num_classes: int,
                 label_namespace: str = "labels",
                 feedforward: Optional[FeedForward] = None,
                 dropout: Optional[float] = None,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        self.label_namespace = label_namespace
        self.input_dim = input_dim
        self.num_classes = num_classes 
        self._verbose_metrics = verbose_metrics
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        self._feedforward = feedforward

        if self._feedforward is not None: 
            self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
        else:
            self.projection_layer = Linear(self.input_dim, self.num_classes)

        self.metrics = {
                "accuracy": CategoricalAccuracy(),
                "accuracy3": CategoricalAccuracy(top_k=3),
                "accuracy5": CategoricalAccuracy(top_k=5)
        }
        self._loss = torch.nn.CrossEntropyLoss()

        initializer(self) 
Developer: ConvLab, Project: ConvLab, Lines: 35, Source: model.py

Example 6: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: FeedForward,
                 verbose_metrics: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 ) -> None:
        super(TextClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.text_encoder = text_encoder
        self.classifier_feedforward = classifier_feedforward
        self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)

        self.label_accuracy = CategoricalAccuracy()
        self.label_f1_metrics = {}

        self.verbose_metrics = verbose_metrics

        for i in range(self.num_classes):
            self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
        self.loss = torch.nn.CrossEntropyLoss()

        self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 30, Source: text_classifier.py

Example 7: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 encoder: Seq2SeqEncoder,
                 include_start_end_transitions: bool = True,
                 dropout: Optional[float] = None,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)

        self.label_namespace = 'labels'
        self.num_tags = self.vocab.get_vocab_size(self.label_namespace)

        # encode text
        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        self.dropout = torch.nn.Dropout(dropout) if dropout else None

        # crf
        output_dim = self.encoder.get_output_dim()
        self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
        self.crf = ConditionalRandomField(self.num_tags, constraints=None, include_start_end_transitions=include_start_end_transitions)

        self.metrics = {
            "accuracy": CategoricalAccuracy(),
            "accuracy3": CategoricalAccuracy(top_k=3)
        }
        for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
            self.metrics['F1_' + label] = F1Measure(positive_label=index)

        initializer(self) 
Developer: allenai, Project: scibert, Lines: 32, Source: pico_crf_tagger.py

Example 8: __init__

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 predictor_dropout=0.0,
                 labels_namespace: str = "labels",
                 detect_namespace: str = "d_tags",
                 verbose_metrics: bool = False,
                 label_smoothing: float = 0.0,
                 confidence: float = 0.0,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(Seq2Labels, self).__init__(vocab, regularizer)

        self.label_namespaces = [labels_namespace,
                                 detect_namespace]
        self.text_field_embedder = text_field_embedder
        self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
        self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
        self.label_smoothing = label_smoothing
        self.confidence = confidence
        self.incorr_index = self.vocab.get_token_index("INCORRECT",
                                                       namespace=detect_namespace)

        self._verbose_metrics = verbose_metrics
        self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))

        self.tag_labels_projection_layer = TimeDistributed(
            Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))

        self.tag_detect_projection_layer = TimeDistributed(
            Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))

        self.metrics = {"accuracy": CategoricalAccuracy()}

        initializer(self) 
Developer: plkmo, Project: NLP_Toolkit, Lines: 36, Source: seq2labels_model.py

Example 9: test_augmented_lstm_computes_same_function_as_pytorch_lstm

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
        augmented_lstm = AugmentedLstm(10, 11)
        pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
        # Initialize all weights to be == 1.
        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(augmented_lstm)
        initializer(pytorch_lstm)

        initial_state = torch.zeros([1, 5, 11])
        initial_memory = torch.zeros([1, 5, 11])

        # Use bigger numbers to avoid floating point instability.
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(
            self.random_tensor * 5.0, self.sequence_lengths
        )
        lstm_input = pack_padded_sequence(
            sorted_tensor, sorted_sequence.data.tolist(), batch_first=True
        )

        augmented_output, augmented_state = augmented_lstm(
            lstm_input, (initial_state, initial_memory)
        )
        pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
        augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)

        numpy.testing.assert_array_almost_equal(
            pytorch_output_sequence.data.numpy(), augmented_output_sequence.data.numpy(), decimal=4
        )
        numpy.testing.assert_array_almost_equal(
            pytorch_state[0].data.numpy(), augmented_state[0].data.numpy(), decimal=4
        )
        numpy.testing.assert_array_almost_equal(
            pytorch_state[1].data.numpy(), augmented_state[1].data.numpy(), decimal=4
        ) 
Developer: allenai, Project: allennlp, Lines: 38, Source: augmented_lstm_test.py

Example 10: test_forward_gives_correct_output

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_forward_gives_correct_output(self):
        params = Params({"input_dim": 2, "hidden_dims": 3, "activations": "relu", "num_layers": 2})
        feedforward = FeedForward.from_params(params)

        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(feedforward)

        input_tensor = torch.FloatTensor([[-3, 1]])
        output = feedforward(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand - ReLU makes output after first hidden layer [0, 0, 0],
        # which then gets a bias added in the second layer to be [1, 1, 1].
        assert_almost_equal(output, [[1, 1, 1]]) 
Developer: allenai, Project: allennlp, Lines: 16, Source: feedforward_test.py

Example 11: setup_method

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def setup_method(self):
        super().setup_method()
        self.vocab = Vocabulary()
        self.vocab.add_token_to_namespace("1", "token_characters")
        self.vocab.add_token_to_namespace("2", "token_characters")
        self.vocab.add_token_to_namespace("3", "token_characters")
        self.vocab.add_token_to_namespace("4", "token_characters")
        params = Params(
            {
                "embedding": {"embedding_dim": 2, "vocab_namespace": "token_characters"},
                "encoder": {
                    "type": "cnn",
                    "embedding_dim": 2,
                    "num_filters": 4,
                    "ngram_filter_sizes": [1, 2],
                    "output_dim": 3,
                },
            }
        )
        self.encoder = TokenCharactersEncoder.from_params(vocab=self.vocab, params=deepcopy(params))
        self.embedding = Embedding.from_params(vocab=self.vocab, params=params["embedding"])
        self.inner_encoder = Seq2VecEncoder.from_params(params["encoder"])
        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(self.encoder)
        initializer(self.embedding)
        initializer(self.inner_encoder) 
Developer: allenai, Project: allennlp, Lines: 29, Source: token_characters_encoder_test.py

Example 12: test_forward_does_correct_computation

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_forward_does_correct_computation(self):
        encoder = CnnEncoder(embedding_dim=2, num_filters=1, ngram_filter_sizes=(1, 2))
        constant_init = Initializer.from_params(Params({"type": "constant", "val": 1.0}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(encoder)
        input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
        encoder_output = encoder(input_tensor, None)
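        # With all-ones weights, the width-1 filter's best window is the second
        # token: 0.1 + 1.5 = 1.6; the width-2 filter's only window covers both
        # tokens: 0.7 + 0.8 + 0.1 + 1.5 = 3.1. The all-ones bias adds 1.0 to each.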
        assert_almost_equal(
            encoder_output.data.numpy(), numpy.asarray([[1.6 + 1.0, 3.1 + 1.0]]), decimal=6
        ) 
Developer: allenai, Project: allennlp, Lines: 12, Source: cnn_encoder_test.py

Example 13: test_l1_regularization

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_l1_regularization(self):
        model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
        constant_init = Initializer.from_params(Params({"type": "constant", "val": -1}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(model)
        value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
        # 115 = (5*10 + 10) + (10*5 + 5) parameters, all equal to -1, each
        # contributing |-1| = 1 to the L1 penalty; the biases account for 15.
        assert value.data.numpy() == 115.0 
Developer: allenai, Project: allennlp, Lines: 10, Source: regularizers_test.py

Example 14: test_l2_regularization

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_l2_regularization(self):
        model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
        constant_init = Initializer.from_params(Params({"type": "constant", "val": 0.5}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(model)
        value = RegularizerApplicator([("", L2Regularizer(1.0))])(model)
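        # 28.75 = 115 * 0.5**2: all 115 parameters equal 0.5, and the L2
        # penalty sums their squares.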
        assert value.data.numpy() == 28.75 
Developer: allenai, Project: allennlp, Lines: 9, Source: regularizers_test.py

Example 15: test_frozen_params

# Required imports: from allennlp import nn [as alias]
# Or: from allennlp.nn import InitializerApplicator [as alias]
def test_frozen_params(self):
        model = torch.nn.Sequential(torch.nn.Linear(5, 10), torch.nn.Linear(10, 5))
        constant_init = Initializer.from_params(Params({"type": "constant", "val": -1}))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(model)
        # freeze the parameters of the first linear
        for name, param in model.named_parameters():
            if re.search(r"0.*$", name):
                param.requires_grad = False
        value = RegularizerApplicator([("", L1Regularizer(1.0))])(model)
        # 55 because only the unfrozen second Linear counts: 10*5 weights + 5 biases.
        assert value.data.numpy() == 55 
Developer: allenai, Project: allennlp, Lines: 14, Source: regularizers_test.py


Note: The allennlp.nn.InitializerApplicator examples above were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by their original developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not republish without permission.