當前位置: 首頁>>代碼示例>>Python>>正文


Python params.Params方法代碼示例

本文整理匯總了Python中allennlp.common.params.Params方法的典型用法代碼示例。如果您正苦於以下問題:Python params.Params方法的具體用法?Python params.Params怎麽用?Python params.Params使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在allennlp.common.params的用法示例。


在下文中一共展示了params.Params方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_from_params_extend_config

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_from_params_extend_config(self):
    """Extending a vocabulary requires both a source directory and instances."""
    vocab_dir = self.TEST_DIR / "vocab_save"
    source_vocab = Vocabulary(non_padded_namespaces=["tokens"])
    source_vocab.add_token_to_namespace("a", namespace="tokens")
    source_vocab.save_to_files(vocab_dir)

    field = TextField(
        [Token(t) for t in ["a", "b"]], {"tokens": SingleIdTokenIndexer("tokens")}
    )
    batch = Batch([Instance({"text": field})])

    # If you ask to extend vocab from `directory`, instances must be passed
    # in Vocabulary constructor, or else there is nothing to extend to.
    with pytest.raises(ConfigurationError):
        Vocabulary.from_params(Params({"type": "extend", "directory": vocab_dir}))

    # If you ask to extend vocab, `directory` key must be present in params,
    # or else there is nothing to extend from.
    with pytest.raises(ConfigurationError):
        Vocabulary.from_params(Params({"type": "extend"}), instances=batch)
開發者ID:allenai,項目名稱:allennlp,代碼行數:25,代碼來源:vocabulary_test.py

示例2: test_max_vocab_size_partial_dict

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_max_vocab_size_partial_dict(self):
    """A partial ``max_vocab_size`` dict caps only the namespaces it names."""
    token_indexers = {
        "tokens": SingleIdTokenIndexer(),
        "token_characters": TokenCharactersIndexer(min_padding_length=3),
    }
    words = [Token(w) for w in "Abc def ghi jkl mno pqr stu vwx yz".split(" ")]
    dataset = Batch([Instance({"text": TextField(words, token_indexers)})])

    vocab = Vocabulary.from_params(
        params=Params({"max_vocab_size": {"tokens": 1}}), instances=dataset
    )
    # "tokens" is capped at 1 token (plus padding/OOV); characters are uncapped.
    assert len(vocab.get_index_to_token_vocabulary("tokens").values()) == 3  # 1 + 2
    assert len(vocab.get_index_to_token_vocabulary("token_characters").values()) == 28  # 26 + 2
開發者ID:allenai,項目名稱:allennlp,代碼行數:20,代碼來源:vocabulary_test.py

示例3: test_custom_padding_oov_tokens

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_custom_padding_oov_tokens(self):
    """Custom padding/OOV tokens are honored and must match again on reload."""
    vocab = Vocabulary(oov_token="[UNK]")
    assert vocab._oov_token == "[UNK]"
    assert vocab._padding_token == "@@PADDING@@"

    vocab = Vocabulary(padding_token="[PAD]")
    assert vocab._oov_token == "@@UNKNOWN@@"
    assert vocab._padding_token == "[PAD]"

    vocab_dir = self.TEST_DIR / "vocab_save"
    vocab = Vocabulary(oov_token="<UNK>")
    vocab.add_tokens_to_namespace(["a0", "a1", "a2"], namespace="a")
    vocab.save_to_files(vocab_dir)

    # Reloading succeeds when the matching custom OOV token is supplied...
    vocab = Vocabulary.from_params(
        Params({"type": "from_files", "directory": vocab_dir, "oov_token": "<UNK>"})
    )

    # ...but fails when the default OOV token cannot be found in the files.
    with pytest.raises(AssertionError) as excinfo:
        vocab = Vocabulary.from_params(Params({"type": "from_files", "directory": vocab_dir}))
    assert "OOV token not found!" in str(excinfo.value)
開發者ID:allenai,項目名稱:allennlp,代碼行數:23,代碼來源:vocabulary_test.py

示例4: test_from_params

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_from_params(self):
    """A ``pretrained`` initializer loads weights and optional name overrides."""
    initializer = Initializer.from_params(
        Params({"type": "pretrained", "weights_file_path": self.temp_file})
    )
    assert initializer.weights
    # No overrides supplied -> empty mapping.
    assert initializer.parameter_name_overrides == {}

    overrides = {"a": "b", "c": "d"}
    initializer = Initializer.from_params(
        Params(
            {
                "type": "pretrained",
                "weights_file_path": self.temp_file,
                "parameter_name_overrides": overrides,
            }
        )
    )
    assert initializer.weights
    assert initializer.parameter_name_overrides == overrides
開發者ID:allenai,項目名稱:allennlp,代碼行數:19,代碼來源:pretrained_model_initializer_test.py

示例5: test_exponential_works_properly

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_exponential_works_properly(self):
    """The exponential scheduler multiplies the LR by gamma each epoch."""
    sgd = Optimizer.from_params(
        model_parameters=self.model.named_parameters(),
        params=Params({"type": "sgd", "lr": 1.0}),
    )
    scheduler = LearningRateScheduler.from_params(
        optimizer=sgd, params=Params({"type": "exponential", "gamma": 0.5})
    )
    optimizer = scheduler.lr_scheduler.optimizer
    optimizer.step()  # to avoid a pytorch warning
    # Initial learning rate should be unchanged for first epoch.
    assert optimizer.param_groups[0]["lr"] == 1.0
    for epoch in range(1, 4):
        scheduler.step()
        assert optimizer.param_groups[0]["lr"] == 0.5 ** epoch
開發者ID:allenai,項目名稱:allennlp,代碼行數:20,代碼來源:learning_rate_scheduler_test.py

示例6: setup_method

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def setup_method(self):
    """Build the shared fixture: tagging data, its vocab, and a sparse-embedding tagger."""
    super().setup_method()
    self.instances = SequenceTaggingDatasetReader().read(
        self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
    )
    self.vocab = Vocabulary.from_instances(self.instances)
    embedder_config = {
        "token_embedders": {
            "tokens": {"type": "embedding", "embedding_dim": 5, "sparse": True}
        }
    }
    encoder_config = {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2}
    self.model_params = Params(
        {"text_field_embedder": embedder_config, "encoder": encoder_config}
    )
    self.model = SimpleTagger.from_params(vocab=self.vocab, params=self.model_params)
開發者ID:allenai,項目名稱:allennlp,代碼行數:19,代碼來源:optimizer_test.py

示例7: test_registered_subclass

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_registered_subclass(self):
    """
    Tests that registering Checkpointer subclasses works correctly.
    """

    @Checkpointer.register("checkpointer_subclass")
    class CheckpointerSubclass(Checkpointer):
        def __init__(self, x: int, y: int) -> None:
            super().__init__()
            self.x = x
            self.y = y

    checkpointer = Checkpointer.from_params(
        Params({"type": "checkpointer_subclass", "x": 1, "y": 3})
    )
    # from_params must resolve the registered name to the subclass and
    # forward the constructor arguments.
    assert checkpointer.__class__ == CheckpointerSubclass
    assert checkpointer.x == 1
    assert checkpointer.y == 3
開發者ID:allenai,項目名稱:allennlp,代碼行數:19,代碼來源:checkpointer_test.py

示例8: test_all_datasets_read_for_vocab

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_all_datasets_read_for_vocab(self, caplog):
    """Vocabulary creation reads the train, validation, and test datasets."""
    params = Params(
        {
            "dataset_reader": {"type": "train-util-test-reader"},
            "train_data_path": "path-to-training-file",
            "validation_data_path": "path-to-validation-file",
            "test_data_path": "path-to-test-file",
        }
    )
    make_vocab_from_params(params, str(self.TEST_DIR))
    log_messages = "\n".join(record.message for record in caplog.records)
    expected_fragments = [
        "...train-util-test-reader reading from path-to-training-file",
        "...train-util-test-reader reading from path-to-validation-file",
        "...train-util-test-reader reading from path-to-test-file",
        "Reading training data",
        "Reading validation data",
        "Reading test data",
    ]
    for fragment in expected_fragments:
        assert fragment in log_messages
開發者ID:allenai,項目名稱:allennlp,代碼行數:19,代碼來源:util_test.py

示例9: test_from_params_extend_config

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_from_params_extend_config(self):
    """Extending a vocab needs both a `directory_path` and instances."""
    vocab_dir = self.TEST_DIR / u'vocab_save'
    source_vocab = Vocabulary(non_padded_namespaces=[u"tokens"])
    source_vocab.add_token_to_namespace(u"a", namespace=u"tokens")
    source_vocab.save_to_files(vocab_dir)

    field = TextField([Token(t) for t in [u"a", u"b"]],
                      {u"tokens": SingleIdTokenIndexer(u"tokens")})
    batch = Batch([Instance({u"text": field})])

    # If you ask to extend vocab from `directory_path`, instances must be passed
    # in Vocabulary constructor, or else there is nothing to extend to.
    with pytest.raises(ConfigurationError):
        Vocabulary.from_params(Params({u"directory_path": vocab_dir, u"extend": True}))

    # If you ask to extend vocab, `directory_path` key must be present in params,
    # or else there is nothing to extend from.
    with pytest.raises(ConfigurationError):
        Vocabulary.from_params(Params({u"extend": True}), batch)
開發者ID:plasticityai,項目名稱:magnitude,代碼行數:24,代碼來源:vocabulary_test.py

示例10: test_registrability

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_registrability(self):
    """
    Tests that a class registered under a custom type name is the one
    `Vocabulary.from_params` instantiates.

    Bug fix: the registration line was mis-indented inside `from_params`
    *after* its `return`, so it was unreachable and the `my-vocabulary`
    type was never registered. It must run at method level, right after
    the class definition.
    """

    class MyVocabulary(object):
        @classmethod
        def from_params(cls, params, instances=None):
            # pylint: disable=unused-argument
            return MyVocabulary()

    # Register the class so that Params({'type': 'my-vocabulary'}) resolves to it.
    MyVocabulary = Vocabulary.register(u'my-vocabulary')(MyVocabulary)

    params = Params({u'type': u'my-vocabulary'})

    instance = Instance(fields={})

    vocab = Vocabulary.from_params(params=params, instances=[instance])

    assert isinstance(vocab, MyVocabulary)
開發者ID:plasticityai,項目名稱:magnitude,代碼行數:20,代碼來源:vocabulary_test.py

示例11: setUp

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def setUp(self):
    """Build the optimizer-test fixture: tagging data, a vocab, and a SimpleTagger."""
    super(TestOptimizer, self).setUp()
    self.instances = SequenceTaggingDatasetReader().read(self.FIXTURES_ROOT / u'data' / u'sequence_tagging.tsv')
    vocab = Vocabulary.from_instances(self.instances)
    embedder_config = {u"tokens": {u"type": u"embedding", u"embedding_dim": 5}}
    encoder_config = {u"type": u"lstm", u"input_size": 5, u"hidden_size": 7, u"num_layers": 2}
    self.model_params = Params({
            u"text_field_embedder": embedder_config,
            u"encoder": encoder_config
            })
    self.model = SimpleTagger.from_params(vocab=vocab, params=self.model_params)
開發者ID:plasticityai,項目名稱:magnitude,代碼行數:21,代碼來源:optimizer_test.py

示例12: setUp

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def setUp(self):
    """Build the trainer-test fixture: data, vocab, model, SGD optimizer, iterator."""
    super(TestTrainer, self).setUp()
    self.instances = SequenceTaggingDatasetReader().read(self.FIXTURES_ROOT / u'data' / u'sequence_tagging.tsv')
    self.vocab = Vocabulary.from_instances(self.instances)
    vocab = self.vocab
    embedder_config = {u"tokens": {u"type": u"embedding", u"embedding_dim": 5}}
    encoder_config = {u"type": u"lstm", u"input_size": 5, u"hidden_size": 7, u"num_layers": 2}
    self.model_params = Params({
            u"text_field_embedder": embedder_config,
            u"encoder": encoder_config
            })
    self.model = SimpleTagger.from_params(vocab=self.vocab, params=self.model_params)
    self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01)
    self.iterator = BasicIterator(batch_size=2)
    # The iterator needs the vocab to index instances during iteration.
    self.iterator.index_with(vocab)
開發者ID:plasticityai,項目名稱:magnitude,代碼行數:25,代碼來源:trainer_test.py

示例13: remove_pretrained_embedding_params

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def remove_pretrained_embedding_params(params: Params):
    """Recursively delete every ``"pretrained_file"`` entry from `params` in place."""
    if not isinstance(params, Params):  # The model could possibly be a string, for example.
        return
    if "pretrained_file" in params.keys():
        del params["pretrained_file"]
    for child in params.values():
        if isinstance(child, Params):
            remove_pretrained_embedding_params(child)
開發者ID:allenai,項目名稱:allennlp,代碼行數:10,代碼來源:model.py

示例14: prepare_environment

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def prepare_environment(params: Params):
    """
    Seed all random number generators so experiments are reproducible.

    This may not work as expected if you use this from within a python
    project in which you have already imported Pytorch. If you use the
    scripts/run_model.py entry point to training models with this library,
    your experiments should be reasonably reproducible. If you are using
    this from your own project, you will want to call this function before
    importing Pytorch. Complete determinism is very difficult to achieve
    with libraries doing optimized linear algebra due to massively parallel
    execution, which is exacerbated by using GPUs.

    # Parameters

    params: `Params`
        A `Params` object or dict holding the json parameters.
    """
    seeds = {
        "python": params.pop_int("random_seed", 13370),
        "numpy": params.pop_int("numpy_seed", 1337),
        "torch": params.pop_int("pytorch_seed", 133),
    }

    if seeds["python"] is not None:
        random.seed(seeds["python"])
    if seeds["numpy"] is not None:
        numpy.random.seed(seeds["numpy"])
    if seeds["torch"] is not None:
        torch.manual_seed(seeds["torch"])
        # Seed all GPUs with the same seed if available.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seeds["torch"])

    log_pytorch_version_info()
開發者ID:allenai,項目名稱:allennlp,代碼行數:32,代碼來源:util.py

示例15: test_stacked_bidirectional_lstm_can_build_from_params

# 需要導入模塊: from allennlp.common import params [as 別名]
# 或者: from allennlp.common.params import Params [as 別名]
def test_stacked_bidirectional_lstm_can_build_from_params(self):
    """A stacked bidirectional LSTM built from Params reports correct dimensions."""
    config = {
        "type": "stacked_bidirectional_lstm",
        "input_size": 5,
        "hidden_size": 9,
        "num_layers": 3,
    }
    encoder = Seq2SeqEncoder.from_params(Params(config))

    assert encoder.get_input_dim() == 5
    # Bidirectional output concatenates both directions: 2 * hidden_size.
    assert encoder.get_output_dim() == 18
    assert encoder.is_bidirectional
開發者ID:allenai,項目名稱:allennlp,代碼行數:16,代碼來源:stacked_bidirectional_lstm_test.py


注:本文中的allennlp.common.params.Params方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。