

Python common.Params Code Examples

This article collects typical usage examples of the allennlp.common.Params class in Python. If you are wondering what common.Params is for, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore the allennlp.common module it belongs to for further usage examples.


The following shows 15 code examples of common.Params, sorted by popularity by default.
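Before diving in, here is a minimal sketch of the core ``Params`` workflow that all of the examples below share: wrap a (possibly nested) configuration dict, ``pop`` values off as you consume them, and optionally assert that nothing is left over. The keys are illustrative only, not taken from any real config.

from allennlp.common import Params

# Wrap a nested configuration dictionary.
params = Params({"encoder": {"type": "lstm"}, "dropout": 0.5})

# pop() consumes a key (with an optional default); nested dicts come back as Params.
dropout = params.pop("dropout", 0.0)       # 0.5
encoder_params = params.pop("encoder")     # Params({"type": "lstm"})
encoder_type = encoder_params.pop("type")  # "lstm"

# Once everything is consumed, assert_empty() catches typos in config keys.
params.assert_empty("ExampleComponent")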

Example 1: from_params

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def from_params(cls, params: Params) -> 'ArcMultiChoiceWithFactsTextJsonReaderMultiSource':
        # read tokenizers
        field_tokenizers = tokenizer_dict_from_params(params.get('tokenizers', {}))
        token_indexers = token_indexer_dict_from_params(params.get('token_indexers', {}))

        # external knowledge
        external_knowledge_params = params.pop('external_knowledge')

        choice_value_type = params.get('choice_value_type', None)
        question_value_type = params.get('question_value_type', None)

        no_relevant_fact_add = params.get('no_relevant_fact_add', False)
        no_relevant_fact_text = params.get('no_relevant_fact_text', NO_RELEVANT_FACT_TEXT)

        lazy = params.pop('lazy', False)
        # params.assert_empty(cls.__name__)

        return ArcMultiChoiceWithFactsTextJsonReaderMultiSource(field_tokenizers=field_tokenizers,
                                                                token_indexers=token_indexers,
                                                                external_know_config=external_knowledge_params,
                                                                choice_value_type=choice_value_type,
                                                                question_value_type=question_value_type,
                                                                no_relevant_fact_add=no_relevant_fact_add,
                                                                no_relevant_fact_text=no_relevant_fact_text,
                                                                lazy=lazy) 
Developer: allenai | Project: OpenBookQA | Lines: 27 | Source: arc_multichoice_with_facts_text_json_reader_multi_source.py
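For reference, a ``Params`` blob that the reader above could consume might look like the sketch below. The key names mirror exactly what Example 1 pops and gets; the values (the tokenizer/indexer types and the external-knowledge block) are hypothetical placeholders, not taken from the actual OpenBookQA configs.

params = Params({
    "tokenizers": {"default": {"type": "word"}},           # hypothetical tokenizer config
    "token_indexers": {"tokens": {"type": "single_id"}},   # hypothetical indexer config
    "external_knowledge": {"sources": ["openbook"]},       # hypothetical; popped and passed through whole
    "choice_value_type": None,
    "question_value_type": None,
    "no_relevant_fact_add": False,
    "lazy": False,
})
reader = ArcMultiChoiceWithFactsTextJsonReaderMultiSource.from_params(params)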

Example 2: tokenizer_dict_from_params

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def tokenizer_dict_from_params(params: Params) -> 'Dict[str, Tokenizer]':  # type: ignore
    """
    We typically use ``Tokenizer``s in a dictionary, with each ``Tokenizer`` getting a
    name.  The specification for this in a ``Params`` object is typically ``{"name" ->
    {tokenizer_params}}``.  This method reads that whole set of parameters and returns a
    dictionary suitable for use in a ``TextField``.

    Because default values for tokenizers are typically handled in the class that calls
    this method and are based on checking for ``None``, if no parameters specify any
    tokenizers in the given ``params``, we return ``None`` instead of an empty dictionary.
    """
    tokenizers = {}
    for name, tokenizer_params in params.items():
        tokenizers[name] = Tokenizer.from_params(tokenizer_params)
    if tokenizers == {}:
        tokenizers = None
    return tokenizers 
Developer: allenai | Project: OpenBookQA | Lines: 19 | Source: common.py

Example 3: token_indexer_dict_from_params

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def token_indexer_dict_from_params(params: Params) -> 'Dict[str, TokenIndexer]':  # type: ignore
    """
    We typically use ``TokenIndexers`` in a dictionary, with each ``TokenIndexer`` getting a
    name.  The specification for this in a ``Params`` object is typically ``{"name" ->
    {indexer_params}}``.  This method reads that whole set of parameters and returns a
    dictionary suitable for use in a ``TextField``.

    Because default values for token indexers are typically handled in the class that calls
    this method and are based on checking for ``None``, if no parameters specify any
    token indexers in the given ``params``, we return ``None`` instead of an empty dictionary.
    """
    token_indexers = {}
    for name, indexer_params in params.items():
        token_indexers[name] = TokenIndexer.from_params(indexer_params)
    if token_indexers == {}:
        token_indexers = None
    return token_indexers 
Developer: allenai | Project: OpenBookQA | Lines: 19 | Source: common.py
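Examples 2 and 3 follow the same ``{"name": {sub_params}}`` pattern described in their docstrings. A usage sketch, assuming the 0.x-era AllenNLP registry names "word" (tokenizer) and "single_id" (token indexer) that these OpenBookQA helpers were written against:

params = Params({
    "tokenizers": {"default": {"type": "word"}},
    "token_indexers": {"tokens": {"type": "single_id"}},
})
field_tokenizers = tokenizer_dict_from_params(params.get("tokenizers", {}))        # {"default": <WordTokenizer>}
token_indexers = token_indexer_dict_from_params(params.get("token_indexers", {}))  # {"tokens": <SingleIdTokenIndexer>}

# With no entries at all, both helpers return None rather than an empty dict.
assert tokenizer_dict_from_params(Params({})) is None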

Example 4: update_params

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def update_params(params: Params,
                  update_dict: Dict[str, Any],
                  update_if_exists: bool = False):
    """
    Updates an AllenNLP ``Params`` object. This is used to automatically update the
    parameters of components before passing them to a ``from_params`` method.
    :param params: The params object to update
    :param update_dict: The parameters to update. This is a flat dictionary of
    {"key1->key2": "value"} that updates the params hierarchically, where key1 is a parent of key2
    :param update_if_exists: If True, update a parameter only if it already exists.
    :return: A new ``Params`` object built from the updated dictionary.
    """
    params_dict = params.as_dict()

    if not update_if_exists:
        params_dict.update(update_dict)
    else:
        for k, v in update_dict.items():
            if k in params_dict:
                params_dict[k] = v
            elif "->" in k:
                try_set_val_by_hier_key(params_dict, clean_split(k, "->"), v, False)

    return Params(params_dict) 
Developer: allenai | Project: OpenBookQA | Lines: 26 | Source: common_utils.py
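A usage sketch for ``update_params``, showing both a flat top-level update and a hierarchical "key1->key2" update (the config keys and values are made up for illustration):

params = Params({"trainer": {"num_epochs": 10}, "batch_size": 32})

# Flat update: merged into the top level unconditionally.
params = update_params(params, {"batch_size": 64})

# Hierarchical update: "trainer->num_epochs" reaches into the nested dict.
# With update_if_exists=True, the "->" key is routed through try_set_val_by_hier_key.
params = update_params(params, {"trainer->num_epochs": 20}, update_if_exists=True)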

Example 5: from_params

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        label_namespace = params.pop("label_namespace", "labels")
        constraint_type = params.pop("constraint_type", None)
        dropout = params.pop("dropout", None)
        include_start_end_transitions = params.pop("include_start_end_transitions", True)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   encoder=encoder,
                   label_namespace=label_namespace,
                   constraint_type=constraint_type,
                   dropout=dropout,
                   include_start_end_transitions=include_start_end_transitions,
                   initializer=initializer,
                   regularizer=regularizer) 
Developer: arthurmensch | Project: didyprog | Lines: 24 | Source: crf_tagger.py
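The keys below mirror exactly what Example 5 pops; the embedder and encoder settings are hypothetical placeholders in the old ``from_params(vocab, params)`` style this snippet targets (modern AllenNLP constructs models from config differently):

vocab = Vocabulary()  # minimal vocab, just for illustration
params = Params({
    "text_field_embedder": {"tokens": {"type": "embedding", "embedding_dim": 50}},  # hypothetical
    "encoder": {"type": "lstm", "input_size": 50, "hidden_size": 100},              # hypothetical
    "label_namespace": "labels",
    "constraint_type": "BIO",
    "dropout": 0.5,
    "include_start_end_transitions": True,
})
tagger = CrfTagger.from_params(vocab, params)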

Example 6: setup_method

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def setup_method(self):
        super().setup_method()
        self.params = lambda: Params(
            {
                "model": {
                    "type": "simple_tagger",
                    "text_field_embedder": {
                        "token_embedders": {"tokens": {"type": "embedding", "embedding_dim": 5}}
                    },
                    "encoder": {"type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2},
                },
                "dataset_reader": {"type": "sequence_tagging"},
                "train_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
                "validation_data_path": str(self.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"),
                "data_loader": {"batch_size": 2},
                "trainer": {"cuda_device": -1, "num_epochs": 2, "optimizer": "adam"},
            }
        ) 
Developer: allenai | Project: allennlp | Lines: 20 | Source: find_learning_rate_test.py

Example 7: test_find_learning_rate_multi_gpu

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_find_learning_rate_multi_gpu(self):
        params = self.params()
        del params["trainer"]["cuda_device"]
        params["distributed"] = Params({})
        params["distributed"]["cuda_devices"] = [0, 1]

        with pytest.raises(AssertionError) as execinfo:
            find_learning_rate_model(
                params,
                os.path.join(self.TEST_DIR, "test_find_learning_rate_multi_gpu"),
                start_lr=1e-5,
                end_lr=1,
                num_batches=100,
                linear_steps=True,
                stopping_factor=None,
                force=False,
            )
        assert "DistributedDataParallel" in str(execinfo.value) 
Developer: allenai | Project: allennlp | Lines: 20 | Source: find_learning_rate_test.py

Example 8: test_train_model

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_train_model(self):
        params = lambda: Params(
            {
                "model": {"type": "constant"},
                "dataset_reader": {"type": "sequence_tagging"},
                "train_data_path": SEQUENCE_TAGGING_DATA_PATH,
                "validation_data_path": SEQUENCE_TAGGING_DATA_PATH,
                "data_loader": {"batch_size": 2},
                "trainer": {"type": "no_op"},
            }
        )

        serialization_dir = self.TEST_DIR / "serialization_directory"
        train_model(params(), serialization_dir=serialization_dir)
        archive = load_archive(str(serialization_dir / "model.tar.gz"))
        model = archive.model
        assert model.forward(torch.tensor([1, 2, 3]))["class"] == torch.tensor(98)
        assert model.vocab.get_vocab_size() == 9 
Developer: allenai | Project: allennlp | Lines: 20 | Source: no_op_train_test.py

Example 9: test_from_params_builders_encoder_correctly

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_from_params_builders_encoder_correctly(self):
        # We're just making sure parameters get passed through correctly here, and that the basic
        # API works.
        params = Params(
            {
                "type": "lstm",
                "bidirectional": True,
                "num_layers": 3,
                "input_size": 5,
                "hidden_size": 7,
            }
        )
        encoder = Seq2VecEncoder.from_params(params)

        assert encoder.__class__.__name__ == "LstmSeq2VecEncoder"
        assert encoder._module.__class__.__name__ == "LSTM"
        assert encoder._module.num_layers == 3
        assert encoder._module.input_size == 5
        assert encoder._module.hidden_size == 7
        assert encoder._module.bidirectional is True
        assert encoder._module.batch_first is True 
Developer: allenai | Project: allennlp | Lines: 23 | Source: seq2vec_encoder_test.py

Example 10: test_forward_does_an_additive_product

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_forward_does_an_additive_product(self):
        params = Params({"vector_dim": 2, "matrix_dim": 3, "normalize": False})
        additive = AdditiveAttention.from_params(params)
        additive._w_matrix = Parameter(torch.Tensor([[-0.2, 0.3], [-0.5, 0.5]]))
        additive._u_matrix = Parameter(torch.Tensor([[0.0, 1.0], [1.0, 1.0], [1.0, -1.0]]))
        additive._v_vector = Parameter(torch.Tensor([[1.0], [-1.0]]))
        vectors = torch.FloatTensor([[0.7, -0.8], [0.4, 0.9]])
        matrices = torch.FloatTensor(
            [
                [[1.0, -1.0, 3.0], [0.5, -0.3, 0.0], [0.2, -1.0, 1.0], [0.7, 0.8, -1.0]],
                [[-2.0, 3.0, -3.0], [0.6, 0.2, 2.0], [0.5, -0.4, -1.0], [0.2, 0.2, 0.0]],
            ]
        )
        result = additive(vectors, matrices).detach().numpy()
        assert result.shape == (2, 4)
        assert_almost_equal(
            result,
            [
                [1.975072, -0.04997836, 1.2176098, -0.9205586],
                [-1.4851665, 1.489604, -1.890285, -1.0672251],
            ],
        ) 
Developer: allenai | Project: allennlp | Lines: 24 | Source: additive_attention_test.py
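What the test verifies is Bahdanau-style additive attention: each score is v^T tanh(q W + k U), with W, U, and v pinned to the hand-picked parameters above. As a sanity check (a sketch using numpy, not AllenNLP), recomputing the first entry by hand reproduces the expected 1.975072:

import numpy as np

q = np.array([0.7, -0.8])                            # vectors[0]
W = np.array([[-0.2, 0.3], [-0.5, 0.5]])             # _w_matrix
k = np.array([1.0, -1.0, 3.0])                       # matrices[0][0]
U = np.array([[0.0, 1.0], [1.0, 1.0], [1.0, -1.0]])  # _u_matrix
v = np.array([1.0, -1.0])                            # _v_vector, flattened

score = v @ np.tanh(q @ W + k @ U)
print(score)  # ~1.975072, matching result[0][0]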

Example 11: test_forward_works_with_projection_layer

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_forward_works_with_projection_layer(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("the")
        vocab.add_token_to_namespace("a")
        params = Params(
            {
                "pretrained_file": str(
                    self.FIXTURES_ROOT / "embeddings/glove.6B.300d.sample.txt.gz"
                ),
                "embedding_dim": 300,
                "projection_dim": 20,
            }
        )
        embedding_layer = Embedding.from_params(params, vocab=vocab)
        input_tensor = torch.LongTensor([[3, 2, 1, 0]])
        embedded = embedding_layer(input_tensor).data.numpy()
        assert embedded.shape == (1, 4, 20)

        input_tensor = torch.LongTensor([[[3, 2, 1, 0]]])
        embedded = embedding_layer(input_tensor).data.numpy()
        assert embedded.shape == (1, 1, 4, 20) 
Developer: allenai | Project: allennlp | Lines: 23 | Source: embedding_test.py

Example 12: test_embedding_layer_actually_initializes_word_vectors_correctly

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word")
        vocab.add_token_to_namespace("word2")
        unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
        vocab.add_token_to_namespace(unicode_space)
        embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
        with gzip.open(embeddings_filename, "wb") as embeddings_file:
            embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
            embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
        params = Params({"pretrained_file": embeddings_filename, "embedding_dim": 3})
        embedding_layer = Embedding.from_params(params, vocab=vocab)
        word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
        assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
        word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
        assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
        word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
        assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0])) 
Developer: allenai | Project: allennlp | Lines: 20 | Source: embedding_test.py

Example 13: test_embedding_vocab_extension_with_specified_namespace

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_embedding_vocab_extension_with_specified_namespace(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word1", "tokens_a")
        vocab.add_token_to_namespace("word2", "tokens_a")
        embedding_params = Params({"vocab_namespace": "tokens_a", "embedding_dim": 10})
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        original_weight = embedder.weight

        assert original_weight.shape[0] == 4

        extension_counter = {"tokens_a": {"word3": 1}}
        vocab._extend(extension_counter)

        embedder.extend_vocab(vocab, "tokens_a")  # specified namespace

        extended_weight = embedder.weight
        assert extended_weight.shape[0] == 5
        assert torch.all(extended_weight[:4, :] == original_weight[:4, :]) 
Developer: allenai | Project: allennlp | Lines: 20 | Source: embedding_test.py

Example 14: test_embedding_vocab_extension_with_default_namespace

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_embedding_vocab_extension_with_default_namespace(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word1")
        vocab.add_token_to_namespace("word2")
        embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        original_weight = embedder.weight

        assert original_weight.shape[0] == 4

        extension_counter = {"tokens": {"word3": 1}}
        vocab._extend(extension_counter)

        embedder.extend_vocab(vocab)  # default namespace

        extended_weight = embedder.weight
        assert extended_weight.shape[0] == 5
        assert torch.all(extended_weight[:4, :] == original_weight[:4, :]) 
Developer: allenai | Project: allennlp | Lines: 20 | Source: embedding_test.py

Example 15: test_embedding_vocab_extension_is_no_op_when_extension_should_not_happen

# Required import: from allennlp import common [as alias]
# Or: from allennlp.common import Params [as alias]
def test_embedding_vocab_extension_is_no_op_when_extension_should_not_happen(self):
        # Case 1: When the vocab is already in sync with the embeddings, this should be a no-op.
        vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
        embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        original_weight = embedder.weight
        embedder.extend_vocab(vocab, "tokens")
        assert torch.all(embedder.weight == original_weight)

        # Case 2: Shouldn't wrongly assume the "tokens" namespace for extension if no
        # information on vocab_namespace is available. Rather, log a warning and be a no-op.
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word1", "tokens")
        vocab.add_token_to_namespace("word2", "tokens")
        embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        # Previous models won't have _vocab_namespace attribute. Force it to be None
        embedder._vocab_namespace = None
        embedder.weight = torch.nn.Parameter(embedder.weight[:1, :])
        assert embedder.weight.shape[0] == 1
        embedder.extend_vocab(vocab)  # Don't specify namespace
        assert embedder.weight.shape[0] == 1 
Developer: allenai | Project: allennlp | Lines: 24 | Source: embedding_test.py


Note: the allennlp.common.Params examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before using or redistributing the code; do not reproduce without permission.