

Python Params.pop Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.Params.pop. If you are wondering what Params.pop does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of its containing class, allennlp.common.Params.


Below, 15 code examples of the Params.pop method are shown, sorted by popularity by default.
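
Before the examples, here is a minimal sketch of the pattern they all share: each from_params-style method pops its known keys off a Params object, optionally with a typed variant such as pop_int, pop_bool, or pop_float and a default value, then calls assert_empty so that any misspelled or leftover key raises an error. The config keys in this sketch are hypothetical, not taken from any of the examples below.

from allennlp.common import Params

params = Params({"lazy": False, "dropout": 0.5})  # hypothetical config
lazy = params.pop("lazy", False)            # plain pop with a default value
dropout = params.pop_float("dropout", 0.0)  # typed variant coerces to float
params.assert_empty("MyReader")             # raises ConfigurationError if keys remain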

Example 1: extend_from_instances

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
    def extend_from_instances(self,
                              params: Params,
                              instances: Iterable['adi.Instance'] = ()) -> None:
        """
        Extends an already generated vocabulary using a collection of instances.
        """
        min_count = params.pop("min_count", None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop("pretrained_files", {})
        min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
        only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
        tokens_to_add = params.pop("tokens_to_add", None)
        params.assert_empty("Vocabulary - from dataset")

        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts,
                     min_count=min_count,
                     max_vocab_size=max_vocab_size,
                     non_padded_namespaces=non_padded_namespaces,
                     pretrained_files=pretrained_files,
                     only_include_pretrained_words=only_include_pretrained_words,
                     tokens_to_add=tokens_to_add,
                     min_pretrained_embeddings=min_pretrained_embeddings)
Author: ziaridoy20, Project: allennlp, Lines: 29, Source: vocabulary.py
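
A hypothetical call for Example 1, assuming an existing Vocabulary object; the parameter values and new_instances are illustrative only:

extension_params = Params({"min_count": {"tokens": 2}, "non_padded_namespaces": []})
vocab.extend_from_instances(extension_params, instances=new_instances)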

Example 2: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':  # type: ignore
     # pylint: disable=arguments-differ
     params.add_file_to_archive('options_file')
     params.add_file_to_archive('weight_file')
     options_file = params.pop('options_file')
     weight_file = params.pop('weight_file')
     requires_grad = params.pop('requires_grad', False)
     do_layer_norm = params.pop_bool('do_layer_norm', False)
     dropout = params.pop_float("dropout", 0.5)
     namespace_to_cache = params.pop("namespace_to_cache", None)
     if namespace_to_cache is not None:
         vocab_to_cache = list(vocab.get_token_to_index_vocabulary(namespace_to_cache).keys())
     else:
         vocab_to_cache = None
     projection_dim = params.pop_int("projection_dim", None)
     scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
     params.assert_empty(cls.__name__)
     return cls(options_file=options_file,
                weight_file=weight_file,
                do_layer_norm=do_layer_norm,
                dropout=dropout,
                requires_grad=requires_grad,
                projection_dim=projection_dim,
                vocab_to_cache=vocab_to_cache,
                scalar_mix_parameters=scalar_mix_parameters)
Author: apmoore1, Project: allennlp, Lines: 27, Source: elmo_token_embedder.py
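
A hypothetical Params block that Example 2 could consume; the file paths are placeholders, and in practice would point to pretrained ELMo options and weights:

elmo_params = Params({
    "options_file": "/path/to/elmo_options.json",  # placeholder path
    "weight_file": "/path/to/elmo_weights.hdf5",   # placeholder path
    "do_layer_norm": False,
    "dropout": 0.5,
})
embedder = ElmoTokenEmbedder.from_params(vocab, elmo_params)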

Example 3: datasets_from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
def datasets_from_params(params: Params) -> Dict[str, Iterable[Instance]]:
    """
    Load all the datasets specified by the config.
    """
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
    validation_dataset_reader_params = params.pop("validation_dataset_reader", None)

    validation_and_test_dataset_reader: DatasetReader = dataset_reader
    if validation_dataset_reader_params is not None:
        logger.info("Using a separate dataset reader to load validation and test data.")
        validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)

    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    train_data = dataset_reader.read(train_data_path)

    datasets: Dict[str, Iterable[Instance]] = {"train": train_data}

    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        validation_data = validation_and_test_dataset_reader.read(validation_data_path)
        datasets["validation"] = validation_data

    test_data_path = params.pop("test_data_path", None)
    if test_data_path is not None:
        logger.info("Reading test data from %s", test_data_path)
        test_data = validation_and_test_dataset_reader.read(test_data_path)
        datasets["test"] = test_data

    return datasets
Author: Jordan-Sauchuk, Project: allennlp, Lines: 33, Source: train.py
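
For reference, a hypothetical training config that datasets_from_params could consume; the reader type and data paths here are placeholders:

params = Params({
    "dataset_reader": {"type": "sequence_tagging"},  # any registered reader
    "train_data_path": "/path/to/train.tsv",         # placeholder path
    "validation_data_path": "/path/to/dev.tsv",      # placeholder path
})
datasets = datasets_from_params(params)
# -> {"train": ..., "validation": ...}; "test" is added only if test_data_path is given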

Example 4: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'SrlReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     domain_identifier = params.pop("domain_identifier", None)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return SrlReader(token_indexers=token_indexers,
                      domain_identifier=domain_identifier,
                      lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: semantic_role_labeling.py

Example 5: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
            def from_params(cls, params: Params) -> 'B':
                params.add_file_to_archive("filename")

                filename = params.pop("filename")
                c_params = params.pop("c")
                c = C.from_params(c_params)

                return cls(filename, c)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: params_test.py

Example 6: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'SnliReader':
     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return SnliReader(tokenizer=tokenizer,
                       token_indexers=token_indexers,
                       lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: snli.py

Example 7: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'PennTreeBankConstituencySpanDatasetReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     use_pos_tags = params.pop('use_pos_tags', True)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return PennTreeBankConstituencySpanDatasetReader(token_indexers=token_indexers,
                                                      use_pos_tags=use_pos_tags,
                                                      lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: penn_tree_bank.py

Example 8: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':
     params.add_file_to_archive('options_file')
     params.add_file_to_archive('weight_file')
     options_file = params.pop('options_file')
     weight_file = params.pop('weight_file')
     requires_grad = params.pop('requires_grad', False)
     do_layer_norm = params.pop_bool('do_layer_norm', False)
     dropout = params.pop_float("dropout", 0.5)
     params.assert_empty(cls.__name__)
     return cls(options_file, weight_file, do_layer_norm, dropout, requires_grad=requires_grad)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: elmo_token_embedder.py

Example 9: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'CharacterTokenizer':
     byte_encoding = params.pop('byte_encoding', None)
     lowercase_characters = params.pop('lowercase_characters', False)
     start_tokens = params.pop('start_tokens', None)
     end_tokens = params.pop('end_tokens', None)
     params.assert_empty(cls.__name__)
     return cls(byte_encoding=byte_encoding,
                lowercase_characters=lowercase_characters,
                start_tokens=start_tokens,
                end_tokens=end_tokens)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: character_tokenizer.py

Example 10: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'LanguageModelingReader':
     tokens_per_instance = params.pop_int('tokens_per_instance', None)
     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return LanguageModelingReader(tokens_per_instance=tokens_per_instance,
                                   tokenizer=tokenizer,
                                   token_indexers=token_indexers,
                                   lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: language_modeling.py

Example 11: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'Conll2003DatasetReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     tag_label = params.pop('tag_label', None)
     feature_labels = params.pop('feature_labels', ())
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return Conll2003DatasetReader(token_indexers=token_indexers,
                                   tag_label=tag_label,
                                   feature_labels=feature_labels,
                                   lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: conll2003.py

Example 12: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'SequenceTaggingDatasetReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     word_tag_delimiter = params.pop("word_tag_delimiter", DEFAULT_WORD_TAG_DELIMITER)
     token_delimiter = params.pop("token_delimiter", None)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return SequenceTaggingDatasetReader(token_indexers=token_indexers,
                                         word_tag_delimiter=word_tag_delimiter,
                                         token_delimiter=token_delimiter,
                                         lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: sequence_tagging.py

Example 13: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'LinearSimilarity':
     tensor_1_dim = params.pop_int("tensor_1_dim")
     tensor_2_dim = params.pop_int("tensor_2_dim")
     combination = params.pop("combination", "x,y")
     activation = Activation.by_name(params.pop("activation", "linear"))()
     params.assert_empty(cls.__name__)
     return cls(tensor_1_dim=tensor_1_dim,
                tensor_2_dim=tensor_2_dim,
                combination=combination,
                activation=activation)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: linear.py
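
A hypothetical config for Example 13; note that pop_int raises if tensor_1_dim or tensor_2_dim is missing, since no default is given:

similarity = LinearSimilarity.from_params(Params({
    "tensor_1_dim": 200,
    "tensor_2_dim": 200,
    "combination": "x,y,x*y",  # illustrative combination string
}))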

Example 14: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, params: Params) -> 'StanfordSentimentTreeBankDatasetReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     use_subtrees = params.pop('use_subtrees', False)
     granularity = params.pop_choice('granularity', ["5-class", "3-class", "2-class"], True)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return StanfordSentimentTreeBankDatasetReader(
             token_indexers=token_indexers,
             use_subtrees=use_subtrees,
             granularity=granularity,
             lazy=lazy)
Author: Jordan-Sauchuk, Project: allennlp, Lines: 13, Source: stanford_sentiment_tree_bank.py
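
Example 14 is the only one above that uses pop_choice: the popped value must be one of the listed choices, and the trailing True means it defaults to the first choice ("5-class") when the key is absent. A hypothetical config:

reader = StanfordSentimentTreeBankDatasetReader.from_params(Params({
    "granularity": "2-class",  # must be one of "5-class", "3-class", "2-class"
}))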

Example 15: from_params

# Module to import: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import pop [as alias]
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':  # type: ignore
     # pylint: disable=arguments-differ
     embedding_params: Params = params.pop("embedding")
     # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
     # that to be "token_characters" by default.
     embedding_params.setdefault("vocab_namespace", "token_characters")
     embedding = Embedding.from_params(vocab, embedding_params)
     encoder_params: Params = params.pop("encoder")
     encoder = Seq2VecEncoder.from_params(encoder_params)
     dropout = params.pop_float("dropout", 0.0)
     params.assert_empty(cls.__name__)
     return cls(embedding, encoder, dropout)
Author: apmoore1, Project: allennlp, Lines: 14, Source: token_characters_encoder.py
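
A hypothetical nested config for Example 15; the sub-dictionaries become the Params objects popped as "embedding" and "encoder", and the encoder settings shown assume the registered "cnn" Seq2VecEncoder:

tce_params = Params({
    "embedding": {"embedding_dim": 16},  # vocab_namespace defaults to "token_characters"
    "encoder": {"type": "cnn", "embedding_dim": 16,
                "num_filters": 32, "ngram_filter_sizes": [3]},
    "dropout": 0.2,
})
encoder = TokenCharactersEncoder.from_params(vocab, tce_params)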


Note: The allennlp.common.Params.pop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright belongs to the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.