

Python Params.assert_empty Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.Params.assert_empty. If you have been wondering what Params.assert_empty does, how to use it, or where to find working examples, the curated samples below should help. You can also explore further usage examples of the containing class, allennlp.common.Params.


Below are 15 code examples of the Params.assert_empty method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
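
Before the examples, here is a minimal sketch (not taken from any project below, with invented config keys) of the pattern they all share: pop every expected key from a Params object, then call assert_empty so that any leftover, unrecognized key raises a ConfigurationError instead of being silently ignored.

# Minimal sketch of the pop-then-assert_empty pattern (invented keys;
# "lazzy" is a deliberate typo standing in for a config mistake).
from allennlp.common import Params

params = Params({"batch_size": 32, "lazzy": True})
batch_size = params.pop_int("batch_size", 32)  # consumes "batch_size"
lazy = params.pop_bool("lazy", False)          # "lazzy" stays unconsumed
params.assert_empty("MyReader")  # raises ConfigurationError naming "lazzy"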

Example 1: extend_from_instances

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def extend_from_instances(self,
                              params: Params,
                              instances: Iterable['adi.Instance'] = ()) -> None:
        """
        Extends an already generated vocabulary using a collection of instances.
        """
        min_count = params.pop("min_count", None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop("pretrained_files", {})
        min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
        only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
        tokens_to_add = params.pop("tokens_to_add", None)
        params.assert_empty("Vocabulary - from dataset")

        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts,
                     min_count=min_count,
                     max_vocab_size=max_vocab_size,
                     non_padded_namespaces=non_padded_namespaces,
                     pretrained_files=pretrained_files,
                     only_include_pretrained_words=only_include_pretrained_words,
                     tokens_to_add=tokens_to_add,
                     min_pretrained_embeddings=min_pretrained_embeddings)
Contributor: ziaridoy20, Project: allennlp, Lines: 29, Source: vocabulary.py
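
A hypothetical call site for the method above (the reader and file path are placeholders; every key this method pops has a default, so an empty Params also passes the final assert_empty check):

# Hypothetical usage (placeholder reader and path, not from the source
# project). All keys popped above have defaults, so empty Params is valid.
extra_instances = reader.read("extra_data.txt")
vocab.extend_from_instances(Params({}), instances=extra_instances)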

Example 2: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        if premise_encoder_params is not None:
            premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
        else:
            premise_encoder = None

        hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
        if hypothesis_encoder_params is not None:
            hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
        else:
            hypothesis_encoder = None

        attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
        similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
        compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
        aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   attend_feedforward=attend_feedforward,
                   similarity_function=similarity_function,
                   compare_feedforward=compare_feedforward,
                   aggregate_feedforward=aggregate_feedforward,
                   premise_encoder=premise_encoder,
                   hypothesis_encoder=hypothesis_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 36, Source: decomposable_attention.py

Example 3: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

        feed_forward_params = params.pop("feedforward", None)
        if feed_forward_params is not None:
            feedforward_layer = FeedForward.from_params(feed_forward_params)
        else:
            feedforward_layer = None
        pos_tag_embedding_params = params.pop("pos_tag_embedding", None)
        if pos_tag_embedding_params is not None:
            pos_tag_embedding = Embedding.from_params(vocab, pos_tag_embedding_params)
        else:
            pos_tag_embedding = None
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        evalb_directory_path = params.pop("evalb_directory_path", None)
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   span_extractor=span_extractor,
                   encoder=encoder,
                   feedforward_layer=feedforward_layer,
                   pos_tag_embedding=pos_tag_embedding,
                   initializer=initializer,
                   regularizer=regularizer,
                   evalb_directory_path=evalb_directory_path)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 32, Source: constituency_parser.py

Example 4: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':  # type: ignore
        # pylint: disable=arguments-differ
        params.add_file_to_archive('options_file')
        params.add_file_to_archive('weight_file')
        options_file = params.pop('options_file')
        weight_file = params.pop('weight_file')
        requires_grad = params.pop('requires_grad', False)
        do_layer_norm = params.pop_bool('do_layer_norm', False)
        dropout = params.pop_float("dropout", 0.5)
        namespace_to_cache = params.pop("namespace_to_cache", None)
        if namespace_to_cache is not None:
            vocab_to_cache = list(vocab.get_token_to_index_vocabulary(namespace_to_cache).keys())
        else:
            vocab_to_cache = None
        projection_dim = params.pop_int("projection_dim", None)
        scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
        params.assert_empty(cls.__name__)
        return cls(options_file=options_file,
                   weight_file=weight_file,
                   do_layer_norm=do_layer_norm,
                   dropout=dropout,
                   requires_grad=requires_grad,
                   projection_dim=projection_dim,
                   vocab_to_cache=vocab_to_cache,
                   scalar_mix_parameters=scalar_mix_parameters)
Contributor: apmoore1, Project: allennlp, Lines: 27, Source: elmo_token_embedder.py

Example 5: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'WordSplitter':
        language = params.pop('language', 'en_core_web_sm')
        pos_tags = params.pop_bool('pos_tags', False)
        parse = params.pop_bool('parse', False)
        ner = params.pop_bool('ner', False)
        params.assert_empty(cls.__name__)
        return cls(language, pos_tags, parse, ner)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 9, Source: word_splitter.py

Example 6: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BasicTextFieldEmbedder':
        token_embedders = {}
        keys = list(params.keys())
        for key in keys:
            embedder_params = params.pop(key)
            token_embedders[key] = TokenEmbedder.from_params(vocab, embedder_params)
        params.assert_empty(cls.__name__)
        return cls(token_embedders)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: basic_text_field_embedder.py

Example 7: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'PennTreeBankConstituencySpanDatasetReader':
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        use_pos_tags = params.pop('use_pos_tags', True)
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return PennTreeBankConstituencySpanDatasetReader(token_indexers=token_indexers,
                                                         use_pos_tags=use_pos_tags,
                                                         lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: penn_tree_bank.py

Example 8: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'SrlReader':
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        domain_identifier = params.pop("domain_identifier", None)
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return SrlReader(token_indexers=token_indexers,
                         domain_identifier=domain_identifier,
                         lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: semantic_role_labeling.py

Example 9: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'SnliReader':
        tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return SnliReader(tokenizer=tokenizer,
                          token_indexers=token_indexers,
                          lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: snli.py

Example 10: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'BasicIterator':
        batch_size = params.pop_int('batch_size', 32)
        instances_per_epoch = params.pop_int('instances_per_epoch', None)
        max_instances_in_memory = params.pop_int('max_instances_in_memory', None)
        params.assert_empty(cls.__name__)
        return cls(batch_size=batch_size,
                   instances_per_epoch=instances_per_epoch,
                   max_instances_in_memory=max_instances_in_memory)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: basic_iterator.py

Example 11: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':
        params.add_file_to_archive('options_file')
        params.add_file_to_archive('weight_file')
        options_file = params.pop('options_file')
        weight_file = params.pop('weight_file')
        requires_grad = params.pop('requires_grad', False)
        do_layer_norm = params.pop_bool('do_layer_norm', False)
        dropout = params.pop_float("dropout", 0.5)
        params.assert_empty(cls.__name__)
        return cls(options_file, weight_file, do_layer_norm, dropout, requires_grad=requires_grad)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: elmo_token_embedder.py

Example 12: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'Conll2003DatasetReader':
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        tag_label = params.pop('tag_label', None)
        feature_labels = params.pop('feature_labels', ())
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return Conll2003DatasetReader(token_indexers=token_indexers,
                                      tag_label=tag_label,
                                      feature_labels=feature_labels,
                                      lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: conll2003.py

Example 13: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'LanguageModelingReader':
        tokens_per_instance = params.pop_int('tokens_per_instance', None)
        tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return LanguageModelingReader(tokens_per_instance=tokens_per_instance,
                                      tokenizer=tokenizer,
                                      token_indexers=token_indexers,
                                      lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: language_modeling.py

Example 14: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'LinearSimilarity':
        tensor_1_dim = params.pop_int("tensor_1_dim")
        tensor_2_dim = params.pop_int("tensor_2_dim")
        combination = params.pop("combination", "x,y")
        activation = Activation.by_name(params.pop("activation", "linear"))()
        params.assert_empty(cls.__name__)
        return cls(tensor_1_dim=tensor_1_dim,
                   tensor_2_dim=tensor_2_dim,
                   combination=combination,
                   activation=activation)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: linear.py

Example 15: from_params

# Required imports: from allennlp.common import Params [as alias]
# Or: from allennlp.common.Params import assert_empty [as alias]
    def from_params(cls, params: Params) -> 'SequenceTaggingDatasetReader':
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        word_tag_delimiter = params.pop("word_tag_delimiter", DEFAULT_WORD_TAG_DELIMITER)
        token_delimiter = params.pop("token_delimiter", None)
        lazy = params.pop('lazy', False)
        params.assert_empty(cls.__name__)
        return SequenceTaggingDatasetReader(token_indexers=token_indexers,
                                            word_tag_delimiter=word_tag_delimiter,
                                            token_delimiter=token_delimiter,
                                            lazy=lazy)
Contributor: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: sequence_tagging.py


Note: The allennlp.common.Params.assert_empty examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.