

Python Params.pop_int Method Code Examples

This article collects typical usage examples of the Python method allennlp.common.Params.pop_int. If you are unsure what Params.pop_int does, how to call it, or want to see it used in practice, the curated examples below should help. You can also explore further usage examples of allennlp.common.Params, the class this method belongs to.


Fifteen code examples of the Params.pop_int method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
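
Before the examples, here is a minimal sketch of what pop_int does: it removes a key from the Params dictionary and returns its value coerced to int, or the given default if the key is absent. The key names below are invented for illustration; pop_int and assert_empty are the real Params API used throughout the examples.

from allennlp.common import Params

params = Params({"batch_size": "32", "num_layers": 2})

batch_size = params.pop_int("batch_size")        # the string "32" is coerced to the int 32
num_layers = params.pop_int("num_layers", 1)     # key present, so the default is ignored -> 2
hidden_dim = params.pop_int("hidden_dim", None)  # key absent -> returns the default, None

params.assert_empty("demo")  # raises if any keys were never popped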

Example 1: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'BasicIterator':
     batch_size = params.pop_int('batch_size', 32)
     instances_per_epoch = params.pop_int('instances_per_epoch', None)
     max_instances_in_memory = params.pop_int('max_instances_in_memory', None)
     params.assert_empty(cls.__name__)
     return cls(batch_size=batch_size,
                instances_per_epoch=instances_per_epoch,
                max_instances_in_memory=max_instances_in_memory)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 10, Source: basic_iterator.py
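
As a usage sketch (the configuration values here are hypothetical), a caller wraps a plain dict in Params and hands it to from_params; the assert_empty call above then makes any misspelled or unexpected key fail loudly instead of being silently ignored.

from allennlp.common import Params
from allennlp.data.iterators import BasicIterator  # import path in the allennlp versions shown here

iterator = BasicIterator.from_params(Params({
    "batch_size": 64,              # optional, falls back to 32
    "instances_per_epoch": 10000,  # optional, falls back to None
}))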

Example 2: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'CnnEncoder':
     embedding_dim = params.pop_int('embedding_dim')
     output_dim = params.pop_int('output_dim', None)
     num_filters = params.pop_int('num_filters')
     conv_layer_activation = Activation.by_name(params.pop("conv_layer_activation", "relu"))()
     ngram_filter_sizes = tuple(params.pop('ngram_filter_sizes', [2, 3, 4, 5]))
     params.assert_empty(cls.__name__)
     return cls(embedding_dim=embedding_dim,
                num_filters=num_filters,
                ngram_filter_sizes=ngram_filter_sizes,
                conv_layer_activation=conv_layer_activation,
                output_dim=output_dim)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 14, Source: cnn_encoder.py

Example 3: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'MultiHeadedSimilarity':
     num_heads = params.pop_int("num_heads")
     tensor_1_dim = params.pop_int("tensor_1_dim")
     tensor_1_projected_dim = params.pop_int("tensor_1_projected_dim", None)
     tensor_2_dim = params.pop_int("tensor_2_dim", None)
     tensor_2_projected_dim = params.pop_int("tensor_2_projected_dim", None)
     internal_similarity = SimilarityFunction.from_params(params.pop("internal_similarity", {}))
     params.assert_empty(cls.__name__)
     return cls(num_heads=num_heads,
                tensor_1_dim=tensor_1_dim,
                tensor_1_projected_dim=tensor_1_projected_dim,
                tensor_2_dim=tensor_2_dim,
                tensor_2_projected_dim=tensor_2_projected_dim,
                internal_similarity=internal_similarity)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 16, Source: multiheaded.py

Example 4: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'IntraSentenceAttentionEncoder':
     input_dim = params.pop_int('input_dim')
     projection_dim = params.pop_int('projection_dim', None)
     similarity_function = SimilarityFunction.from_params(params.pop('similarity_function', {}))
     num_attention_heads = params.pop_int('num_attention_heads', 1)
     combination = params.pop('combination', '1,2')
     output_dim = params.pop_int('output_dim', None)
     params.assert_empty(cls.__name__)
     return cls(input_dim=input_dim,
                projection_dim=projection_dim,
                similarity_function=similarity_function,
                num_attention_heads=num_attention_heads,
                combination=combination,
                output_dim=output_dim)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 16, Source: intra_sentence_attention.py

Example 5: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params):
     input_dim = params.pop_int('input_dim')
     num_layers = params.pop_int('num_layers')
     hidden_dims = params.pop('hidden_dims')
     activations = params.pop('activations')
     dropout = params.pop('dropout', 0.0)
     if isinstance(activations, list):
         activations = [Activation.by_name(name)() for name in activations]
     else:
         activations = Activation.by_name(activations)()
     params.assert_empty(cls.__name__)
     return cls(input_dim=input_dim,
                num_layers=num_layers,
                hidden_dims=hidden_dims,
                activations=activations,
                dropout=dropout)
Developer: apmoore1, Project: allennlp, Lines: 18, Source: feedforward.py
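
The isinstance(activations, list) branch above means a config may supply either one activation name for all layers or one name per layer. A sketch of both forms, with invented dimensions:

from allennlp.common import Params
from allennlp.modules import FeedForward

# One activation name, applied to every layer:
ff_shared = FeedForward.from_params(Params({
    "input_dim": 300, "num_layers": 2,
    "hidden_dims": [200, 100], "activations": "relu",
}))

# One activation per layer, exercising the list branch:
ff_per_layer = FeedForward.from_params(Params({
    "input_dim": 300, "num_layers": 2,
    "hidden_dims": [200, 100], "activations": ["relu", "linear"],
}))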

Example 6: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':  # type: ignore
     # pylint: disable=arguments-differ
     params.add_file_to_archive('options_file')
     params.add_file_to_archive('weight_file')
     options_file = params.pop('options_file')
     weight_file = params.pop('weight_file')
     requires_grad = params.pop('requires_grad', False)
     do_layer_norm = params.pop_bool('do_layer_norm', False)
     dropout = params.pop_float("dropout", 0.5)
     namespace_to_cache = params.pop("namespace_to_cache", None)
     if namespace_to_cache is not None:
         vocab_to_cache = list(vocab.get_token_to_index_vocabulary(namespace_to_cache).keys())
     else:
         vocab_to_cache = None
     projection_dim = params.pop_int("projection_dim", None)
     scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
     params.assert_empty(cls.__name__)
     return cls(options_file=options_file,
                weight_file=weight_file,
                do_layer_norm=do_layer_norm,
                dropout=dropout,
                requires_grad=requires_grad,
                projection_dim=projection_dim,
                vocab_to_cache=vocab_to_cache,
                scalar_mix_parameters=scalar_mix_parameters)
Developer: apmoore1, Project: allennlp, Lines: 27, Source: elmo_token_embedder.py

Example 7: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':
        """
        We need the vocabulary here to know how many items we need to embed, and we look for a
        ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use.  If
        you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
        mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
        key directly, and the vocabulary will be ignored.
        """
        num_embeddings = params.pop_int('num_embeddings', None)
        vocab_namespace = params.pop("vocab_namespace", "tokens")
        if num_embeddings is None:
            num_embeddings = vocab.get_vocab_size(vocab_namespace)
        embedding_dim = params.pop_int('embedding_dim')
        pretrained_file = params.pop("pretrained_file", None)
        projection_dim = params.pop_int("projection_dim", None)
        trainable = params.pop_bool("trainable", True)
        padding_index = params.pop_int('padding_index', None)
        max_norm = params.pop_float('max_norm', None)
        norm_type = params.pop_float('norm_type', 2.)
        scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
        sparse = params.pop_bool('sparse', False)
        params.assert_empty(cls.__name__)

        if pretrained_file:
            # If we're loading a saved model, we don't want to actually read a pre-trained
            # embedding file - the embeddings will just be in our saved weights, and we might not
            # have the original embedding file anymore, anyway.
            weight = _read_pretrained_embedding_file(pretrained_file,
                                                     embedding_dim,
                                                     vocab,
                                                     vocab_namespace)
        else:
            weight = None

        return cls(num_embeddings=num_embeddings,
                   embedding_dim=embedding_dim,
                   projection_dim=projection_dim,
                   weight=weight,
                   padding_index=padding_index,
                   trainable=trainable,
                   max_norm=max_norm,
                   norm_type=norm_type,
                   scale_grad_by_freq=scale_grad_by_freq,
                   sparse=sparse)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 46, Source: embedding.py
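
Per the docstring, num_embeddings can come either from the config or from the vocabulary. Two hypothetical config fragments illustrating that branch (only the Params side is shown; a real call also needs a Vocabulary):

from allennlp.common import Params

# Explicit size: the vocabulary is ignored for sizing.
explicit = Params({"num_embeddings": 50000, "embedding_dim": 300})

# No num_embeddings: the size is taken from vocab.get_vocab_size("tokens").
from_vocab = Params({"embedding_dim": 300, "vocab_namespace": "tokens"})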

Example 8: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'LinearSimilarity':
     tensor_1_dim = params.pop_int("tensor_1_dim")
     tensor_2_dim = params.pop_int("tensor_2_dim")
     combination = params.pop("combination", "x,y")
     activation = Activation.by_name(params.pop("activation", "linear"))()
     params.assert_empty(cls.__name__)
     return cls(tensor_1_dim=tensor_1_dim,
                tensor_2_dim=tensor_2_dim,
                combination=combination,
                activation=activation)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: linear.py

Example 9: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'LanguageModelingReader':
     tokens_per_instance = params.pop_int('tokens_per_instance', None)
     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return LanguageModelingReader(tokens_per_instance=tokens_per_instance,
                                   tokenizer=tokenizer,
                                   token_indexers=token_indexers,
                                   lazy=lazy)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 12, Source: language_modeling.py

Example 10: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
    def from_params(cls, params: Params) -> 'AdaptiveIterator':
        adaptive_memory_usage_constant = params.pop_int('adaptive_memory_usage_constant')
        padding_memory_scaling = params.pop('padding_memory_scaling')
        maximum_batch_size = params.pop_int('maximum_batch_size', 10000)
        biggest_batch_first = params.pop_bool('biggest_batch_first', False)
        batch_size = params.pop_int('batch_size', None)
        sorting_keys = params.pop('sorting_keys', None)
        padding_noise = params.pop_float('sorting_noise', 0.2)
        instances_per_epoch = params.pop_int('instances_per_epoch', None)
        max_instances_in_memory = params.pop_int('max_instances_in_memory', None)
        params.assert_empty(cls.__name__)

        return cls(adaptive_memory_usage_constant=adaptive_memory_usage_constant,
                   padding_memory_scaling=padding_memory_scaling,
                   maximum_batch_size=maximum_batch_size,
                   biggest_batch_first=biggest_batch_first,
                   batch_size=batch_size,
                   sorting_keys=sorting_keys,
                   padding_noise=padding_noise,
                   instances_per_epoch=instances_per_epoch,
                   max_instances_in_memory=max_instances_in_memory)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 23, Source: adaptive_iterator.py

Example 11: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
    def from_params(cls,
                    model: Model,
                    serialization_dir: str,
                    iterator: DataIterator,
                    train_data: Iterable[Instance],
                    validation_data: Optional[Iterable[Instance]],
                    params: Params,
                    validation_iterator: DataIterator = None) -> 'Trainer':

        patience = params.pop_int("patience", None)
        validation_metric = params.pop("validation_metric", "-loss")
        num_epochs = params.pop_int("num_epochs", 20)
        cuda_device = params.pop_int("cuda_device", -1)
        grad_norm = params.pop_float("grad_norm", None)
        grad_clipping = params.pop_float("grad_clipping", None)
        lr_scheduler_params = params.pop("learning_rate_scheduler", None)

        if cuda_device >= 0:
            model = model.cuda(cuda_device)
        parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
        optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))

        if lr_scheduler_params:
            scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
        else:
            scheduler = None

        num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
        keep_serialized_model_every_num_seconds = params.pop_int(
                "keep_serialized_model_every_num_seconds", None)
        model_save_interval = params.pop_float("model_save_interval", None)
        summary_interval = params.pop_int("summary_interval", 100)
        histogram_interval = params.pop_int("histogram_interval", None)

        params.assert_empty(cls.__name__)
        return Trainer(model, optimizer, iterator,
                       train_data, validation_data,
                       patience=patience,
                       validation_metric=validation_metric,
                       validation_iterator=validation_iterator,
                       num_epochs=num_epochs,
                       serialization_dir=serialization_dir,
                       cuda_device=cuda_device,
                       grad_norm=grad_norm,
                       grad_clipping=grad_clipping,
                       learning_rate_scheduler=scheduler,
                       num_serialized_models_to_keep=num_serialized_models_to_keep,
                       keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
                       model_save_interval=model_save_interval,
                       summary_interval=summary_interval,
                       histogram_interval=histogram_interval)
Developer: pyknife, Project: allennlp, Lines: 53, Source: trainer.py

Example 12: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
    def from_params(cls, params: Params):
        input_dim = params.pop_int('input_dim')
        hidden_dim = params.pop_int('hidden_dim')
        projection_dim = params.pop_int('projection_dim', None)
        feedforward_hidden_dim = params.pop_int("feedforward_hidden_dim")
        num_layers = params.pop_int("num_layers", 2)
        num_attention_heads = params.pop_int('num_attention_heads', 3)
        use_positional_encoding = params.pop_bool('use_positional_encoding', True)
        dropout_prob = params.pop_float("dropout_prob", 0.2)
        params.assert_empty(cls.__name__)

        return cls(input_dim=input_dim,
                   hidden_dim=hidden_dim,
                   feedforward_hidden_dim=feedforward_hidden_dim,
                   projection_dim=projection_dim,
                   num_layers=num_layers,
                   num_attention_heads=num_attention_heads,
                   use_positional_encoding=use_positional_encoding,
                   dropout_prob=dropout_prob)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 21, Source: stacked_self_attention.py

Example 13: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':  # type: ignore
        """
        We need the vocabulary here to know how many items we need to embed, and we look for a
        ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use.  If
        you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
        mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
        key directly, and the vocabulary will be ignored.

        In the configuration file, a file containing pretrained embeddings can be specified
        using the parameter ``"pretrained_file"``.
        It can be the path to a local file or a URL of a (cached) remote file.
        Two formats are supported:

            * hdf5 file - containing an embedding matrix in the form of a torch.Tensor;

            * text file - a utf-8 encoded text file with space separated fields::

                    [word] [dim 1] [dim 2] ...

              The text file can optionally be compressed with gzip, bz2, lzma or zip.
              You can even select a single file inside an archive containing multiple files
              using the URI::

                    "(archive_uri)#file_path_inside_the_archive"

              where ``archive_uri`` can be a file system path or a URL. For example::

                    "(http://nlp.stanford.edu/data/glove.twitter.27B.zip)#glove.twitter.27B.200d.txt"
        """
        # pylint: disable=arguments-differ
        num_embeddings = params.pop_int('num_embeddings', None)
        vocab_namespace = params.pop("vocab_namespace", "tokens")
        if num_embeddings is None:
            num_embeddings = vocab.get_vocab_size(vocab_namespace)
        embedding_dim = params.pop_int('embedding_dim')
        pretrained_file = params.pop("pretrained_file", None)
        projection_dim = params.pop_int("projection_dim", None)
        trainable = params.pop_bool("trainable", True)
        padding_index = params.pop_int('padding_index', None)
        max_norm = params.pop_float('max_norm', None)
        norm_type = params.pop_float('norm_type', 2.)
        scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
        sparse = params.pop_bool('sparse', False)
        params.assert_empty(cls.__name__)

        if pretrained_file:
            # If we're loading a saved model, we don't want to actually read a pre-trained
            # embedding file - the embeddings will just be in our saved weights, and we might not
            # have the original embedding file anymore, anyway.
            weight = _read_pretrained_embeddings_file(pretrained_file,
                                                      embedding_dim,
                                                      vocab,
                                                      vocab_namespace)
        else:
            weight = None

        return cls(num_embeddings=num_embeddings,
                   embedding_dim=embedding_dim,
                   projection_dim=projection_dim,
                   weight=weight,
                   padding_index=padding_index,
                   trainable=trainable,
                   max_norm=max_norm,
                   norm_type=norm_type,
                   scale_grad_by_freq=scale_grad_by_freq,
                   sparse=sparse)
Developer: pyknife, Project: allennlp, Lines: 68, Source: embedding.py
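
A config fragment using the archive-URI syntax the docstring describes; the GloVe URL is the docstring's own example, and the trainable flag is added here only for illustration:

from allennlp.common import Params

embedding_params = Params({
    "embedding_dim": 200,
    "pretrained_file":
        "(http://nlp.stanford.edu/data/glove.twitter.27B.zip)#glove.twitter.27B.200d.txt",
    "trainable": False,
})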

Example 14: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> "WinobiasReader":
     token_indexers = TokenIndexer.dict_from_params(params.pop("token_indexers", {}))
     max_span_width = params.pop_int("max_span_width")
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return cls(token_indexers=token_indexers, max_span_width=max_span_width, lazy=lazy)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 8, Source: winobias.py

Example 15: from_params

# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_int [as alias]
 def from_params(cls, params: Params) -> 'BagOfEmbeddingsEncoder':
     embedding_dim = params.pop_int('embedding_dim')
     averaged = params.pop_bool('averaged', default=None)
     params.assert_empty(cls.__name__)
     return cls(embedding_dim=embedding_dim,
                averaged=averaged)
Developer: Jordan-Sauchuk, Project: allennlp, Lines: 8, Source: boe_encoder.py


Note: The allennlp.common.Params.pop_int method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not republish without permission.