This article collects typical usage examples of the Python method allennlp.common.Params.pop_float. If you are unsure what Params.pop_float does, how to call it, or what it looks like in real code, the curated samples below should help. You can also read more about the containing class, allennlp.common.Params.
The following shows 11 code examples of Params.pop_float, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
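Before diving into the examples, here is a minimal, self-contained sketch (key names invented for illustration) of what pop_float itself does: it removes a key from a Params object and returns its value as a float, or returns the supplied default when the key is missing.

from allennlp.common import Params

params = Params({"dropout": 0.3, "hidden_dim": 200})
dropout = params.pop_float("dropout", 0.5)       # key present: returns 0.3 and removes it
grad_norm = params.pop_float("grad_norm", None)  # key absent: returns the default, None
hidden_dim = params.pop_int("hidden_dim")        # sibling method that coerces to int
params.assert_empty("my_component")              # raises if any keys were left unread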
Example 1: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls,
                model: Model,
                serialization_dir: str,
                iterator: DataIterator,
                train_data: Iterable[Instance],
                validation_data: Optional[Iterable[Instance]],
                params: Params,
                validation_iterator: DataIterator = None) -> 'Trainer':
    patience = params.pop_int("patience", None)
    validation_metric = params.pop("validation_metric", "-loss")
    num_epochs = params.pop_int("num_epochs", 20)
    cuda_device = params.pop_int("cuda_device", -1)
    grad_norm = params.pop_float("grad_norm", None)
    grad_clipping = params.pop_float("grad_clipping", None)
    lr_scheduler_params = params.pop("learning_rate_scheduler", None)
    if cuda_device >= 0:
        model = model.cuda(cuda_device)
    parameters = [[n, p] for n, p in model.named_parameters() if p.requires_grad]
    optimizer = Optimizer.from_params(parameters, params.pop("optimizer"))
    if lr_scheduler_params:
        scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params)
    else:
        scheduler = None
    num_serialized_models_to_keep = params.pop_int("num_serialized_models_to_keep", 20)
    keep_serialized_model_every_num_seconds = params.pop_int(
            "keep_serialized_model_every_num_seconds", None)
    model_save_interval = params.pop_float("model_save_interval", None)
    summary_interval = params.pop_int("summary_interval", 100)
    histogram_interval = params.pop_int("histogram_interval", None)
    params.assert_empty(cls.__name__)
    return Trainer(model, optimizer, iterator,
                   train_data, validation_data,
                   patience=patience,
                   validation_metric=validation_metric,
                   validation_iterator=validation_iterator,
                   num_epochs=num_epochs,
                   serialization_dir=serialization_dir,
                   cuda_device=cuda_device,
                   grad_norm=grad_norm,
                   grad_clipping=grad_clipping,
                   learning_rate_scheduler=scheduler,
                   num_serialized_models_to_keep=num_serialized_models_to_keep,
                   keep_serialized_model_every_num_seconds=keep_serialized_model_every_num_seconds,
                   model_save_interval=model_save_interval,
                   summary_interval=summary_interval,
                   histogram_interval=histogram_interval)
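For context, the trainer section of a configuration consumed by this from_params might look like the sketch below; grad_norm and model_save_interval are the values read with pop_float above. The concrete numbers and the optimizer/scheduler choices are illustrative, not defaults from any particular model.

trainer_params = Params({
    "num_epochs": 40,
    "patience": 10,
    "cuda_device": 0,
    "grad_norm": 5.0,               # read with pop_float
    "model_save_interval": 3600.0,  # seconds between intra-epoch checkpoints, read with pop_float
    "optimizer": {"type": "adam", "lr": 0.001},
    "learning_rate_scheduler": {"type": "reduce_on_plateau", "patience": 2},
})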
Example 2: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':  # type: ignore
    # pylint: disable=arguments-differ
    params.add_file_to_archive('options_file')
    params.add_file_to_archive('weight_file')
    options_file = params.pop('options_file')
    weight_file = params.pop('weight_file')
    requires_grad = params.pop('requires_grad', False)
    do_layer_norm = params.pop_bool('do_layer_norm', False)
    dropout = params.pop_float("dropout", 0.5)
    namespace_to_cache = params.pop("namespace_to_cache", None)
    if namespace_to_cache is not None:
        vocab_to_cache = list(vocab.get_token_to_index_vocabulary(namespace_to_cache).keys())
    else:
        vocab_to_cache = None
    projection_dim = params.pop_int("projection_dim", None)
    scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
    params.assert_empty(cls.__name__)
    return cls(options_file=options_file,
               weight_file=weight_file,
               do_layer_norm=do_layer_norm,
               dropout=dropout,
               requires_grad=requires_grad,
               projection_dim=projection_dim,
               vocab_to_cache=vocab_to_cache,
               scalar_mix_parameters=scalar_mix_parameters)
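The keys popped above map one-to-one onto a config block like the following sketch. The file paths are placeholders, vocab is assumed to be an already-built Vocabulary, and the numeric values are illustrative.

elmo_params = Params({
    "options_file": "/path/to/elmo_options.json",  # placeholder path
    "weight_file": "/path/to/elmo_weights.hdf5",   # placeholder path
    "do_layer_norm": False,
    "dropout": 0.5,          # read with pop_float
    "requires_grad": False,
    "projection_dim": 256,
})
elmo_embedder = ElmoTokenEmbedder.from_params(vocab, elmo_params)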
Example 3: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':
    """
    We need the vocabulary here to know how many items we need to embed, and we look for a
    ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use. If
    you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
    mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
    key directly, and the vocabulary will be ignored.
    """
    num_embeddings = params.pop_int('num_embeddings', None)
    vocab_namespace = params.pop("vocab_namespace", "tokens")
    if num_embeddings is None:
        num_embeddings = vocab.get_vocab_size(vocab_namespace)
    embedding_dim = params.pop_int('embedding_dim')
    pretrained_file = params.pop("pretrained_file", None)
    projection_dim = params.pop_int("projection_dim", None)
    trainable = params.pop_bool("trainable", True)
    padding_index = params.pop_int('padding_index', None)
    max_norm = params.pop_float('max_norm', None)
    norm_type = params.pop_float('norm_type', 2.)
    scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
    sparse = params.pop_bool('sparse', False)
    params.assert_empty(cls.__name__)
    if pretrained_file:
        # If we're loading a saved model, we don't want to actually read a pre-trained
        # embedding file - the embeddings will just be in our saved weights, and we might not
        # have the original embedding file anymore, anyway.
        weight = _read_pretrained_embedding_file(pretrained_file,
                                                 embedding_dim,
                                                 vocab,
                                                 vocab_namespace)
    else:
        weight = None
    return cls(num_embeddings=num_embeddings,
               embedding_dim=embedding_dim,
               projection_dim=projection_dim,
               weight=weight,
               padding_index=padding_index,
               trainable=trainable,
               max_norm=max_norm,
               norm_type=norm_type,
               scale_grad_by_freq=scale_grad_by_freq,
               sparse=sparse)
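A matching embedding configuration might look like this sketch; max_norm and norm_type are the two values read with pop_float and mirror the corresponding torch.nn.Embedding arguments. vocab is assumed to exist, and the numbers are illustrative.

embedding_params = Params({
    "vocab_namespace": "tokens",
    "embedding_dim": 100,
    "trainable": True,
    "max_norm": 3.0,   # pop_float: re-normalize any embedding whose norm exceeds this
    "norm_type": 2.0,  # pop_float: the p of the p-norm used together with max_norm
})
token_embedding = Embedding.from_params(vocab, embedding_params)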
Example 4: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ElmoTokenEmbedder':
    params.add_file_to_archive('options_file')
    params.add_file_to_archive('weight_file')
    options_file = params.pop('options_file')
    weight_file = params.pop('weight_file')
    requires_grad = params.pop('requires_grad', False)
    do_layer_norm = params.pop_bool('do_layer_norm', False)
    dropout = params.pop_float("dropout", 0.5)
    params.assert_empty(cls.__name__)
    return cls(options_file, weight_file, do_layer_norm, dropout, requires_grad=requires_grad)
Example 5: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':  # type: ignore
    # pylint: disable=arguments-differ
    embedding_params: Params = params.pop("embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_characters" by default.
    embedding_params.setdefault("vocab_namespace", "token_characters")
    embedding = Embedding.from_params(vocab, embedding_params)
    encoder_params: Params = params.pop("encoder")
    encoder = Seq2VecEncoder.from_params(encoder_params)
    dropout = params.pop_float("dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(embedding, encoder, dropout)
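The nested structure this method expects would look roughly like the sketch below: an "embedding" block, an "encoder" block for the Seq2VecEncoder, and a top-level "dropout" read with pop_float. The encoder type and sizes are illustrative, and vocab is assumed to exist.

token_characters_params = Params({
    "embedding": {"embedding_dim": 16},  # vocab_namespace falls back to "token_characters"
    "encoder": {
        "type": "cnn",
        "embedding_dim": 16,
        "num_filters": 64,
        "ngram_filter_sizes": [3],
    },
    "dropout": 0.2,                      # read with pop_float
})
token_characters_encoder = TokenCharactersEncoder.from_params(vocab, token_characters_params)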
Example 6: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, params: Params) -> 'Elmo':
    # Add files to archive
    params.add_file_to_archive('options_file')
    params.add_file_to_archive('weight_file')
    options_file = params.pop('options_file')
    weight_file = params.pop('weight_file')
    requires_grad = params.pop('requires_grad', False)
    num_output_representations = params.pop('num_output_representations')
    do_layer_norm = params.pop_bool('do_layer_norm', False)
    dropout = params.pop_float('dropout', 0.5)
    params.assert_empty(cls.__name__)
    return cls(options_file=options_file,
               weight_file=weight_file,
               num_output_representations=num_output_representations,
               requires_grad=requires_grad,
               do_layer_norm=do_layer_norm,
               dropout=dropout)
Example 7: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, params: Params):
    input_dim = params.pop_int('input_dim')
    hidden_dim = params.pop_int('hidden_dim')
    projection_dim = params.pop_int('projection_dim', None)
    feedforward_hidden_dim = params.pop_int("feedforward_hidden_dim")
    num_layers = params.pop_int("num_layers", 2)
    num_attention_heads = params.pop_int('num_attention_heads', 3)
    use_positional_encoding = params.pop_bool('use_positional_encoding', True)
    dropout_prob = params.pop_float("dropout_prob", 0.2)
    params.assert_empty(cls.__name__)
    return cls(input_dim=input_dim,
               hidden_dim=hidden_dim,
               feedforward_hidden_dim=feedforward_hidden_dim,
               projection_dim=projection_dim,
               num_layers=num_layers,
               num_attention_heads=num_attention_heads,
               use_positional_encoding=use_positional_encoding,
               dropout_prob=dropout_prob)
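This reads like the from_params of a stacked self-attention Seq2SeqEncoder (the class name is not shown in the snippet, so treat that as an assumption). A parameter block it would consume, with illustrative sizes, could be:

encoder_params = Params({
    "input_dim": 300,
    "hidden_dim": 300,
    "projection_dim": 128,
    "feedforward_hidden_dim": 512,
    "num_layers": 4,
    "num_attention_heads": 4,
    "dropout_prob": 0.1,  # read with pop_float
})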
Example 8: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, params: Params) -> 'AdaptiveIterator':
    adaptive_memory_usage_constant = params.pop_int('adaptive_memory_usage_constant')
    padding_memory_scaling = params.pop('padding_memory_scaling')
    maximum_batch_size = params.pop_int('maximum_batch_size', 10000)
    biggest_batch_first = params.pop_bool('biggest_batch_first', False)
    batch_size = params.pop_int('batch_size', None)
    sorting_keys = params.pop('sorting_keys', None)
    padding_noise = params.pop_float('sorting_noise', 0.2)
    instances_per_epoch = params.pop_int('instances_per_epoch', None)
    max_instances_in_memory = params.pop_int('max_instances_in_memory', None)
    params.assert_empty(cls.__name__)
    return cls(adaptive_memory_usage_constant=adaptive_memory_usage_constant,
               padding_memory_scaling=padding_memory_scaling,
               maximum_batch_size=maximum_batch_size,
               biggest_batch_first=biggest_batch_first,
               batch_size=batch_size,
               sorting_keys=sorting_keys,
               padding_noise=padding_noise,
               instances_per_epoch=instances_per_epoch,
               max_instances_in_memory=max_instances_in_memory)
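A sketch of the iterator section this would consume is below. Note the small naming quirk visible in the code: the float is popped from the key "sorting_noise" but forwarded as padding_noise. padding_memory_scaling must be a callable, so the name used here is hypothetical and assumed to be defined elsewhere; the other values are illustrative.

iterator_params = Params({
    "adaptive_memory_usage_constant": 300,
    "padding_memory_scaling": padding_memory_scaling_fn,  # hypothetical callable estimating memory per batch
    "maximum_batch_size": 10000,
    "biggest_batch_first": True,
    "sorting_keys": [["tokens", "num_tokens"]],
    "sorting_noise": 0.1,                                 # read with pop_float into padding_noise
})
iterator = AdaptiveIterator.from_params(iterator_params)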
Example 9: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2Seq':
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "tokens")
    # If no attention function is specified, we should not use attention, not attention with
    # default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio)
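A model section feeding this from_params might look like the sketch below; scheduled_sampling_ratio is the pop_float value, and attention_function is optional, as the comment in the code explains. The embedder and encoder settings are illustrative, and vocab is assumed to exist.

seq2seq_params = Params({
    "source_embedder": {"tokens": {"type": "embedding", "embedding_dim": 100}},
    "encoder": {"type": "lstm", "input_size": 100, "hidden_size": 200},
    "max_decoding_steps": 50,
    "attention_function": {"type": "dot_product"},
    "scheduled_sampling_ratio": 0.1,  # read with pop_float
})
model = SimpleSeq2Seq.from_params(vocab, seq2seq_params)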
Example 10: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, params: Params) -> 'Elmo':
    # Add files to archive
    params.add_file_to_archive('options_file')
    params.add_file_to_archive('weight_file')
    options_file = params.pop('options_file')
    weight_file = params.pop('weight_file')
    requires_grad = params.pop('requires_grad', False)
    num_output_representations = params.pop('num_output_representations')
    do_layer_norm = params.pop_bool('do_layer_norm', False)
    keep_sentence_boundaries = params.pop_bool('keep_sentence_boundaries', False)
    dropout = params.pop_float('dropout', 0.5)
    scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
    params.assert_empty(cls.__name__)
    return cls(options_file=options_file,
               weight_file=weight_file,
               num_output_representations=num_output_representations,
               requires_grad=requires_grad,
               do_layer_norm=do_layer_norm,
               keep_sentence_boundaries=keep_sentence_boundaries,
               dropout=dropout,
               scalar_mix_parameters=scalar_mix_parameters)
Example 11: from_params
# Required import: from allennlp.common import Params [as alias]
# Alternatively: from allennlp.common.Params import pop_float [as alias]
def from_params(cls, vocab: Vocabulary, params: Params) -> 'Embedding':  # type: ignore
    """
    We need the vocabulary here to know how many items we need to embed, and we look for a
    ``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use. If
    you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
    mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
    key directly, and the vocabulary will be ignored.

    In the configuration file, a file containing pretrained embeddings can be specified
    using the parameter ``"pretrained_file"``.
    It can be the path to a local file or a URL of a (cached) remote file.
    Two formats are supported:

        * hdf5 file - containing an embedding matrix in the form of a torch.Tensor;

        * text file - a utf-8 encoded text file with space separated fields::

                [word] [dim 1] [dim 2] ...

          The text file can optionally be compressed with gzip, bz2, lzma or zip.
          You can even select a single file inside an archive containing multiple files
          using the URI::

                "(archive_uri)#file_path_inside_the_archive"

          where ``archive_uri`` can be a file system path or a URL. For example::

                "(http://nlp.stanford.edu/data/glove.twitter.27B.zip)#glove.twitter.27B.200d.txt"
    """
    # pylint: disable=arguments-differ
    num_embeddings = params.pop_int('num_embeddings', None)
    vocab_namespace = params.pop("vocab_namespace", "tokens")
    if num_embeddings is None:
        num_embeddings = vocab.get_vocab_size(vocab_namespace)
    embedding_dim = params.pop_int('embedding_dim')
    pretrained_file = params.pop("pretrained_file", None)
    projection_dim = params.pop_int("projection_dim", None)
    trainable = params.pop_bool("trainable", True)
    padding_index = params.pop_int('padding_index', None)
    max_norm = params.pop_float('max_norm', None)
    norm_type = params.pop_float('norm_type', 2.)
    scale_grad_by_freq = params.pop_bool('scale_grad_by_freq', False)
    sparse = params.pop_bool('sparse', False)
    params.assert_empty(cls.__name__)
    if pretrained_file:
        # If we're loading a saved model, we don't want to actually read a pre-trained
        # embedding file - the embeddings will just be in our saved weights, and we might not
        # have the original embedding file anymore, anyway.
        weight = _read_pretrained_embeddings_file(pretrained_file,
                                                  embedding_dim,
                                                  vocab,
                                                  vocab_namespace)
    else:
        weight = None
    return cls(num_embeddings=num_embeddings,
               embedding_dim=embedding_dim,
               projection_dim=projection_dim,
               weight=weight,
               padding_index=padding_index,
               trainable=trainable,
               max_norm=max_norm,
               norm_type=norm_type,
               scale_grad_by_freq=scale_grad_by_freq,
               sparse=sparse)
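Tying the docstring to a concrete configuration, a GloVe-backed embedding block could be specified as in the sketch below; the archive URI is the one given in the docstring, and the other values are illustrative. When max_norm and norm_type are omitted, the pop_float calls above simply fall back to their defaults (None and 2.0).

embedding_params = Params({
    "pretrained_file": "(http://nlp.stanford.edu/data/glove.twitter.27B.zip)#glove.twitter.27B.200d.txt",
    "embedding_dim": 200,
    "vocab_namespace": "tokens",
    "trainable": False,
})
token_embedding = Embedding.from_params(vocab, embedding_params)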