本文整理汇总了Python中allennlp.common.Params类的典型用法代码示例。如果您正苦于以下问题:Python Params类的具体用法?Python Params怎么用?Python Params使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Params类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: from_params
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    """Construct a ``DecomposableAttention`` model from a ``Params`` config.

    Consumes all expected keys from ``params`` and verifies nothing is left over.
    """
    text_field_embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))

    # The premise/hypothesis encoders are optional; absent config means no encoder.
    premise_params = params.pop("premise_encoder", None)
    premise_encoder = (Seq2SeqEncoder.from_params(premise_params)
                       if premise_params is not None else None)
    hypothesis_params = params.pop("hypothesis_encoder", None)
    hypothesis_encoder = (Seq2SeqEncoder.from_params(hypothesis_params)
                          if hypothesis_params is not None else None)

    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))

    # Default to empty applicators when no initializer/regularizer config is given.
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               regularizer=regularizer)
示例2: from_params
def from_params(self, params: Params) -> PytorchSeq2SeqWrapper:
    """Instantiate the wrapped pytorch module from ``params`` and wrap it.

    Raises ``ConfigurationError`` if the config requests ``batch_first=False``,
    since the wrapper assumes batch-first tensors throughout.
    """
    batch_first = params.pop_bool('batch_first', True)
    if not batch_first:
        raise ConfigurationError("Our encoder semantics assumes batch is always first!")
    # Built-in pytorch RNN modules need the flag passed through explicitly.
    if self._module_class in self.PYTORCH_MODELS:
        params['batch_first'] = True
    module = self._module_class(**params.as_dict())
    return PytorchSeq2SeqWrapper(module)
示例3: create_serialization_dir
def create_serialization_dir(params: Params, serialization_dir: str, recover: bool) -> None:
    """
    This function creates the serialization directory if it doesn't exist. If it already exists,
    then it verifies that we're recovering from a training with an identical configuration.

    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    """
    if os.path.exists(serialization_dir):
        if serialization_dir == '/output':
            # Special-casing the beaker output directory, which will already exist when training
            # starts.
            return
        if not recover:
            raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists. "
                                     f"Specify --recover to recover training from existing output.")
        logger.info(f"Recovering from prior training at {serialization_dir}.")
        recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(recovered_config_file):
            raise ConfigurationError("The serialization directory already exists but doesn't "
                                     "contain a config.json. You probably gave the wrong directory.")
        else:
            loaded_params = Params.from_file(recovered_config_file)
            # Check whether any of the training configuration differs from the configuration we are
            # resuming. If so, warn the user that training may fail.
            fail = False
            # Compare flattened views so nested keys are checked individually.
            flat_params = params.as_flat_dict()
            flat_loaded = loaded_params.as_flat_dict()
            # Keys present only in the new config.
            for key in flat_params.keys() - flat_loaded.keys():
                logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                             f"directory we're recovering from.")
                fail = True
            # Keys present only in the recovered config.
            for key in flat_loaded.keys() - flat_params.keys():
                logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                             f"but not in the training config.")
                fail = True
            # Keys present in both, but with differing values.
            for key in flat_params.keys():
                if flat_params.get(key, None) != flat_loaded.get(key, None):
                    logger.error(f"Value for '{key}' in training configuration does not match that the value in "
                                 f"the serialization directory we're recovering from: "
                                 f"{flat_params[key]} != {flat_loaded[key]}")
                    fail = True
            # All mismatches are logged first, then we fail once, so the user sees
            # every problem in a single run.
            if fail:
                raise ConfigurationError("Training configuration does not match the configuration we're "
                                         "recovering from.")
    else:
        if recover:
            raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                     "does not exist. There is nothing to recover from.")
        os.makedirs(serialization_dir)
示例4: datasets_from_params
def datasets_from_params(params: Params) -> Dict[str, Iterable[Instance]]:
    """
    Load all the datasets specified by the config.

    Returns a dict with a mandatory ``"train"`` entry and optional
    ``"validation"``/``"test"`` entries, depending on which paths are configured.
    """
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))

    # Use a dedicated reader for validation/test when one is configured;
    # otherwise reuse the training reader.
    validation_and_test_dataset_reader: DatasetReader = dataset_reader
    validation_reader_params = params.pop("validation_dataset_reader", None)
    if validation_reader_params is not None:
        logger.info("Using a separate dataset reader to load validation and test data.")
        validation_and_test_dataset_reader = DatasetReader.from_params(validation_reader_params)

    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    datasets: Dict[str, Iterable[Instance]] = {"train": dataset_reader.read(train_data_path)}

    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        datasets["validation"] = validation_and_test_dataset_reader.read(validation_data_path)

    test_data_path = params.pop("test_data_path", None)
    if test_data_path is not None:
        logger.info("Reading test data from %s", test_data_path)
        datasets["test"] = validation_and_test_dataset_reader.read(test_data_path)

    return datasets
示例5: from_params
def from_params(cls, params: Params) -> 'BasicIterator':
    """Build a ``BasicIterator``, consuming all expected keys from ``params``."""
    constructor_kwargs = {
        'batch_size': params.pop_int('batch_size', 32),
        'instances_per_epoch': params.pop_int('instances_per_epoch', None),
        'max_instances_in_memory': params.pop_int('max_instances_in_memory', None),
    }
    params.assert_empty(cls.__name__)
    return cls(**constructor_kwargs)
示例6: from_params
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BasicTextFieldEmbedder':
    """Create one ``TokenEmbedder`` per top-level key of ``params``.

    Each key names an embedder; its sub-params configure that embedder.
    """
    # Snapshot the keys first, since popping mutates ``params`` while we iterate.
    token_embedders = {
        name: TokenEmbedder.from_params(vocab, params.pop(name))
        for name in list(params.keys())
    }
    params.assert_empty(cls.__name__)
    return cls(token_embedders)
示例7: from_params
def from_params(cls, params: Params) -> 'SnliReader':
    """Build an ``SnliReader`` from config; empty dicts give default components."""
    snli_tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
    indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    is_lazy = params.pop('lazy', False)
    params.assert_empty(cls.__name__)
    return SnliReader(tokenizer=snli_tokenizer, token_indexers=indexers, lazy=is_lazy)
示例8: from_params
def from_params(cls, params: Params) -> 'SrlReader':
    """Build an ``SrlReader`` from config, consuming all expected keys."""
    indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    identifier = params.pop("domain_identifier", None)
    is_lazy = params.pop('lazy', False)
    params.assert_empty(cls.__name__)
    return SrlReader(token_indexers=indexers,
                     domain_identifier=identifier,
                     lazy=is_lazy)
示例9: from_params
def from_params(cls, params: Params) -> 'PennTreeBankConstituencySpanDatasetReader':
    """Build a PTB constituency-span reader from config."""
    indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
    with_pos_tags = params.pop('use_pos_tags', True)
    is_lazy = params.pop('lazy', False)
    params.assert_empty(cls.__name__)
    return PennTreeBankConstituencySpanDatasetReader(token_indexers=indexers,
                                                     use_pos_tags=with_pos_tags,
                                                     lazy=is_lazy)
示例10: from_params
def from_params(cls, params: Params) -> 'B':
    """Build a ``B``, registering its file dependency with the archive."""
    # Register the file before popping it, so it is tracked for archiving.
    params.add_file_to_archive("filename")
    file_name = params.pop("filename")
    c_object = C.from_params(params.pop("c"))
    return cls(file_name, c_object)
示例11: from_params
def from_params(cls, params: Params) -> 'LinearSimilarity':
    """Build a ``LinearSimilarity`` from config.

    ``tensor_1_dim`` and ``tensor_2_dim`` are required; combination defaults to
    ``"x,y"`` and activation to the identity (``"linear"``).
    """
    dim_1 = params.pop_int("tensor_1_dim")
    dim_2 = params.pop_int("tensor_2_dim")
    combination = params.pop("combination", "x,y")
    # ``by_name`` returns the activation class; the trailing call instantiates it.
    activation = Activation.by_name(params.pop("activation", "linear"))()
    params.assert_empty(cls.__name__)
    return cls(tensor_1_dim=dim_1,
               tensor_2_dim=dim_2,
               combination=combination,
               activation=activation)
示例12: from_params
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    """Construct a ``SpanConstituencyParser`` from a ``Params`` config."""
    text_field_embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

    # Optional feedforward layer applied on top of the encoder.
    ff_params = params.pop("feedforward", None)
    feedforward_layer = FeedForward.from_params(ff_params) if ff_params is not None else None

    # Optional embedding for POS tags.
    tag_embedding_params = params.pop("pos_tag_embedding", None)
    pos_tag_embedding = (Embedding.from_params(vocab, tag_embedding_params)
                         if tag_embedding_params is not None else None)

    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_directory_path = params.pop("evalb_directory_path", None)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               span_extractor=span_extractor,
               encoder=encoder,
               feedforward_layer=feedforward_layer,
               pos_tag_embedding=pos_tag_embedding,
               initializer=initializer,
               regularizer=regularizer,
               evalb_directory_path=evalb_directory_path)
示例13: from_params
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':  # type: ignore
    # pylint: disable=arguments-differ
    """Build a ``TokenCharactersEncoder`` (character embedding + seq2vec encoder)."""
    embedding_params: Params = params.pop("embedding")
    # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
    # that to be "token_characters" by default.
    embedding_params.setdefault("vocab_namespace", "token_characters")
    char_embedding = Embedding.from_params(vocab, embedding_params)

    encoder_params: Params = params.pop("encoder")
    char_encoder = Seq2VecEncoder.from_params(encoder_params)

    dropout_prob = params.pop_float("dropout", 0.0)
    params.assert_empty(cls.__name__)
    return cls(char_embedding, char_encoder, dropout_prob)
示例14: from_params
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
    """Construct a ``SimpleTagger`` model from a ``Params`` config."""
    text_field_embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    # Empty-list defaults yield no-op initializer/regularizer applicators.
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               initializer=initializer,
               regularizer=regularizer)
示例15: test_mismatched_dimensions_raise_configuration_errors
def test_mismatched_dimensions_raise_configuration_errors(self):
    """Model construction must fail loudly when configured dimensions disagree."""
    # Case 1: wrong input_dim on the first feedforward layer (should be 2).
    params = Params.from_file(self.param_file)
    params["model"]["attend_feedforward"]["input_dim"] = 10
    with pytest.raises(ConfigurationError):
        Model.from_params(vocab=self.vocab, params=params.pop("model"))

    # Case 2: wrong projection output_dim on the last layer (should be 3,
    # equal to the number of classes).
    params = Params.from_file(self.param_file)
    params["model"]["aggregate_feedforward"]["output_dim"] = 10
    with pytest.raises(ConfigurationError):
        Model.from_params(vocab=self.vocab, params=params.pop("model"))