本文整理汇总了Python中allennlp.common.params.Params.from_file方法的典型用法代码示例。如果您正苦于以下问题:Python Params.from_file方法的具体用法?Python Params.from_file怎么用?Python Params.from_file使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类allennlp.common.params.Params
的用法示例。
在下文中一共展示了Params.from_file方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_fine_tune_nograd_regex
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_fine_tune_nograd_regex(self):
    """Freezing parameters via ``trainer.no_grad`` regexes affects only matching names."""
    base_model = load_archive(self.model_archive).model
    original_parameters = dict(base_model.named_parameters())
    # No patterns, then two different pattern sets.
    for frozen_patterns in ([],
                            [".*attend_feedforward.*", ".*token_embedder.*"],
                            [".*compare_feedforward.*"]):
        params = Params.from_file(self.config_file)
        params["trainer"]["no_grad"] = frozen_patterns
        shutil.rmtree(self.serialization_dir, ignore_errors=True)
        tuned_model = fine_tune_model(model=base_model,
                                      params=params,
                                      serialization_dir=self.serialization_dir)
        for name, parameter in tuned_model.named_parameters():
            matched = any(re.search(pattern, name) for pattern in frozen_patterns)
            if matched:
                # A matched parameter must be frozen.
                assert not parameter.requires_grad
            else:
                # An unmatched parameter keeps the requires_grad of the
                # originally loaded model.
                assert parameter.requires_grad \
                    == original_parameters[name].requires_grad
    # Freezing every parameter must be rejected.
    with pytest.raises(Exception) as _:
        params = Params.from_file(self.config_file)
        params["trainer"]["no_grad"] = ["*"]
        shutil.rmtree(self.serialization_dir, ignore_errors=True)
        tuned_model = fine_tune_model(model=base_model,
                                      params=params,
                                      serialization_dir=self.serialization_dir)
示例2: test_known_configs
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_known_configs(self):
    """Every file under ``training_config/`` should parse with ``Params.from_file``."""
    config_names = os.listdir(self.PROJECT_ROOT / "training_config")
    # The configs use environment-variable substitution and the _jsonnet
    # parser fails when a referenced variable is missing, so force dummy
    # values for the known ones.
    forced_variables = [
        # constituency parser
        'PTB_TRAIN_PATH', 'PTB_DEV_PATH', 'PTB_TEST_PATH',
        # srl_elmo_5.5B
        'SRL_TRAIN_DATA_PATH', 'SRL_VALIDATION_DATA_PATH',
        # coref
        'COREF_TRAIN_DATA_PATH', 'COREF_DEV_DATA_PATH', 'COREF_TEST_DATA_PATH',
        # ner
        'NER_TRAIN_DATA_PATH', 'NER_TEST_A_PATH', 'NER_TEST_B_PATH'
    ]
    placeholder = str(self.TEST_DIR)
    for variable in forced_variables:
        # Keep any value the caller already exported.
        os.environ[variable] = os.environ.get(variable) or placeholder
    for config in config_names:
        try:
            Params.from_file(self.PROJECT_ROOT / "training_config" / config)
        except Exception as e:
            raise AssertionError(f"unable to load params for {config}, because {e}")
    # Only remove the variables we injected ourselves.
    for variable in forced_variables:
        if os.environ[variable] == placeholder:
            del os.environ[variable]
示例3: test_regexes_with_backslashes
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_regexes_with_backslashes(self):
    """A single backslash is an invalid jsonnet escape; a doubled one round-trips."""
    bad_regex = self.TEST_DIR / 'bad_regex.jsonnet'
    good_regex = self.TEST_DIR / 'good_regex.jsonnet'
    bad_regex.write_text(r'{"myRegex": "a\.b"}')
    good_regex.write_text(r'{"myRegex": "a\\.b"}')
    # The un-escaped backslash is rejected by the jsonnet parser.
    with pytest.raises(RuntimeError):
        Params.from_file(bad_regex)
    params = Params.from_file(good_regex)
    regex = params['myRegex']
    # The doubled backslash arrives as a regex escape for a literal dot.
    assert re.match(regex, "a.b")
    assert not re.match(regex, "a-b")
    # Check roundtripping: dump to JSON, reload, compare.
    good_regex2 = self.TEST_DIR / 'good_regex2.jsonnet'
    good_regex2.write_text(json.dumps(params.as_dict()))
    params2 = Params.from_file(good_regex2)
    assert params2.as_dict() == params.as_dict()
示例4: test_mismatching_contextualizer_unidirectionality_throws_configuration_error
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_mismatching_contextualizer_unidirectionality_throws_configuration_error(self):
    """A contextualizer whose directionality disagrees with the LM is rejected."""
    params = Params.from_file(self.param_file)
    # Flip the contextualizer's bidirectionality so it no longer matches
    # the language model's setting.
    wrong_directionality = not self.bidirectional
    params["model"]["contextualizer"]["bidirectional"] = wrong_directionality
    with pytest.raises(ConfigurationError):
        Model.from_params(vocab=self.vocab, params=params.get("model"))
示例5: test_mismatching_dimensions_throws_configuration_error
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_mismatching_dimensions_throws_configuration_error(self):
    """An encoder input size that disagrees with the embedding dim is rejected."""
    params = Params.from_file(self.param_file)
    # 10 cannot match the embedding + binary feature dimension of 150.
    params["model"]["encoder"]["input_size"] = 10
    model_params = params.pop("model")
    with pytest.raises(ConfigurationError):
        Model.from_params(vocab=self.vocab, params=model_params)
示例6: main
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def main(param_file: str, extra_beaker_commands: List[str]):
    """Package the current checkout as a Docker image, push it to ECR, and
    launch a Beaker experiment that runs ``allennlp train`` on *param_file*.

    param_file: path to an allennlp training config; uploaded as a Beaker
        dataset and mounted inside the container at /config.json.
    extra_beaker_commands: extra CLI arguments spliced into the
        ``beaker experiment run`` invocation.
    """
    ecr_repository = "896129387501.dkr.ecr.us-west-2.amazonaws.com"
    # Tag the image with the current commit so runs are traceable to source.
    commit = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True).strip()
    image = f"{ecr_repository}/allennlp/allennlp:{commit}"
    overrides = ""
    # Reads params and sets environment.
    params = Params.from_file(param_file, overrides)
    flat_params = params.as_flat_dict()
    env = []
    for k, v in flat_params.items():
        # Flatten dotted keys ("a.b" -> "a_b") for use as env-var names.
        k = str(k).replace('.', '_')
        env.append(f"--env={k}={v}")
    # If the git repository is dirty, add a random hash to the image tag so
    # it does not collide with the clean-commit image.
    result = subprocess.run('git diff-index --quiet HEAD --', shell=True)
    if result.returncode != 0:
        # NOTE(review): `random_int` is not defined in this function — presumably
        # a module-level value; confirm it exists before this runs on a dirty tree.
        dirty_hash = "%x" % random_int
        image += "-" + dirty_hash
    # Get temporary ecr login. For this command to work, you need the python awscli
    # package with a version more recent than 1.11.91.
    print("Generating ECR Login Command")
    login_command = subprocess.check_output('aws --region=us-west-2 ecr get-login --no-include-email', shell=True)
    print("Logging into ECR")
    subprocess.run(login_command, shell=True, check=True)
    print(f"Building the Docker image ({image})")
    subprocess.run(f'docker build -t {image} .', shell=True, check=True)
    print(f"Pushing the Docker image ({image})")
    subprocess.run(f'docker push {image}', shell=True, check=True)
    # Upload the config as a Beaker dataset so the container can mount it.
    config_dataset_id = subprocess.check_output(f'beaker dataset create --quiet {param_file}', shell=True, universal_newlines=True).strip()
    # NOTE(review): `filename` is unused below.
    filename = os.path.basename(param_file)
    # Command run inside the container.
    allennlp_command = [
            "python",
            "-m",
            "allennlp.run",
            "train",
            "/config.json",
            "-s",
            "/output",
            "--file-friendly-logging"
    ]
    # TODO(michaels): add back in the env list.
    # Presently this makes the Beaker UI unusably cluttered.
    command = [
            '/usr/local/bin/beaker',
            'experiment',
            'run',
            '--result-path',
            '/output',
            "--source",
            f"{config_dataset_id}:/config.json"] + env + extra_beaker_commands + [image] + allennlp_command
    print(' '.join(command))
    subprocess.run(command, check=True)
示例7: test_elmo_but_no_set_flags_throws_configuration_error
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_elmo_but_no_set_flags_throws_configuration_error(self):
    """Configuring ELMo in the model without enabling either usage flag fails."""
    # pylint: disable=line-too-long
    params = Params.from_file(self.FIXTURES_ROOT / 'biattentive_classification_network' / 'elmo_experiment.json')
    # Elmo is specified in the model, but turning both consumption flags
    # off leaves it unused -- a configuration error.
    for flag in ("use_input_elmo", "use_integrator_output_elmo"):
        params["model"][flag] = False
    with pytest.raises(ConfigurationError):
        Model.from_params(vocab=self.vocab, params=params.get("model"))
示例8: test_load_from_file
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_load_from_file(self):
    """``Params.from_file`` exposes the top-level sections of the config."""
    experiment_config = self.FIXTURES_ROOT / 'bidaf' / 'experiment.json'
    params = Params.from_file(experiment_config)
    for section in ("dataset_reader", "trainer"):
        assert section in params
    model_params = params.pop("model")
    assert model_params.pop("type") == "bidaf"
示例9: test_fine_tune_does_not_expand_vocab_by_default
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_fine_tune_does_not_expand_vocab_by_default(self):
    """Fine-tuning on data with an unseen token runs without vocab expansion."""
    params = Params.from_file(self.config_file)
    # snli2 contains a token that is new relative to the archived model.
    params["train_data_path"] = str(self.FIXTURES_ROOT / 'data' / 'snli2.jsonl')
    archived_model = load_archive(self.model_archive).model
    # Default behaviour is no vocab expansion, so this should succeed.
    fine_tune_model(archived_model, params, self.serialization_dir)
示例10: test_fine_tune_runtime_errors_with_vocab_expansion
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_fine_tune_runtime_errors_with_vocab_expansion(self):
    """Requesting vocab expansion during fine-tuning raises a RuntimeError."""
    params = Params.from_file(self.config_file)
    # snli2 introduces a token the archived model's vocab has never seen.
    params["train_data_path"] = str(self.FIXTURES_ROOT / 'data' / 'snli2.jsonl')
    archived_model = load_archive(self.model_archive).model
    # Expanding the vocab fails because of the (fixed-size) embedding.
    with pytest.raises(RuntimeError):
        fine_tune_model(archived_model, params, self.serialization_dir, extend_vocab=True)
示例11: test_env_var_substitution
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_env_var_substitution(self):
    """``std.extVar`` lookups resolve from os.environ and fail when unset."""
    substitutor = self.TEST_DIR / 'substitutor.jsonnet'
    key = 'TEST_ENV_VAR_SUBSTITUTION'
    # Precondition: the variable must not leak in from the outer environment.
    assert os.environ.get(key) is None
    substitutor.write_text(f'{{"path": std.extVar("{key}")}}')
    # Parsing raises while the environment variable is unset...
    with pytest.raises(RuntimeError):
        Params.from_file(substitutor)
    # ...and resolves to its value once it is defined.
    os.environ[key] = "PERFECT"
    params = Params.from_file(substitutor)
    assert params['path'] == "PERFECT"
    del os.environ[key]
示例12: make_vocab_from_args
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def make_vocab_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to params.
    """
    # Unpack the CLI arguments into Params, then delegate.
    params = Params.from_file(args.param_path, args.overrides)
    make_vocab_from_params(params)
示例13: dry_run_from_args
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def dry_run_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to params.
    """
    # Unpack the CLI arguments into Params, then delegate.
    params = Params.from_file(args.param_path, args.overrides)
    dry_run_from_params(params, args.serialization_dir)
示例14: test_overrides
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def test_overrides(self):
    """Values in the overrides string take precedence over the file's values."""
    filename = self.FIXTURES_ROOT / 'bidaf' / 'experiment.json'
    overrides = ('{ "train_data_path": "FOO", "model": { "type": "BAR" },'
                 '"model.text_field_embedder.tokens.type": "BAZ" }')
    params = Params.from_file(filename, overrides)
    # Sections that were not overridden are still present.
    assert "dataset_reader" in params
    assert "trainer" in params
    # Top-level, nested-dict, and dotted-path overrides all apply.
    assert params["train_data_path"] == "FOO"
    model_params = params.pop("model")
    assert model_params.pop("type") == "BAR"
    assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
示例15: setUp
# 需要导入模块: from allennlp.common.params import Params [as 别名]
# 或者: from allennlp.common.params.Params import from_file [as 别名]
def setUp(self):
    """Build the model fixture plus the reader, iterator and trainer for it."""
    super().setUp()
    config_path = self.FIXTURES_ROOT / 'simple_tagger' / 'experiment_with_regularization.json'
    data_path = self.FIXTURES_ROOT / 'data' / 'sequence_tagging.tsv'
    # set_up_model populates self.model / self.dataset / self.vocab.
    self.set_up_model(config_path, data_path)
    params = Params.from_file(config_path)
    self.reader = DatasetReader.from_params(params['dataset_reader'])
    self.iterator = DataIterator.from_params(params['iterator'])
    # NOTE(review): positional args -- presumably (model, serialization_dir,
    # iterator, train_data, validation_data, trainer_params); confirm against
    # Trainer.from_params.
    self.trainer = Trainer.from_params(self.model,
                                       self.TEST_DIR,
                                       self.iterator,
                                       self.dataset,
                                       None,
                                       params.get('trainer'))