This article collects typical usage examples of the Python function monty.serialization.dumpfn. If you are unsure exactly what dumpfn does, how to call it, or what real-world uses look like, the curated code examples below should help.
Fifteen code examples of dumpfn are shown below, sorted by popularity by default.
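Before the examples, here is a minimal, self-contained sketch of the basic pattern: dumpfn infers the serialization format from the target file's extension, and loadfn reverses the process. The file names and dictionary contents below are illustrative.

from monty.serialization import dumpfn, loadfn

settings = {"functional": "PBE", "max_errors": 5}

# The format is inferred from the extension of the target file.
dumpfn(settings, "settings.json", indent=2)   # JSON
dumpfn(settings, "settings.yaml")             # YAML
dumpfn(settings, "settings.mpk")              # msgpack (requires the msgpack package)

# loadfn dispatches on the extension in the same way.
assert loadfn("settings.json") == settings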
Example 1: im_vac_antisite_def_energy_parse
from argparse import ArgumentParser

from monty.json import MontyEncoder
from monty.serialization import dumpfn


def im_vac_antisite_def_energy_parse():
    m_description = 'Command to parse vacancy and antisite defect ' \
                    'energies for intermetallics from the VASP DFT ' \
                    'calculations.'

    parser = ArgumentParser(description=m_description)
    parser.add_argument("--mpid",
                        type=str.lower,
                        help="Materials Project id of the intermetallic structure.\n" \
                             "For more info on Materials Project, please refer to " \
                             "www.materialsproject.org")
    parser.add_argument("--mapi_key",
                        default=None,
                        help="Your Materials Project REST API key.\n" \
                             "For more info, please refer to " \
                             "www.materialsproject.org/open")
    args = parser.parse_args()
    print(args)

    # vac_antisite_def_parse_energy is defined elsewhere in the same module.
    energy_dict = vac_antisite_def_parse_energy(args.mpid, args.mapi_key)

    # Debug output describing the structure of the parsed dictionary.
    print(type(energy_dict))
    for key, value in energy_dict.items():
        print(key)
        print(type(key), type(value))
        for key2, val2 in value.items():
            print(type(key2), type(val2))

    if energy_dict:
        fl_nm = args.mpid + '_raw_defect_energy.json'
        dumpfn(energy_dict, fl_nm, cls=MontyEncoder, indent=2)
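To consume the file written above, loadfn rebuilds the nested dictionary; loadfn uses MontyDecoder by default, so values encoded with MontyEncoder come back as objects. The mpid in the file name is illustrative.

from monty.serialization import loadfn

# Round-trip of the dump above (illustrative file name).
energy_dict = loadfn("mp-1234_raw_defect_energy.json")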
Example 2: im_sol_sub_def_energy_parse
def im_sol_sub_def_energy_parse():
    m_description = 'Command to parse solute substitution defect ' \
                    'energies for intermetallics from the VASP DFT ' \
                    'calculations.'

    parser = ArgumentParser(description=m_description)
    parser.add_argument("--mpid",
                        type=str.lower,
                        help="Materials Project id of the intermetallic structure.\n" \
                             "For more info on Materials Project, please refer to " \
                             "www.materialsproject.org")
    parser.add_argument("--solute", help="Solute Element")
    parser.add_argument("--mapi_key",
                        default=None,
                        help="Your Materials Project REST API key.\n" \
                             "For more info, please refer to " \
                             "www.materialsproject.org/open")
    args = parser.parse_args()

    # solute_def_parse_energy is defined elsewhere in the same module.
    energy_dict = solute_def_parse_energy(args.mpid, args.solute,
                                          args.mapi_key)
    if energy_dict:
        fl_nm = args.mpid + '_solute-' + args.solute + '_raw_defect_energy.json'
        dumpfn(energy_dict, fl_nm, indent=2, cls=MontyEncoder)
Example 3: run_task
def run_task(self, fw_spec):
    transformations = []
    transformation_params = self.get("transformation_params",
                                     [{} for i in range(len(self["transformations"]))])
    for t in self["transformations"]:
        found = False
        for m in ["advanced_transformations", "defect_transformations",
                  "site_transformations", "standard_transformations"]:
            mod = import_module("pymatgen.transformations.{}".format(m))
            try:
                t_cls = getattr(mod, t)
            except AttributeError:
                continue
            t_obj = t_cls(**transformation_params.pop(0))
            transformations.append(t_obj)
            found = True
        if not found:
            raise ValueError("Could not find transformation: {}".format(t))

    # TODO: @matk86 - should prev_calc_dir use CONTCAR instead of POSCAR? Note that if
    # current dir, maybe it is POSCAR indeed best ... -computron
    structure = self['structure'] if not self.get('prev_calc_dir', None) else \
        Poscar.from_file(os.path.join(self['prev_calc_dir'], 'POSCAR')).structure
    ts = TransformedStructure(structure)
    transmuter = StandardTransmuter([ts], transformations)
    final_structure = transmuter.transformed_structures[-1].final_structure.copy()

    vis_orig = self["vasp_input_set"]
    vis_dict = vis_orig.as_dict()
    vis_dict["structure"] = final_structure.as_dict()
    vis_dict.update(self.get("override_default_vasp_params", {}) or {})
    vis = vis_orig.__class__.from_dict(vis_dict)
    vis.write_input(".")

    dumpfn(transmuter.transformed_structures[-1], "transformations.json")
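The object dumped here is a TransformedStructure, which is MSONable; dumpfn serializes it to JSON with its @module/@class metadata, so it can be rebuilt later. A hedged sketch of reading it back:

from monty.serialization import loadfn

# loadfn's default MontyDecoder reconstructs the TransformedStructure,
# including its transformation history, from the JSON metadata.
ts = loadfn("transformations.json")
print(ts.final_structure.composition)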
Example 4: run
def run(self, job_cmd=None):
    """
    Run the VASP jobs through custodian.
    If the job list is empty, run a single job with the initial input set.
    """
    for j in self.jobs:
        if job_cmd is not None:
            j.job_cmd = job_cmd
        else:
            j.job_cmd = self.job_cmd
    c_params = {'jobs': [j.as_dict() for j in self.jobs],
                'handlers': [h.as_dict() for h in self.handlers],
                'max_errors': 5}
    c = Custodian(self.handlers, self.jobs, max_errors=5)
    c.run()
    for j in self.jobs:
        self.cal_log.append({"job": j.as_dict(),
                             'job_id': j.job_id,
                             "corrections": [],
                             'final_energy': None})
        self.job_ids.append(j.job_id)
    if self.checkpoint_file:
        dumpfn(self.cal_log, self.checkpoint_file,
               cls=MontyEncoder, indent=4)
    else:
        dumpfn(self.cal_log, Calibrate.LOG_FILE, cls=MontyEncoder,
               indent=4)
Example 5: setup
def setup(self):
    """
    Performs initial setup for VaspJob, including overriding any settings
    and backing up.
    """
    decompress_dir('.')

    if self.backup:
        for f in VASP_INPUT_FILES:
            shutil.copy(f, "{}.orig".format(f))

    if self.auto_npar:
        try:
            incar = Incar.from_file("INCAR")
            # Only optimized NPAR for non-HF and non-RPA calculations.
            if not (incar.get("LHFCALC") or incar.get("LRPA") or
                    incar.get("LEPSILON")):
                if incar.get("IBRION") in [5, 6, 7, 8]:
                    # NPAR should not be set for Hessian matrix
                    # calculations, whether in DFPT or otherwise.
                    del incar["NPAR"]
                else:
                    import multiprocessing
                    # try sge environment variable first
                    # (since multiprocessing counts cores on the current
                    # machine only)
                    ncores = os.environ.get('NSLOTS') or \
                        multiprocessing.cpu_count()
                    ncores = int(ncores)
                    for npar in range(int(math.sqrt(ncores)),
                                      ncores):
                        if ncores % npar == 0:
                            incar["NPAR"] = npar
                            break
                incar.write_file("INCAR")
        except:
            pass

    if self.auto_continue:
        if os.path.exists("continue.json"):
            actions = loadfn("continue.json").get("actions")
            logger.info("Continuing previous VaspJob. Actions: {}".format(actions))
            backup(VASP_BACKUP_FILES, prefix="prev_run")
            VaspModder().apply_actions(actions)
        else:
            # Default functionality is to copy CONTCAR to POSCAR and set
            # ISTART to 1 in the INCAR, but other actions can be specified
            if self.auto_continue is True:
                actions = [{"file": "CONTCAR",
                            "action": {"_file_copy": {"dest": "POSCAR"}}},
                           {"dict": "INCAR",
                            "action": {"_set": {"ISTART": 1}}}]
            else:
                actions = self.auto_continue
            dumpfn({"actions": actions}, "continue.json")

    if self.settings_override is not None:
        VaspModder().apply_actions(self.settings_override)
Example 6: generate_single_job_dict
def generate_single_job_dict():
    """
    Used to generate test dictionary for single jobs.
    """
    single_job_dict = {}
    for file in single_job_out_names:
        single_job_dict[file] = QCOutput(os.path.join(test_dir, file)).data
    dumpfn(single_job_dict, "single_job.json")
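This snippet relies on module-level fixtures from the surrounding test module. Hypothetical definitions for context only; the directory and file names here are invented, and the QCOutput import path varies across pymatgen versions:

import os
from pymatgen.io.qchem.outputs import QCOutput

# Hypothetical fixtures assumed by generate_single_job_dict():
test_dir = "/path/to/test_files"
single_job_out_names = ["qchem_job_1.qout", "qchem_job_2.qout"]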
Example 7: run
def run(self):
    """
    Override of Custodian.run() to include instructions to copy the
    temp_dir to the scratch partition on slave compute nodes if requested.
    """
    cwd = os.getcwd()

    with ScratchDir(self.scratch_dir, create_symbolic_link=True,
                    copy_to_current_on_exit=True,
                    copy_from_current_on_enter=True) as temp_dir:
        self._manage_node_scratch(temp_dir_path=temp_dir,
                                  job_start=True)
        self.total_errors = 0
        start = datetime.datetime.now()
        logger.info("Run started at {} in {}.".format(
            start, temp_dir))
        v = sys.version.replace("\n", " ")
        logger.info("Custodian running on Python version {}".format(v))

        try:
            # skip jobs until the restart
            for job_n, job in islice(enumerate(self.jobs, 1),
                                     self.restart, None):
                self._run_job(job_n, job, temp_dir)
                # Checkpoint after each job so that we can recover from
                # last point and remove old checkpoints
                if self.checkpoint:
                    super(SSHCustodian, self)._save_checkpoint(cwd, job_n)
        except CustodianError as ex:
            logger.error(ex.message)
            if ex.raises:
                raise RuntimeError("{} errors reached: {}. Exited..."
                                   .format(self.total_errors, ex))
        finally:
            # Log the corrections to a json file.
            logger.info("Logging to {}...".format(super(SSHCustodian,
                                                        self).LOG_FILE))
            dumpfn(self.run_log, super(SSHCustodian, self).LOG_FILE,
                   cls=MontyEncoder, indent=4)
            end = datetime.datetime.now()
            logger.info("Run ended at {}.".format(end))
            run_time = end - start
            logger.info("Run completed. Total time taken = {}."
                        .format(run_time))
            # Remove duplicate copy of log file, provided it ends with
            # ".log"
            for x in ([x for x in os.listdir(temp_dir)
                       if re.match(r'\w*\.log', x)]):
                os.remove(os.path.join(temp_dir, x))
            self._manage_node_scratch(temp_dir_path=temp_dir,
                                      job_start=False)
            if self.gzipped_output:
                gzip_dir(".")

    # Cleanup checkpoint files (if any) if run is successful.
    super(SSHCustodian, self)._delete_checkpoints(cwd)

    return self.run_log
Example 8: run
def run(self):
    """
    Runs all the jobs.

    Returns:
        All errors encountered as a list of list.
        [[error_dicts for job 1], [error_dicts for job 2], ....]
    """
    cwd = os.getcwd()

    with ScratchDir(self.scratch_dir, create_symbolic_link=True,
                    copy_to_current_on_exit=True,
                    copy_from_current_on_enter=True) as temp_dir:
        self.total_errors = 0
        start = datetime.datetime.now()
        logger.info("Run started at {} in {}.".format(
            start, temp_dir))
        v = sys.version.replace("\n", " ")
        logger.info("Custodian running on Python version {}".format(v))
        logger.info("Hostname: {}, Cluster: {}".format(
            *get_execution_host_info()))

        try:
            # skip jobs until the restart
            for job_n, job in islice(enumerate(self.jobs, 1),
                                     self.restart, None):
                self._run_job(job_n, job)
                # Checkpoint after each job so that we can recover from last
                # point and remove old checkpoints
                if self.checkpoint:
                    self.restart = job_n
                    Custodian._save_checkpoint(cwd, job_n)
        except CustodianError as ex:
            logger.error(ex.message)
            if ex.raises:
                raise RuntimeError("{} errors reached: {}. Exited..."
                                   .format(self.total_errors, ex))
        finally:
            # Log the corrections to a json file.
            logger.info("Logging to {}...".format(Custodian.LOG_FILE))
            dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
                   indent=4)
            end = datetime.datetime.now()
            logger.info("Run ended at {}.".format(end))
            run_time = end - start
            logger.info("Run completed. Total time taken = {}."
                        .format(run_time))
            if self.gzipped_output:
                gzip_dir(".")

    # Cleanup checkpoint files (if any) if run is successful.
    Custodian._delete_checkpoints(cwd)

    return self.run_log
Example 9: generate_multi_job_dict
def generate_multi_job_dict():
    """
    Used to generate test dictionary for multiple jobs.
    """
    multi_job_dict = {}
    for file in multi_job_out_names:
        outputs = QCOutput.multiple_outputs_from_file(QCOutput,
                                                      os.path.join(test_dir, file),
                                                      keep_sub_files=False)
        data = []
        for sub_output in outputs:
            data.append(sub_output.data)
        multi_job_dict[file] = data
    dumpfn(multi_job_dict, "multi_job.json")
Example 10: update_checkpoint
def update_checkpoint(launchpad, launch_id, checkpoint):
    """
    Helper function to update checkpoint.

    Args:
        launchpad (LaunchPad): LaunchPad to ping with checkpoint data
        launch_id (int): launch id to update
        checkpoint (dict): checkpoint data
    """
    if launchpad:
        launchpad.ping_launch(launch_id, checkpoint=checkpoint)
    else:
        offline_info = loadfn("FW_offline.json")
        offline_info.update({"checkpoint": checkpoint})
        dumpfn(offline_info, "FW_offline.json")
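A hedged sketch of the offline branch, with no LaunchPad available; the launch id and checkpoint payload are illustrative, and FW_offline.json must already exist in the working directory:

from monty.serialization import dumpfn, loadfn

# Illustrative: seed an offline-run file, then merge a checkpoint into it.
dumpfn({"launch_id": 42}, "FW_offline.json")
update_checkpoint(None, 42, {"_task_id": 3})
assert loadfn("FW_offline.json")["checkpoint"] == {"_task_id": 3}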
Example 11: add_config_var
def add_config_var(args):
    d = {}
    if os.path.exists(SETTINGS_FILE):
        shutil.copy(SETTINGS_FILE, SETTINGS_FILE + ".bak")
        print("Existing %s backed up to %s"
              % (SETTINGS_FILE, SETTINGS_FILE + ".bak"))
        d = loadfn(SETTINGS_FILE)
    toks = args.var_spec
    if len(toks) % 2 != 0:
        print("Bad variable specification!")
        sys.exit(-1)
    for i in range(int(len(toks) / 2)):
        d[toks[2 * i]] = toks[2 * i + 1]
    dumpfn(d, SETTINGS_FILE, default_flow_style=False)
    print("New %s written!" % SETTINGS_FILE)
Example 12: _do_check
def _do_check(self, handlers, terminate_func=None):
    """
    Checks the specified handlers. Returns True iff errors caught.
    """
    corrections = []
    for h in handlers:
        try:
            if h.check():
                if h.max_num_corrections is not None \
                        and h.n_applied_corrections >= h.max_num_corrections:
                    msg = "Maximum number of corrections {} reached " \
                          "for handler {}".format(h.max_num_corrections, h)
                    if h.raise_on_max:
                        self.run_log[-1]["handler"] = h
                        self.run_log[-1]["max_errors_per_handler"] = True
                        raise MaxCorrectionsPerHandlerError(msg, True,
                                                            h.max_num_corrections, h)
                    else:
                        logger.warning(msg + " Correction not applied.")
                        continue
                if terminate_func is not None and h.is_terminating:
                    logger.info("Terminating job")
                    terminate_func()
                    # make sure we don't terminate twice
                    terminate_func = None
                d = h.correct()
                d["handler"] = h
                logger.error("\n" + pformat(d, indent=2, width=-1))
                corrections.append(d)
                h.n_applied_corrections += 1
        except Exception:
            if not self.skip_over_errors:
                raise
            else:
                import traceback
                logger.error("Bad handler %s " % h)
                logger.error(traceback.format_exc())
                corrections.append(
                    {"errors": ["Bad handler %s " % h],
                     "actions": []})
    self.total_errors += len(corrections)
    self.errors_current_job += len(corrections)
    self.run_log[-1]["corrections"].extend(corrections)
    # We do a dump of the run log after each check.
    dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
           indent=4)
    return len(corrections) > 0
Example 13: do_query
def do_query(args):
    m = MPRester()
    try:
        criteria = json.loads(args.criteria)
    except json.decoder.JSONDecodeError:
        criteria = args.criteria

    if args.structure:
        count = 0
        for d in m.query(criteria, properties=["structure", "task_id"]):
            s = d["structure"]
            formula = re.sub(r"\s+", "", s.formula)
            if args.structure == "poscar":
                fname = "POSCAR.%s_%s" % (d["task_id"], formula)
            else:
                fname = "%s-%s.%s" % (d["task_id"], formula, args.structure)
            s.to(filename=fname)
            count += 1
        print("%d structures written!" % count)
    elif args.entries:
        entries = m.get_entries(criteria)
        dumpfn(entries, args.entries)
        print("%d entries written to %s!" % (len(entries), args.entries))
    else:
        props = ["e_above_hull", "spacegroup"]
        props += args.data
        entries = m.get_entries(criteria, property_data=props)
        t = []
        headers = ["mp-id", "Formula", "Spacegroup", "E/atom (eV)",
                   "E above hull (eV)"] + args.data
        for e in entries:
            row = [e.entry_id, e.composition.reduced_formula,
                   e.data["spacegroup"]["symbol"],
                   e.energy_per_atom, e.data["e_above_hull"]]
            row += [e.data[s] for s in args.data]
            t.append(row)
        t = sorted(t, key=lambda x: x[headers.index("E above hull (eV)")])
        print(tabulate(t, headers=headers, tablefmt="pipe", floatfmt=".3f"))
Example 14: pmg_dump
def pmg_dump(obj, filename, **kwargs):
    """
    Dump an object to a json file using MontyEncoder. Note that these
    objects can be lists, dicts or otherwise nested pymatgen objects that
    support the as_dict() and from_dict MSONable protocol.

    Args:
        obj (object): Object to dump.
        filename (str): Filename of file to open. Can be gzipped or bzipped.
        \*\*kwargs: Any of the keyword arguments supported by the json.dump
            method.
    """
    return dumpfn(obj, filename, **kwargs)
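A hedged usage sketch: because pmg_dump simply delegates to dumpfn, any MSONable pymatgen object can be written this way. The rock-salt-like structure below is illustrative.

from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

# Illustrative structure; pmg_dump writes it as JSON with MSONable
# @module/@class metadata so it can be reloaded later with loadfn.
s = Structure(Lattice.cubic(4.0), ["Na", "Cl"],
              [[0, 0, 0], [0.5, 0.5, 0.5]])
pmg_dump(s, "structure.json")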
Example 15: test_dumpfn_loadfn
def test_dumpfn_loadfn(self):
    d = {"hello": "world"}

    dumpfn(d, "monte_test.json", indent=4)
    d2 = loadfn("monte_test.json")
    self.assertEqual(d, d2)
    os.remove("monte_test.json")

    dumpfn(d, "monte_test.yaml", default_flow_style=False)
    d2 = loadfn("monte_test.yaml")
    self.assertEqual(d, d2)
    dumpfn(d, "monte_test.yaml", Dumper=Dumper)
    d2 = loadfn("monte_test.yaml")
    os.remove("monte_test.yaml")

    dumpfn(d, "monte_test.mpk")
    d2 = loadfn("monte_test.mpk")
    # msgpack round-trips strings as bytes, hence the decode before comparing.
    self.assertEqual(d, {k.decode('utf-8'): v.decode('utf-8')
                         for k, v in d2.items()})
    os.remove("monte_test.mpk")