This article collects typical usage examples of the Python method pymatgen.apps.borg.queen.BorgQueen.parallel_assimilate. If you are unsure how exactly to use BorgQueen.parallel_assimilate, or are looking for working examples of it, the curated code samples below should help. You can also read further about the containing class, pymatgen.apps.borg.queen.BorgQueen.
The following presents 5 code examples of BorgQueen.parallel_assimilate, sorted by popularity by default.
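Before the individual examples, here is a minimal sketch of the pattern they all share: pair a drone with a BorgQueen and let parallel_assimilate walk a directory tree in parallel. The directory path and save-file name are illustrative placeholders, not values taken from the examples below.
import multiprocessing
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen

# A drone knows how to parse one kind of calculation directory into entries.
drone = VaspToComputedEntryDrone(inc_structure=True)
# The queen dispatches directories to a pool of worker processes ("drones").
queen = BorgQueen(drone, number_of_drones=multiprocessing.cpu_count())
queen.parallel_assimilate("path/to/vasp/runs")  # placeholder path: walk and parse in parallel
entries = queen.get_data()                      # list of assimilated entries
queen.save_data("vasp_data.gz")                 # cache results; reload later with load_data()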
Example 1: get_energies
# Required import: from pymatgen.apps.borg.queen import BorgQueen [as alias]
# Or alternatively: from pymatgen.apps.borg.queen.BorgQueen import parallel_assimilate [as alias]
def get_energies(rootdir, reanalyze, verbose, pretty):
    if verbose:
        FORMAT = "%(relativeCreated)d msecs : %(message)s"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    drone = GaussianToComputedEntryDrone(inc_structure=True,
                                         parameters=['filename'])
    ncpus = multiprocessing.cpu_count()
    logging.info('Detected {} cpus'.format(ncpus))
    queen = BorgQueen(drone, number_of_drones=ncpus)
    # save_file is a module-level constant defined elsewhere in the original script.
    if os.path.exists(save_file) and not reanalyze:
        msg = ('Using previously assimilated data from {}. '
               'Use -f to force re-analysis').format(save_file)
        queen.load_data(save_file)
    else:
        queen.parallel_assimilate(rootdir)
        msg = 'Results saved to {} for faster reloading.'.format(save_file)
        queen.save_data(save_file)
    entries = queen.get_data()
    entries = sorted(entries, key=lambda x: x.parameters['filename'])
    all_data = [(e.parameters['filename'].replace("./", ""),
                 re.sub(r"\s+", "", e.composition.formula),
                 "{}".format(e.parameters['charge']),
                 "{}".format(e.parameters['spin_mult']),
                 "{:.5f}".format(e.energy), "{:.5f}".format(e.energy_per_atom),
                 ) for e in entries]
    headers = ("Directory", "Formula", "Charge", "Spin Mult.", "Energy",
               "E/Atom")
    print(tabulate(all_data, headers=headers))
    print("")
    print(msg)
Example 2: get_energies
# Required import: from pymatgen.apps.borg.queen import BorgQueen [as alias]
# Or alternatively: from pymatgen.apps.borg.queen.BorgQueen import parallel_assimilate [as alias]
def get_energies(rootdir, reanalyze, verbose, detailed, sort):
    """
    Assimilate VASP runs under rootdir and print a table of energies.
    """
    if verbose:
        FORMAT = "%(relativeCreated)d msecs : %(message)s"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    if not detailed:
        drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
    else:
        drone = VaspToComputedEntryDrone(inc_structure=True,
                                         data=["filename",
                                               "initial_structure"])
    ncpus = multiprocessing.cpu_count()
    logging.info("Detected {} cpus".format(ncpus))
    queen = BorgQueen(drone, number_of_drones=ncpus)
    if os.path.exists(SAVE_FILE) and not reanalyze:
        msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
              + " Use -f to force re-analysis."
        queen.load_data(SAVE_FILE)
    else:
        if ncpus > 1:
            queen.parallel_assimilate(rootdir)
        else:
            queen.serial_assimilate(rootdir)
        msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
              "subsequent loading."
        queen.save_data(SAVE_FILE)
    entries = queen.get_data()
    if sort == "energy_per_atom":
        entries = sorted(entries, key=lambda x: x.energy_per_atom)
    elif sort == "filename":
        entries = sorted(entries, key=lambda x: x.data["filename"])
    all_data = []
    for e in entries:
        if not detailed:
            delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
        else:
            delta_vol = e.structure.volume / \
                e.data["initial_structure"].volume - 1
            delta_vol = "{:.2f}".format(delta_vol * 100)
        all_data.append((e.data["filename"].replace("./", ""),
                         re.sub(r"\s+", "", e.composition.formula),
                         "{:.5f}".format(e.energy),
                         "{:.5f}".format(e.energy_per_atom),
                         delta_vol))
    if len(all_data) > 0:
        headers = ("Directory", "Formula", "Energy", "E/Atom", "% vol chg")
        t = PrettyTable(headers)
        t.align["Directory"] = "l"
        for d in all_data:
            t.add_row(d)
        print(t)
        print(msg)
    else:
        print("No valid vasp run found.")
Example 3: submit_vasp_directory
# Required import: from pymatgen.apps.borg.queen import BorgQueen [as alias]
# Or alternatively: from pymatgen.apps.borg.queen.BorgQueen import parallel_assimilate [as alias]
def submit_vasp_directory(self, rootdir, authors, projects=None,
                          references='', remarks=None, master_data=None,
                          master_history=None, created_at=None,
                          ncpus=None):
    """
    Assimilates all VASP run directories beneath a particular
    directory using BorgQueen to obtain structures, and then submits them
    to the Materials Project as SNL files. VASP-related metadata such as
    the initial structure and final energies are automatically incorporated.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        rootdir:
            Rootdir to start assimilating VASP runs from.
        authors:
            *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <[email protected]>',
            or a single String with commas separating authors. The same
            list of authors should apply to all runs.
        projects:
            List of Strings ['Project A', 'Project B']. This applies to
            all structures.
        references:
            A String in BibTeX format. Again, this applies to all
            structures.
        remarks:
            List of Strings ['Remark A', 'Remark B'].
        master_data:
            A free-form dict. Namespaced at the root level with an
            underscore, e.g. {"_materialsproject": <custom data>}. This
            data is added to all structures detected in the directory,
            in addition to other VASP data on a per-structure basis.
        master_history:
            A list of history dicts that is applied identically to every
            submitted structure.
        created_at:
            A datetime object.
        ncpus:
            Number of cpus to use when using BorgQueen to assimilate.
    """
    drone = VaspToComputedEntryDrone(inc_structure=True,
                                     data=["filename",
                                           "initial_structure"])
    queen = BorgQueen(drone, number_of_drones=ncpus)
    queen.parallel_assimilate(rootdir)
    structures = []
    metadata = []
    # TODO: Get histories from the data.
    for e in queen.get_data():
        structures.append(e.structure)
        m = {
            "_vasp": {
                "parameters": e.parameters,
                "final_energy": e.energy,
                "final_energy_per_atom": e.energy_per_atom,
                "initial_structure": e.data["initial_structure"].to_dict
            }
        }
        if master_data is not None:
            m.update(master_data)
        metadata.append(m)
    histories = None
    if master_history is not None:
        histories = master_history * len(structures)
    return self.submit_structures(
        structures, authors, projects=projects, references=references,
        remarks=remarks, data=metadata, histories=histories,
        created_at=created_at)
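A hedged usage sketch for Example 3: this method appears to belong to pymatgen's Materials Project REST client (MPRester in older releases), so the import path is an assumption, and the API key, directory, and author below are placeholders rather than values from the example.
from pymatgen.ext.matproj import MPRester  # import path assumed; older releases used pymatgen.matproj.rest

mpr = MPRester("YOUR_API_KEY")  # placeholder API key
submission_ids = mpr.submit_vasp_directory(
    rootdir="path/to/vasp/runs",  # placeholder directory of VASP runs
    authors="John Doe <john.doe@example.com>",
    projects=["Project A"],
    remarks=["test submission"],
    ncpus=4)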
Example 4: MPINTVaspToDbTaskDrone
# Required import: from pymatgen.apps.borg.queen import BorgQueen [as alias]
# Or alternatively: from pymatgen.apps.borg.queen.BorgQueen import parallel_assimilate [as alias]
from __future__ import division, unicode_literals, print_function

from mpinterfaces.database import MPINTVaspToDbTaskDrone
from pymatgen.apps.borg.queen import BorgQueen
# import multiprocessing

additional_fields = {"author": "kiran"}  # another possible field: "doi": "10.1063/1.4865107"
drone = MPINTVaspToDbTaskDrone(host="127.0.0.1", port=27017,
                               database="vasp", collection="collection_name",
                               user="username", password="password",
                               additional_fields=additional_fields)
ncpus = 4  # or multiprocessing.cpu_count()
queen = BorgQueen(drone, number_of_drones=ncpus)
queen.parallel_assimilate('path_to_vasp_calculation_folders')
Example 5: get_energies
# Required import: from pymatgen.apps.borg.queen import BorgQueen [as alias]
# Or alternatively: from pymatgen.apps.borg.queen.BorgQueen import parallel_assimilate [as alias]
def get_energies(rootdir, reanalyze, verbose, detailed,
                 sort, formulaunit, debug, hull, threshold, args, templatestructure):
    """
    Assimilate VASP runs under rootdir and analyze energies and Na-site
    occupations (an extended variant of Example 2).
    """
    ion_list = 'Novalue'
    ave_key_list = 'Novalue'
    threscount = 0
    if verbose and not debug:
        FORMAT = "%(relativeCreated)d msecs : %(message)s"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    elif debug:
        logging.basicConfig(level=logging.DEBUG)
    if not detailed:
        drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
    else:
        drone = VaspToComputedEntryDrone(inc_structure=True,
                                         data=["filename",
                                               "initial_structure"])
    ncpus = multiprocessing.cpu_count()
    logging.info("Detected {} cpus".format(ncpus))
    queen = BorgQueen(drone, number_of_drones=ncpus)
    if os.path.exists(SAVE_FILE) and not reanalyze:
        msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
              + " Use -f to force re-analysis."
        queen.load_data(SAVE_FILE)
    else:
        if ncpus > 1:
            queen.parallel_assimilate(rootdir)
        else:
            queen.serial_assimilate(rootdir)
        msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
              "subsequent loading."
        queen.save_data(SAVE_FILE)
    entries = queen.get_data()
    if sort == "energy_per_atom":
        entries = sorted(entries, key=lambda x: x.energy_per_atom)
    elif sort == "filename":
        entries = sorted(entries, key=lambda x: x.data["filename"])
    # logging.debug('First Energy entry is {}'.format(entries[0]))
    base_energy = entries[0].energy
    logging.debug('Type of entries is: {}'.format(type(entries)))
    logging.debug('First Element of Entries is: {}'.format(entries[0]))
    # logging.debug('First Energy entry structure is {}'.format(entries[0].structure))
    xy_direction = int(args.XYdirection)
    tolerance = float(args.tolerance)
    if args.template:
        logging.debug('Temp Structure site info is: {}'.format(
            Na12(['Co', 'Mn'], ['Na'], templatestructure, templatestructure,
                 XY_Direction=xy_direction, tol=tolerance)))
        template_site_info = Na12(['Co', 'Mn'], ['Na'], templatestructure,
                                  templatestructure, XY_Direction=xy_direction,
                                  tol=tolerance)
    all_data = []
    energy_diff = []
    threshold = float(threshold)
    Structure_info_dict = {}
    check_ion_seq = [args.dupion]
    for e in entries:
        if not detailed:
            delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
        else:
            delta_vol = e.structure.volume / \
                e.data["initial_structure"].volume - 1
            delta_vol = "{:.2f}".format(delta_vol * 100)
        entry_path = e.data['filename'].rsplit('/', 1)[0]
        entry_site_info = Na12(['Co', 'Mn'], ['Na'], e.structure, e.structure,
                               XY_Direction=xy_direction, tol=tolerance)
        logging.debug('Total Na site: {}'.format(entry_site_info['Total_Na_Site']))
        # Coordination extraction part
        # na_layer_site_fcoords = [site._fcoords for site in s if site.specie.symbol == "Na"]
        # if 'Cif_Structure' in e.data.keys():
        #     na_sites_fcoords = [site._fcoords for site in e.data['Cif_Structure'] if site.specie.symbol == 'Na']
        #     na_sites_fcoords_list_tuple = [tuple(coord) for coord in na_sites_fcoords]
        na_sites_fcoords = [site._fcoords for site in e.data['CONTCAR_Structure']
                            if site.specie.symbol == 'Na']
        na_sites_fcoords_list_tuple = [tuple(coord) for coord in na_sites_fcoords]
# ......... (the rest of the code is omitted here) .........