本文整理汇总了Python中pymatgen.apps.borg.queen.BorgQueen类的典型用法代码示例。如果您正苦于以下问题:Python BorgQueen类的具体用法?Python BorgQueen怎么用?Python BorgQueen使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了BorgQueen类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_energies
def get_energies(rootdir, reanalyze, verbose, detailed, sort):
    """Assimilate VASP runs under *rootdir* and print an energy summary table.

    Args:
        rootdir: Root directory to scan for VASP run directories.
        reanalyze: If True, ignore any cached SAVE_FILE and re-assimilate.
        verbose: If True, enable INFO-level logging with relative timestamps.
        detailed: If True, use VaspToComputedEntryDrone (keeps filename and
            initial structure); otherwise the faster SimpleVaspToComputedEntryDrone.
        sort: Table sort key, either "energy_per_atom" or "filename".
    """
    if verbose:
        FORMAT = "%(relativeCreated)d msecs : %(message)s"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    if not detailed:
        drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
    else:
        drone = VaspToComputedEntryDrone(inc_structure=True,
                                         data=["filename",
                                               "initial_structure"])
    ncpus = multiprocessing.cpu_count()
    logging.info("Detected {} cpus".format(ncpus))
    queen = BorgQueen(drone, number_of_drones=ncpus)
    if os.path.exists(SAVE_FILE) and not reanalyze:
        msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
            + " Use -f to force re-analysis."
        queen.load_data(SAVE_FILE)
    else:
        if ncpus > 1:
            queen.parallel_assimilate(rootdir)
        else:
            queen.serial_assimilate(rootdir)
        msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
              "subsequent loading."
        queen.save_data(SAVE_FILE)

    entries = queen.get_data()
    if sort == "energy_per_atom":
        entries = sorted(entries, key=lambda x: x.energy_per_atom)
    elif sort == "filename":
        entries = sorted(entries, key=lambda x: x.data["filename"])

    all_data = []
    for e in entries:
        if not detailed:
            delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
        else:
            # Percent volume change from the initial to the final structure.
            delta_vol = e.structure.volume / \
                e.data["initial_structure"].volume - 1
            delta_vol = "{:.2f}".format(delta_vol * 100)
        # FIX: raw string for the regex — "\s" is an invalid escape sequence
        # in a plain string literal.
        all_data.append((e.data["filename"].replace("./", ""),
                         re.sub(r"\s+", "", e.composition.formula),
                         "{:.5f}".format(e.energy),
                         "{:.5f}".format(e.energy_per_atom),
                         delta_vol))
    if len(all_data) > 0:
        headers = ("Directory", "Formula", "Energy", "E/Atom", "% vol chg")
        t = PrettyTable(headers)
        t.align["Directory"] = "l"
        for d in all_data:
            t.add_row(d)
        print(t)
        print(msg)
    else:
        print("No valid vasp run found.")
示例2: test_load_data
def test_load_data(self):
    """Loading a previously assimilated JSON file yields exactly one entry."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        queen = BorgQueen(VaspToComputedEntryDrone())
        queen.load_data(os.path.join(test_dir, "assimilated.json"))
        self.assertEqual(len(queen.get_data()), 1)
示例3: run_task
def run_task(self, fw_spec):
    """
    go through the measurement job dirs and
    put the measurement jobs in the database
    """
    db_params = self.get("dbase_params", {})
    drone = MPINTVaspToDbTaskDrone(**db_params)
    # One serial queen is enough here; no parallel drones needed.
    BorgQueen(drone).serial_assimilate(self["measure_dir"])
    return FWAction()
示例4: setUpClass
def setUpClass(cls):
    """Assimilate the success_mp_aflow test runs into 'creator_unittest'.

    When no MongoDB server is reachable, both qe and conn are set to None
    so individual tests can detect the missing connection.
    """
    try:
        queen = BorgQueen(VaspToDbTaskDrone(database="creator_unittest"))
        run_dir = os.path.join(test_dir, "db_test", "success_mp_aflow")
        queen.serial_assimilate(run_dir)
        cls.conn = MongoClient()
        cls.qe = QueryEngine(database="creator_unittest")
    except ConnectionFailure:
        cls.qe = None
        cls.conn = None
示例5: _extract_MP_data
def _extract_MP_data(self, MP_data_filename):
    """Load previously assimilated Materials Project data and return it.

    :param MP_data_filename: path to a BorgQueen-saved JSON data file
    :return: list of computed entries loaded from the file
    """
    drone = VaspToComputedEntryDrone()
    # "dummy" rootdir / 1 drone: data comes from the file, not a scan.
    queen = BorgQueen(drone, "dummy", 1)
    queen.load_data(MP_data_filename)
    # FIX: removed the redundant `del drone` / `del queen` — locals are
    # released when the function returns; explicit del added nothing.
    return queen.get_data()
示例6: get_energies
def get_energies(rootdir, reanalyze, verbose, pretty):
    """Assimilate Gaussian runs under *rootdir* and print an energy table.

    Args:
        rootdir: Root directory to scan for Gaussian output files.
        reanalyze: If True, ignore the cached save_file and re-assimilate.
        verbose: If True, enable INFO-level logging with relative timestamps.
        pretty: Unused here; kept for interface compatibility with callers.
    """
    if verbose:
        FORMAT = "%(relativeCreated)d msecs : %(message)s"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    drone = GaussianToComputedEntryDrone(inc_structure=True,
                                         parameters=['filename'])
    ncpus = multiprocessing.cpu_count()
    logging.info('Detected {} cpus'.format(ncpus))
    queen = BorgQueen(drone, number_of_drones=ncpus)
    if os.path.exists(save_file) and not reanalyze:
        # FIX: .format() previously bound only to the second string literal
        # ('Use -f ...'), so the '{}' placeholder was printed verbatim;
        # parenthesize the concatenation before formatting.
        msg = ('Using previously assimilated data from {}. '
               'Use -f to force re-analysis').format(save_file)
        queen.load_data(save_file)
    else:
        queen.parallel_assimilate(rootdir)
        msg = 'Results saved to {} for faster reloading.'.format(save_file)
        queen.save_data(save_file)
    entries = queen.get_data()
    entries = sorted(entries, key=lambda x: x.parameters['filename'])
    # FIX: raw string for the regex — "\s" is an invalid escape sequence
    # in a plain string literal.
    all_data = [(e.parameters['filename'].replace("./", ""),
                 re.sub(r"\s+", "", e.composition.formula),
                 "{}".format(e.parameters['charge']),
                 "{}".format(e.parameters['spin_mult']),
                 "{:.5f}".format(e.energy),
                 "{:.5f}".format(e.energy_per_atom),
                 ) for e in entries]
    headers = ("Directory", "Formula", "Charge", "Spin Mult.", "Energy",
               "E/Atom")
    print(tabulate(all_data, headers=headers))
    print("")
    print(msg)
示例7: test_get_data
def test_get_data(self):
    """Assimilating test_dir should produce eleven entries."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self.queen = BorgQueen(VaspToComputedEntryDrone(), test_dir, 1)
        self.assertEqual(len(self.queen.get_data()), 11)
示例8: AbstractVaspDirCollParser
class AbstractVaspDirCollParser(six.with_metaclass(abc.ABCMeta, object)):
    """Abstract base class for parsers of a collection of VASP directories

    To implement a new parser, inherit from this class and
    define the :meth:`compile` method.
    """

    def __init__(self, rootdir):
        """read vasp output via drone and extract all data

        :param rootdir: root directory containing collection of VASP dirs
        :type rootdir: str
        """
        # Imports are local to avoid importing pymatgen at module load time.
        from pymatgen.apps.borg.hive import SimpleVaspToComputedEntryDrone
        from pymatgen.apps.borg.queen import BorgQueen
        self.rootdir = rootdir
        self.drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
        # Single serial drone (third argument = 1).
        self.queen = BorgQueen(self.drone, rootdir, 1)  # TODO: make sure uw2_si2 also works in parallel
        self.data = self.queen.get_data()

    def find_entry_for_directory(self, regex, oszicar=True):
        """returns the computed entry for a VASP directory matching the regex

        :param regex: fnmatch-style pattern matched against entry.data['filename']
        :param oszicar: if True, skip entries whose energy is >= 1e10
            (presumably a sentinel for runs with no parsed final energy
            — TODO confirm against the drone's behavior)
        :return: the first matching entry in reverse order, or None
        """
        # scan in reverse alpha-numeric order under the assumption that
        # directories with the highest (local) index correspond to final VaspRun
        for entry in reversed(self.data):
            if fnmatch.fnmatch(entry.data['filename'], regex):
                if oszicar and not entry.energy < 1e10: continue
                return entry

    @abc.abstractmethod
    def compile(self):
        """compile the extracted data into a reduced dataset to be contributed"""
        return
示例9: get_e_v
def get_e_v(path):
    """
    uses pymatgen drone and borgqueen classes to get energy and
    volume data from the given directory path.
    """
    drone = MPINTVaspDrone(inc_structure=True)
    queen = BorgQueen(drone)
    queen.serial_assimilate(path)
    # Failed runs show up as falsy entries; keep only valid ones.
    valid_entries = [entry for entry in queen.get_data() if entry]
    energies = [entry.energy for entry in valid_entries]
    volumes = [entry.structure.lattice.volume for entry in valid_entries]
    return (volumes, energies)
示例10: __init__
def __init__(self, rootdir):
    """read vasp output via drone and extract all data

    :param rootdir: root directory containing collection of VASP dirs
    :type rootdir: str
    """
    self.rootdir = rootdir
    # Simple drone: keeps the final structure of each assimilated run.
    self.drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
    # Single serial drone (third argument = 1).
    self.queen = BorgQueen(self.drone, rootdir, 1)  # TODO: make sure uw2_si2 also works in parallel
    self.data = self.queen.get_data()
示例11: BorgQueenTest
class BorgQueenTest(unittest.TestCase):
    """Tests for BorgQueen directory assimilation and JSON (re)loading."""

    def setUp(self):
        self.queen = BorgQueen(VaspToComputedEntryDrone(), test_dir, 1)

    def test_get_data(self):
        """Assimilating test_dir should produce eight entries."""
        self.assertEqual(len(self.queen.get_data()), 8)

    def test_load_data(self):
        """Loading assimilated.json should yield exactly one entry."""
        fresh_queen = BorgQueen(VaspToComputedEntryDrone())
        fresh_queen.load_data(os.path.join(test_dir, "assimilated.json"))
        self.assertEqual(len(fresh_queen.get_data()), 1)
示例12: extract_json_data
def extract_json_data():
    """
    Routine tries to read VASP data into pymatgen objects, and
    then extracts only the relevant data. This is then written to json,
    allowing the voluminous OUTCAR and vasprun.xml files to be discarded.

    Missing or unreadable inputs are reported and skipped; run_data.json is
    written only when at least one of the two files could be parsed.
    """
    # FIX: narrowed the bare `except:` clauses to `except Exception` so
    # SystemExit / KeyboardInterrupt are no longer swallowed.
    try:
        o = Outcar('OUTCAR')
        found_outcar = True
    except Exception:
        print('OUTCAR file missing or not readable')
        found_outcar = False

    try:
        vr = Vasprun('vasprun.xml')
        found_vasprun = True
    except Exception:
        print('vasprun.xml file missing or not readable')
        found_vasprun = False

    dictionary_data = {}

    if found_outcar:
        dictionary_data['OUTCAR'] = o.as_dict()

    if found_vasprun:
        try:
            # try to extract a Computed Entry object, using
            # pymatgen technology
            drone = VaspToComputedEntryDrone()
            queen = BorgQueen(drone, './', 1)
            entry = queen.get_data()[0]
            dictionary_data['ComputedEntry'] = entry.as_dict()
        except Exception:
            print('ComputedEntry COULD NOT BE EXTRACTED BY PYMATGEN...')

        # NOTE: the complete DOS is deliberately NOT extracted into
        # run_data.json; it is too memory intensive.

        # Keep only forces/structure/stress plus the final electronic step
        # for each ionic relaxation step.
        relaxation_data = []
        for step in vr.ionic_steps:
            data_dict = {}
            for key in ['forces', 'structure', 'stress']:
                if key in step:
                    data_dict[key] = step[key]
            data_dict['electronic'] = step['electronic_steps'][-1]
            relaxation_data.append(data_dict)
        dictionary_data['relaxation'] = relaxation_data

    if found_outcar or found_vasprun:
        pmg_dump(dictionary_data, 'run_data.json')

    return
示例13: get_energies
def get_energies(rootdir, reanalyze, verbose, detailed,
sort, formulaunit, debug, hull, threshold, args, templatestructure):
ion_list = 'Novalue'
ave_key_list = 'Novalue'
threscount = 0
"""
Doc string.
"""
if (verbose and not debug):
FORMAT = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)
elif debug:
logging.basicConfig(level=logging.DEBUG)
if not detailed:
drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
else:
drone = VaspToComputedEntryDrone(inc_structure=True,
data=["filename",
"initial_structure"])
ncpus = multiprocessing.cpu_count()
logging.info("Detected {} cpus".format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(SAVE_FILE) and not reanalyze:
msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
+ " Use -f to force re-analysis."
queen.load_data(SAVE_FILE)
else:
if ncpus > 1:
queen.parallel_assimilate(rootdir)
else:
queen.serial_assimilate(rootdir)
msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
"subsequent loading."
queen.save_data(SAVE_FILE)
entries = queen.get_data()
if sort == "energy_per_atom":
entries = sorted(entries, key=lambda x: x.energy_per_atom)
elif sort == "filename":
entries = sorted(entries, key=lambda x: x.data["filename"])
# logging.debug('First Energy entry is {}'.format(entries[0]))
base_energy = entries[0].energy
logging.debug('Type of entries is: {}'.format(type(entries)))
logging.debug('First Element of Entries is:{}'.format(entries[0]))
# logging.debug('First Energy entry structure is {}'.format(entries[0].structure))
xy_direction = int(args.XYdirection)
tolerance = float(args.tolerance)
if args.template:
logging.debug('Temp Structure site info is: {}'.format(Na12(['Co','Mn'],['Na'],templatestructure,templatestructure,XY_Direction=xy_direction,tol=tolerance)))
template_site_info = Na12(['Co','Mn'],['Na'],templatestructure,templatestructure,XY_Direction=xy_direction,tol=tolerance)
all_data = []
energy_diff = []
threshold=float(threshold)
Structure_info_dict = {}
check_ion_seq = [args.dupion]
for e in entries:
if not detailed:
delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
else:
delta_vol = e.structure.volume / \
e.data["initial_structure"].volume - 1
delta_vol = "{:.2f}".format(delta_vol * 100)
entry_path = e.data['filename'].rsplit('/',1)[0]
entry_site_info = Na12(['Co','Mn'],['Na'],e.structure,e.structure,XY_Direction=xy_direction,tol=tolerance)
logging.debug('Total Na site: {}'.format(entry_site_info['Total_Na_Site']))
#Coordination extraction part
# na_layer_site_fcoords = [site._fcoords for site in s if site.specie.symbol == "Na"]
# if 'Cif_Structure' in e.data.keys():
# na_sites_fcoords = [site._fcoords for site in e.data['Cif_Structure'] if site.specie.symbol == 'Na']
# na_sites_fcoords_list_tuple = [tuple(coord) for coord in na_sites_fcoords]
na_sites_fcoords = [site._fcoords for site in e.data['CONTCAR_Structure'] if site.specie.symbol == 'Na']
na_sites_fcoords_list_tuple = [tuple(coord) for coord in na_sites_fcoords]
#.........这里部分代码省略.........
示例14: test_load_data
def test_load_data(self):
    """A saved JSON data file reloads into exactly one entry."""
    queen = BorgQueen(VaspToComputedEntryDrone())
    json_path = os.path.join(test_dir, "assimilated.json")
    queen.load_data(json_path)
    self.assertEqual(len(queen.get_data()), 1)
示例15: setUp
def setUp(self):
    """Build the shared BorgQueen fixture over test_dir (serial, 1 drone)."""
    drone = VaspToComputedEntryDrone()
    # Third argument 1 => a single drone, i.e. serial operation.
    self.queen = BorgQueen(drone, test_dir, 1)