This article collects typical usage examples of the Python function yaff.log.log.section. If you have been wondering what the section function does, how to use it, or what real code calling it looks like, the curated examples below may help.
The 13 code examples of the section function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: set_standard_masses
def set_standard_masses(self):
"""Initialize the ``masses`` attribute based on the atomic numbers."""
with log.section('SYS'):
from molmod.periodic import periodic
if self.masses is not None:
if log.do_warning:
log.warn('Overwriting existing masses with default masses.')
self.masses = np.array([periodic[n].mass for n in self.numbers])
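A minimal usage sketch for Example 1 (not from the original source; the file name system.chk and the log level call are assumptions):

import numpy as np
from yaff import System, log

log.set_level(log.medium)               # make the 'SYS' section output visible
sys = System.from_file('system.chk')    # hypothetical input file
sys.set_standard_masses()               # logs inside the 'SYS' section as shown above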
Example 2: g09log_to_hdf5
def g09log_to_hdf5(f, fn_log):
"""Convert Gaussian09 BOMD log file to Yaff HDF5 format.
**Arguments:**
f
An open and writable HDF5 file.
fn_log
The name of the Gaussian log file.
"""
with log.section('G09H5'):
if log.do_medium:
log('Loading Gaussian 09 file \'%s\' into \'trajectory\' of HDF5 file \'%s\'' % (
fn_log, f.filename
))
# First make sure the HDF5 file has a system description that is consistent
# with the XYZ file.
if 'system' not in f:
raise ValueError('The HDF5 file must contain a system group.')
if 'numbers' not in f['system']:
raise ValueError('The HDF5 file must have a system group with atomic numbers.')
natom = f['system/numbers'].shape[0]
# Take care of the trajectory group
tgrp = get_trajectory_group(f)
# Take care of the pos and vel datasets
dss = get_trajectory_datasets(tgrp,
('pos', (natom, 3)),
('vel', (natom, 3)),
('frc', (natom, 3)),
('time', (1,)),
('step', (1,)),
('epot', (1,)),
('ekin', (1,)),
('etot', (1,)),
)
ds_pos, ds_vel, ds_frc, ds_time, ds_step, ds_epot, ds_ekin, ds_etot = dss
# Load frame by frame
row = get_last_trajectory_row(dss)
for numbers, pos, vel, frc, time, step, epot, ekin, etot in _iter_frames_g09(fn_log):
if (numbers != f['system/numbers']).any():
log.warn('The element numbers of the HDF5 and LOG file do not match.')
write_to_dataset(ds_pos, pos, row)
write_to_dataset(ds_vel, vel, row)
write_to_dataset(ds_frc, frc, row)
write_to_dataset(ds_time, time, row)
write_to_dataset(ds_step, step, row)
write_to_dataset(ds_epot, epot, row)
write_to_dataset(ds_ekin, ekin, row)
write_to_dataset(ds_etot, etot, row)
row += 1
# Check number of rows
check_trajectory_rows(tgrp, dss, row)
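A possible way to call Example 2 (a hedged sketch; the file names and the System.to_hdf5 initialization are assumptions):

import h5py as h5
from yaff import System
from yaff.conversion import g09log_to_hdf5

sys = System.from_file('system.chk')      # hypothetical system file
with h5.File('trajectory.h5', 'w') as f:
    sys.to_hdf5(f)                        # creates the required 'system' group
    g09log_to_hdf5(f, 'gaussian.log')     # hypothetical Gaussian 09 BOMD log file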
Example 3: run
def run(self, nstep=None):
with log.section(self.log_name), timer.section(self.log_name):
if nstep is None:
while True:
if self.propagate():
break
else:
for i in xrange(nstep):
if self.propagate():
break
self.finalize()
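The run method above is shared by Yaff's iterative samplers; a hedged sketch using a VerletIntegrator (the force field ff is assumed to have been built beforehand):

from molmod.units import femtosecond
from yaff import VerletIntegrator

# ff is assumed to be a previously constructed yaff ForceField instance
verlet = VerletIntegrator(ff, 0.5*femtosecond)
verlet.run(1000)   # wraps the iteration loop in log.section/timer.section as shown above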
Example 4: __init__
def __init__(self, system, alpha, gcut=0.35, dielectric=1.0, exclude_frame=False, n_frame=0):
'''
**Arguments:**
system
The system to which this interaction applies.
alpha
The alpha parameter in the Ewald summation method.
**Optional arguments:**
gcut
The cutoff in reciprocal space.
dielectric
The scalar relative permittivity of the system.
exclude_frame
A boolean to exclude framework-framework interactions
(exclude_frame=True) for efficiency's sake in MC simulations.
n_frame
Number of framework atoms. This parameter is used to exclude
framework-framework neighbors when exclude_frame=True.
'''
ForcePart.__init__(self, 'ewald_reci', system)
if not system.cell.nvec == 3:
raise TypeError('The system must have a 3D periodic cell.')
if system.charges is None:
raise ValueError('The system does not have charges.')
self.system = system
self.alpha = alpha
self.gcut = gcut
self.dielectric = dielectric
self.update_gmax()
self.work = np.empty(system.natom*2)
    if exclude_frame and n_frame < 0:
        raise ValueError('The number of framework atoms to exclude must be non-negative.')
    elif not exclude_frame:
        n_frame = 0
self.n_frame = n_frame
if log.do_medium:
with log.section('FPINIT'):
log('Force part: %s' % self.name)
log.hline()
log(' alpha: %s' % log.invlength(self.alpha))
log(' gcut: %s' % log.invlength(self.gcut))
log(' relative permittivity: %5.3f' % self.dielectric)
log.hline()
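This constructor appears to belong to yaff's ForcePartEwaldReciprocal; a hedged instantiation sketch (the alpha and gcut values are arbitrary assumptions, and system is assumed to be a 3D-periodic System with charges assigned):

from molmod.units import angstrom
from yaff.pes.ff import ForcePartEwaldReciprocal

# system: a periodic yaff System with charges; alpha/gcut chosen only for illustration
part = ForcePartEwaldReciprocal(system, alpha=4.0/(11.0*angstrom),
                                gcut=0.35/angstrom)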
Example 5: __init__
def __init__(self, ff, state=None, hooks=None, counter0=0):
"""
**Arguments:**
ff
The ForceField instance used in the iterative algorithm
**Optional arguments:**
state
A list with state items. State items are simple objects
that take or derive a property from the current state of the
iterative algorithm.
hooks
A function (or a list of functions) that is called after every
            iteration.
counter0
The counter value associated with the initial state.
"""
self.ff = ff
if state is None:
self.state_list = [state_item.copy() for state_item in self.default_state]
else:
#self.state_list = state
self.state_list = [state_item.copy() for state_item in self.default_state]
self.state_list += state
self.state = dict((item.key, item) for item in self.state_list)
if hooks is None:
self.hooks = []
elif hasattr(hooks, '__len__'):
self.hooks = hooks
else:
self.hooks = [hooks]
self._add_default_hooks()
self.counter0 = counter0
self.counter = counter0
with log.section(self.log_name), timer.section(self.log_name):
self.initialize()
# Initialize restart hook if present
from yaff.sampling.io import RestartWriter
for hook in self.hooks:
if isinstance(hook, RestartWriter):
hook.init_state(self)
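The hooks handling above accepts either a single hook or a list of hooks; a hedged sketch (HDF5Writer and the file name are assumptions, ff is a previously built ForceField):

import h5py as h5
from molmod.units import femtosecond
from yaff import VerletIntegrator, HDF5Writer

f = h5.File('output.h5', 'w')
# a single hook and a list of hooks are both accepted and normalized internally
verlet = VerletIntegrator(ff, 0.5*femtosecond, hooks=HDF5Writer(f))
# verlet = VerletIntegrator(ff, 0.5*femtosecond, hooks=[HDF5Writer(f)])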
Example 6: _verify_hooks
def _verify_hooks(self):
with log.section('ENSEM'):
thermo = None
index_thermo = 0
baro = None
index_baro = 0
# Look for the presence of a thermostat and/or barostat
if hasattr(self.hooks, '__len__'):
for index, hook in enumerate(self.hooks):
if hook.method == 'thermostat':
thermo = hook
index_thermo = index
elif hook.method == 'barostat':
baro = hook
index_baro = index
elif self.hooks is not None:
if self.hooks.method == 'thermostat':
thermo = self.hooks
elif self.hooks.method == 'barostat':
baro = self.hooks
# If both are present, delete them and generate TBCombination element
if thermo is not None and baro is not None:
from yaff.sampling.npt import TBCombination
if log.do_warning:
log.warn('Both thermostat and barostat are present separately and will be merged')
            del self.hooks[max(index_thermo, index_baro)]
del self.hooks[min(index_thermo, index_baro)]
self.hooks.append(TBCombination(thermo, baro))
if hasattr(self.hooks, '__len__'):
for hook in self.hooks:
if hook.name == 'TBCombination':
thermo = hook.thermostat
baro = hook.barostat
elif self.hooks is not None:
if self.hooks.name == 'TBCombination':
thermo = self.hooks.thermostat
baro = self.hooks.barostat
if log.do_warning:
if thermo is not None:
log('Temperature coupling achieved through ' + str(thermo.name) + ' thermostat')
if baro is not None:
log('Pressure coupling achieved through ' + str(baro.name) + ' barostat')
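A hedged illustration of the merging logic in Example 6: when both a thermostat hook and a barostat hook are passed, they are replaced by a single TBCombination (the class names and signatures below are assumptions based on yaff.sampling):

from molmod.units import femtosecond, kelvin, bar
from yaff import VerletIntegrator
from yaff.sampling.nvt import NHCThermostat
from yaff.sampling.npt import MTKBarostat

# ff is assumed to be a previously constructed yaff ForceField instance
thermo = NHCThermostat(300*kelvin)
baro = MTKBarostat(ff, 300*kelvin, 1*bar)
# _verify_hooks warns and merges the two hooks into one TBCombination
verlet = VerletIntegrator(ff, 0.5*femtosecond, hooks=[thermo, baro])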
Example 7: detect_ffatypes
def detect_ffatypes(self, rules):
"""Initialize the ``ffatypes`` attribute based on ATSELECT rules.
**Argument:**
rules
A list of (ffatype, rule) pairs that will be used to initialize
the attributes ``self.ffatypes`` and ``self.ffatype_ids``.
If the system already has FF atom types, they will be overwritten.
"""
with log.section('SYS'):
# Give warning if needed
if self.ffatypes is not None:
if log.do_warning:
log.warn('Overwriting existing FF atom types.')
# Compile all the rules
my_rules = []
for ffatype, rule in rules:
check_name(ffatype)
if isinstance(rule, basestring):
rule = atsel_compile(rule)
my_rules.append((ffatype, rule))
# Use the rules to detect the atom types
lookup = {}
self.ffatypes = []
self.ffatype_ids = np.zeros(self.natom, int)
for i in xrange(self.natom):
my_ffatype = None
for ffatype, rule in my_rules:
if rule(self, i):
my_ffatype = ffatype
break
if my_ffatype is None:
raise ValueError('Could not detect FF atom type of atom %i.' % i)
ffatype_id = lookup.get(my_ffatype)
if ffatype_id is None:
ffatype_id = len(lookup)
self.ffatypes.append(my_ffatype)
lookup[my_ffatype] = ffatype_id
self.ffatype_ids[i] = ffatype_id
# Make sure all is done well ...
self._init_derived_ffatypes()
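A short usage sketch for Example 7 with simple element-based ATSELECT rules (the rules and file name are arbitrary assumptions):

from yaff import System

sys = System.from_file('system.chk')   # hypothetical input file
sys.detect_ffatypes([
    ('C', '6'),    # atomic number 6: carbon
    ('H', '1'),    # atomic number 1: hydrogen
    ('O', '8'),    # atomic number 8: oxygen
])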
Example 8: detect_bonds
def detect_bonds(self, exceptions=None):
"""Initialize the ``bonds`` attribute based on inter-atomic distances
**Optional argument:**
exceptions:
Specify custom threshold for certain pairs of elements. This
must be a dictionary with ((num0, num1), threshold) as items.
For each pair of elements, a distance threshold is used to detect
bonded atoms. The distance threshold is based on a database of known
bond lengths. If the database does not contain a record for the given
element pair, the threshold is based on the sum of covalent radii.
"""
with log.section('SYS'):
from molmod.bonds import bonds
if self.bonds is not None:
if log.do_warning:
log.warn('Overwriting existing bonds.')
work = np.zeros((self.natom*(self.natom-1))/2, float)
self.cell.compute_distances(work, self.pos)
ishort = (work < bonds.max_length*1.01).nonzero()[0]
new_bonds = []
for i in ishort:
i0, i1 = _unravel_triangular(i)
n0 = self.numbers[i0]
n1 = self.numbers[i1]
if exceptions is not None:
threshold = exceptions.get((n0, n1))
if threshold is None and n0!=n1:
threshold = exceptions.get((n1, n0))
if threshold is not None:
if work[i] < threshold:
new_bonds.append([i0, i1])
continue
if bonds.bonded(n0, n1, work[i]):
new_bonds.append([i0, i1])
self.bonds = np.array(new_bonds)
self._init_derived_bonds()
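A hedged usage sketch for Example 8; the custom threshold for the (Fe, O) element pair is an arbitrary example value:

from molmod.units import angstrom
from yaff import System

sys = System.from_file('system.chk')                   # hypothetical input file
sys.detect_bonds(exceptions={(26, 8): 2.3*angstrom})   # custom Fe-O bond cutoff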
Example 9: pca_convergence
def pca_convergence(f, eq_time=0*picosecond, n_parts=None, step=1, fn='PCA_convergence', n_bootstrap=50, mw=True):
"""
Calculates the convergence of the simulation by calculating the pca
similarity for different subsets of the simulation.
**Arguments:**
f
An h5.File instance containing the trajectory data.
**Optional arguments:**
eq_time
Equilibration time, discarded from the simulation.
n_parts
Array containing the number of parts in which
the total simulation is divided.
step
Stepsize used in the trajectory.
fn
Filename containing the convergence plot.
n_bootstrap
The number of bootstrapped trajectories.
mw
            If mw is True, the covariance matrix is mass-weighted.
"""
# Configure n_parts, the array containing the number of parts in which the total simulation is divided
if n_parts is None:
n_parts = np.array([1,3,10,30,100,300])
# Read in the timestep and the number of atoms
time = f['trajectory/time']
timestep = time[1] - time[0]
time_length = len(time)
# Determine the equilibration size
eq_size = int(eq_time/timestep)
### ---PART A: SIMILARITY OF THE TRUE TRAJECTORY--- ###
# Calculate the covariance matrix of the whole production run as golden standard
covar_total, q_ref = calc_cov_mat(f, start=eq_size, step=step, mw=mw)
# Initialize the average similarity vector of the divided trajectories
sim_block = np.zeros(len(n_parts))
# Calculate this average similarity vector
for j in range(len(n_parts)):
# Determine in how many parts the trajectory should be divided and the corresponding block size
n_part = n_parts[j]
block_size = (time_length-eq_size)//n_part
# Calculate the n_part covariance matrices and compare with the total covariance matrix
tot_sim_block=0
for i in range(n_part):
start = eq_size + i*block_size
covars, tmp = calc_cov_mat(f, start=start, end=start+block_size+1, step=step, mw=mw)
tot_sim_block += pca_similarity(covars, covar_total)
# Determine the average similarity
sim_block[j] = tot_sim_block/n_part
### ---PART B: SIMILARITY OF BOOTSTRAPPED TRAJECTORIES --- ###
# Read in the positions, which will be used to generate bootstrapped trajectories
pos = f['trajectory/pos'][eq_size:,:,:]
pos = pos.reshape(pos.shape[0], -1)
if mw:
# Read in the masses of the atoms, and replicate them d times (d=dimension)
masses = f['system/masses']
masses = np.repeat(masses,3)
# Create the mass-weighted positions matrix, on which the bootstrapping will be based
pos *= np.sqrt(masses)
# Initialize the vector containing the average similarity over all the bootstrapped, divided trajectories
sim_bt_all = np.zeros(len(n_parts))
for k in range(n_bootstrap):
with log.section('PCA'):
log('Processing %s of %s bootstrapped trajectories' %(k+1,n_bootstrap))
# Create a bootstrapped trajectory bt
pos_bt = np.zeros(pos.shape)
            random_time = (random.random(time_length)*time_length).astype(int)  # integer frame indices
for h in np.arange(time_length):
pos_bt[h,:] = pos[random_time[h],:]
# Covariance matrix of the total bootstrapped trajectory
covar_bt_total, tmp = calc_cov_mat_internal(pos_bt)
# Initialize the vector containing the average similarity over the different blocks,
# for the given bootstrapped trajectory
sim_bt = np.zeros(len(n_parts))
for j in range(len(n_parts)):
#......... the rest of this function's code is omitted here .........
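A minimal call sketch for Example 9 (hedged; the file name and equilibration time are assumptions):

import h5py as h5
from molmod.units import picosecond
from yaff.analysis.pca import pca_convergence

with h5.File('trajectory.h5', 'r') as f:
    pca_convergence(f, eq_time=5*picosecond, fn='PCA_convergence')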
Example 10: write_principal_mode
def write_principal_mode(f, f_pca, index, n_frames=100, select=None, mw=True, scaling=1.):
"""
Writes out one xyz file per principal mode given in index
**Arguments:**
f
Path to an h5.File instance containing the original data.
f_pca
Path to an h5.File instance containing the PCA, with reference structure, eigenvalues
and principal modes.
index
An array containing the principal modes which need to be written out.
**Optional arguments:**
n_frames
The number of frames in each xyz file.
select
A list of atom indexes that are considered for the computation
of the spectrum. If not given, all atoms are used.
mw
            If mw is True, the covariance matrix is assumed to be mass-weighted.
scaling
Scaling factor applied to the maximum deviation of the principal mode (i.e. the maximum
principal component for that mode)
"""
# Load in the relevant data
# Atom numbers, masses and initial frame
numbers = f['system/numbers']
masses = f['system/masses']
pos = f['trajectory/pos']
if select is not None:
numbers = numbers[select]
masses = masses[select]
pos = pos[:,select,:]
masses = np.repeat(masses,3)
# Data from the PC analysis
grp = f_pca['pca']
# The selected principal modes
pm = grp['pm'][:,index]
# The corresponding eigenvalues
eigval = grp['eigvals'][index]
# And the principal components
pc = grp['pc'][:,index]
with log.section('PCA'):
for i in range(len(index)):
log('Writing out principal mode %s' %index[i])
if eigval[i] < 0:
                log.warn('Negative eigenvalue encountered, skipping this entry')
                continue
# Determine maximum fluctuation (in units of meter*sqrt(kilogram))
max_fluct = np.max(np.abs(pc[:,i]))
# Initialize XYZWriter from molmod package
xw = XYZWriter('pm_%s.xyz' %index[i], [pd[number].symbol for number in numbers])
# Determine index in trajectory closest to rest state, and the corresponding positions
ind_min = np.argmin(np.abs(pc[:,i]))
r_ref = pos[ind_min,:,:]
for j in range(n_frames):
q_var = scaling*pm[:,i]*max_fluct*(2.*j-n_frames)/n_frames
if mw:
q_var /= np.sqrt(masses)
r = r_ref + q_var.reshape(-1,3)
xw.dump('Frame%s' %j, r)
del xw
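A hedged call sketch for Example 10; note that, despite the docstring wording, the function indexes f and f_pca directly, so open h5.File objects are passed here (file names are assumptions):

import numpy as np
import h5py as h5
from yaff.analysis.pca import write_principal_mode

with h5.File('trajectory.h5', 'r') as f, h5.File('pca.h5', 'r') as f_pca:
    write_principal_mode(f, f_pca, np.array([0, 1, 2]), n_frames=100)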
Example 11: pca_projection
def pca_projection(f_target, f, pm, start=0, end=None, step=1, select=None, path='trajectory/pos', mw=True):
"""
Determines the principal components of an MD simulation
**Arguments:**
f_target
Path to an h5.File instance to which the results are written.
f
An h5.File instance containing the trajectory data.
pm
An array containing the principal modes in its columns
**Optional arguments:**
start
The first sample to be considered for analysis. This may be
negative to indicate that the analysis should start from the
-start last samples.
end
The last sample to be considered for analysis. This may be
negative to indicate that the last -end sample should not be
considered.
step
The spacing between the samples used for the analysis
select
A list of atom indexes that are considered for the computation
of the spectrum. If not given, all atoms are used.
path
The path of the dataset that contains the time dependent data in
the HDF5 file. The first axis of the array must be the time
axis.
mw
            If mw is True, the covariance matrix is mass-weighted.
"""
# Load in the relevant data
q = f[path][start:end:step,:,:]
# Select the given atoms
if select is not None:
q = q[:,select,:]
# Reshape such that all Cartesian coordinates are treated equally
q = q.reshape(q.shape[0],-1)
# If necessary, weight with the mass
if mw:
# Select the necessary masses
masses = f['system/masses']
if select is not None:
masses = masses[select]
# Repeat d times, with d the dimension
masses = np.repeat(masses,3)
# Reweight with the masses
q *= np.sqrt(masses)
# Calculation of the principal components: projection of each q_j on the principal modes
with log.section('PCA'):
log('Determining principal components')
prin_comp = np.dot(q, pm)
# Create output HDF5 file
with h5.File(f_target, 'a') as g:
if not 'pca' in g:
pca = g.create_group('pca')
else:
pca = g['pca']
pca.create_dataset('pc', data=prin_comp)
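A hedged call sketch for Example 11; the principal modes are read from a previously written PCA file (all file and dataset names are assumptions):

import h5py as h5
from yaff.analysis.pca import pca_projection

with h5.File('pca.h5', 'r') as g:
    pm = g['pca/pm'][:]                     # principal modes from an earlier calc_pca run
with h5.File('trajectory.h5', 'r') as f:
    pca_projection('pca.h5', f, pm, mw=True)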
Example 12: calc_pca
def calc_pca(f_target, cov_mat=None, f=None, q_ref=None, start=0, end=None, step=1, select=None, path='trajectory/pos', mw=True, temp=None):
"""
    Performs a principal component analysis of the given trajectory.
**Arguments:**
f_target
Path to an h5.File instance to which the results are written.
**Optional arguments:**
cov_mat
The covariance matrix, if already calculated. If not provided,
            the covariance matrix will be calculated based on the file f.
f
An h5.File instance containing the trajectory data.
q_ref
Reference vector of the positions. If not provided, the ensemble
average is taken.
start
The first sample to be considered for analysis. This may be
negative to indicate that the analysis should start from the
-start last samples.
end
The last sample to be considered for analysis. This may be
negative to indicate that the last -end sample should not be
considered.
step
The spacing between the samples used for the analysis
select
A list of atom indexes that are considered for the computation
of the spectrum. If not given, all atoms are used.
path
The path of the dataset that contains the time dependent data in
the HDF5 file. The first axis of the array must be the time
axis.
mw
            If mw is True, the covariance matrix is mass-weighted.
temp
Temperature at which the simulation is carried out, necessary to determine the frequencies
"""
if cov_mat is None:
if f is None:
            raise AssertionError('No covariance matrix nor h5.File instance provided.')
else:
with log.section('PCA'):
log('Calculating covariance matrix')
cov_mat, q_ref = calc_cov_mat(f, q_ref, start, end, step, select, path, mw)
with log.section('PCA'):
log('Diagonalizing the covariance matrix')
# Eigenvalue decomposition
eigval, eigvec = np.linalg.eigh(cov_mat)
# Order the eigenvalues in decreasing order
idx = eigval.argsort()[::-1]
eigval = eigval[idx]
eigvec = eigvec[:,idx]
# Create output HDF5 file
with h5.File(f_target, 'w') as g:
pca = g.create_group('pca')
# Output reference structure q_ref
pca.create_dataset('q_ref', data=q_ref)
# Output covariance matrix
pca.create_dataset('cov_matrix', data=cov_mat)
# Output eigenvectors in columns
pca.create_dataset('pm', data=eigvec)
# Output eigenvalues
pca.create_dataset('eigvals', data=eigval)
log('Determining inverse of the covariance matrix')
# Process matrix to determine inverse
# First, project out the three zero eigenvalues (translations)
eigvec_reduced = eigvec[:,:-3]
eigval_reduced = eigval[:-3]
# Second, calculate the reduced covariance matrix and its inverse
cov_mat_reduced = np.dot(np.dot(eigvec_reduced, np.diag(eigval_reduced)), eigvec_reduced.T)
cov_mat_inverse = np.dot(np.dot(eigvec_reduced, np.diag(1/eigval_reduced)), eigvec_reduced.T)
pca.create_dataset('cov_mat_red', data=cov_mat_reduced)
pca.create_dataset('cov_mat_inv', data=cov_mat_inverse)
# Third, if the temperature is specified, calculate the frequencies
# (the zero frequencies are mentioned last so that their index corresponds to the principal modes)
if temp is not None:
log('Determining frequencies')
frequencies = np.append(np.sqrt(boltzmann*temp/eigval_reduced)/(2*np.pi), np.repeat(0,3))
pca.create_dataset('freqs', data=frequencies)
return eigval, eigvec
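A hedged call sketch for Example 12 (file names and temperature are assumptions):

import h5py as h5
from molmod.units import kelvin
from yaff.analysis.pca import calc_pca

with h5.File('trajectory.h5', 'r') as f:
    eigval, eigvec = calc_pca('pca.h5', f=f, temp=300*kelvin)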
Example 13: dlpoly_history_to_hdf5
def dlpoly_history_to_hdf5(f, fn_history, sub=slice(None), pos_unit=angstrom,
vel_unit=angstrom/picosecond, frc_unit=amu*angstrom/picosecond**2,
time_unit=picosecond, mass_unit=amu):
"""Convert DLPolay History trajectory file to Yaff HDF5 format.
**Arguments:**
f
An open and writable HDF5 file.
fn_history
The filename of the DLPOLY history file.
**Optional arguments:**
sub
The sub argument for the DLPolyHistoryReader. This must be a slice
object that defines the subsampling of the samples from the history
file. By default all frames are read.
pos_unit, vel_unit, frc_unit, time_unit and mass_unit
The units used in the dlpoly history file. The default values
correspond to the defaults used in DLPOLY.
This routine will also test the consistency of the row attribute of the
trajectory group. If some trajectory data is already present, it will be
replaced by the new data. It is highly recommended to first initialize
the HDF5 file with the ``to_hdf5`` method of the System class.
"""
with log.section('DPH5'):
if log.do_medium:
log('Loading DLPOLY history file \'%s\' into \'trajectory\' of HDF5 file \'%s\'' % (
fn_history, f.filename
))
# Take care of the data group
tgrp = get_trajectory_group(f)
# Open the history file for reading
hist_reader = DLPolyHistoryReader(fn_history, sub, pos_unit, vel_unit,
frc_unit, time_unit, mass_unit)
# Take care of the datasets that should always be present
natom = hist_reader.num_atoms
dss = get_trajectory_datasets(
tgrp,
('step', (1,)),
('time', (1,)),
('cell', (3,3)),
('pos', (natom, 3)),
)
ds_step, ds_time, ds_cell, ds_pos = dss
# Take care of optional data sets
if hist_reader.keytrj > 0:
ds_vel = get_trajectory_datasets(tgrp, ('vel', (natom, 3)))[0]
dss.append(ds_vel)
if hist_reader.keytrj > 1:
ds_frc = get_trajectory_datasets(tgrp, ('frc', (natom, 3)))[0]
dss.append(ds_frc)
# Decide on the first row to start writing data
row = get_last_trajectory_row(dss)
# Load data
for frame in hist_reader:
write_to_dataset(ds_step, frame["step"], row)
write_to_dataset(ds_time, frame["time"], row)
write_to_dataset(ds_cell, frame["cell"].T, row)
write_to_dataset(ds_pos, frame["pos"], row)
if hist_reader.keytrj > 0:
write_to_dataset(ds_vel, frame["vel"], row)
if hist_reader.keytrj > 1:
write_to_dataset(ds_frc, frame["frc"], row)
row += 1
# Check number of rows
check_trajectory_rows(tgrp, dss, row)
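A hedged call sketch for Example 13 (assumes the HDF5 file is first initialized with System.to_hdf5, as the docstring recommends; file names are assumptions):

import h5py as h5
from yaff import System
from yaff.conversion import dlpoly_history_to_hdf5

sys = System.from_file('system.chk')       # hypothetical system file
with h5.File('trajectory.h5', 'w') as f:
    sys.to_hdf5(f)
    dlpoly_history_to_hdf5(f, 'HISTORY')   # DL_POLY history file with default units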