本文整理汇总了Python中xia2.Handlers.Streams.Chatter.write方法的典型用法代码示例。如果您正苦于以下问题:Python Chatter.write方法的具体用法?Python Chatter.write怎么用?Python Chatter.write使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类xia2.Handlers.Streams.Chatter
的用法示例。
在下文中一共展示了Chatter.write方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def run():
    """Entry point: run the environment check and the main check step.

    Any exception is written (with full traceback) to 'xia2.error' and a
    one-line status is reported on the Chatter stream.
    """
    try:
        check_environment()
        check()
    except Exception as e:
        # 'except E, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
        # Use a context manager so the error file is flushed and closed.
        with open('xia2.error', 'w') as fp:
            traceback.print_exc(file=fp)
        Chatter.write('Status: error "%s"' % str(e))
示例2: run
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def run():
    """Entry point: pick up a cached PHIL file, then verify the environment.

    On failure the traceback is saved to 'xia2.error' and a status line is
    written to Chatter.
    """
    # Re-use parameters from a previous run in this directory, if present.
    if os.path.exists('xia2-working.phil'):
        sys.argv.append('xia2-working.phil')
    try:
        check_environment()
    except Exception as e:
        # Portable exception syntax; close the error file deterministically.
        with open('xia2.error', 'w') as fp:
            traceback.print_exc(file=fp)
        Chatter.write('Status: error "%s"' % str(e))
示例3: get_new_scales_file
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def get_new_scales_file(self):
    '''Get the file to which the scales have been written.

    Returns the absolute path of the scales file, or None when no scales
    file was configured for this run.

    :raises RuntimeError: if a scales file was expected but Aimless did
        not produce it (i.e. the data were not scaled).
    '''
    if not self._new_scales_file:
        # Guard: the original fell through to os.path.join(..., None),
        # which raises TypeError. Return None explicitly instead.
        return None
    path = os.path.join(self.get_working_directory(), self._new_scales_file)
    if not os.path.isfile(path):
        Chatter.write(
            "Aimless did not scale the data, see log file for more details:\n %s" % self.get_log_file())
        raise RuntimeError('data not scaled')
    return path
示例4: _integrate
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def _integrate(self):
    '''Implement the integrater interface: journal the job parameters and
    run mosflm integration, in serial or parallel depending on nproc.

    On a "negative mosaic spread" IntegrationError, either rerun with the
    mosaic fix enabled or, if already fixed once, give up with
    BadLatticeError.
    '''
    # cite the program
    Citations.cite('mosflm')

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    # Truncate very long directory names so the journal block stays tidy.
    if len(self._fp_directory) <= 50:
        dirname = self._fp_directory
    else:
        dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'mosflm',
        {'images': images_str,
         'cell': cell_str,
         'lattice': self.get_integrater_refiner().get_refiner_lattice(),
         'template': self._fp_template,
         'directory': dirname,
         'resolution': '%.2f' % self._intgr_reso_high})

    self._mosflm_rerun_integration = False
    # NOTE(review): 'wd' looks unused in the visible code - presumably used
    # further on in the full source; kept to preserve behaviour.
    wd = self.get_working_directory()

    try:
        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()

        nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
        if nproc > 1:
            Debug.write('Parallel integration: %d jobs' % nproc)
            self._mosflm_hklout = self._mosflm_parallel_integrate()
        else:
            self._mosflm_hklout = self._mosflm_integrate()

        # record integration output for e.g. BLEND.
        sweep = self.get_integrater_sweep_name()
        if sweep:
            FileHandler.record_more_data_file(
                '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
                self._mosflm_hklout)
    except IntegrationError as e:
        if 'negative mosaic spread' in str(e):
            if self._mosflm_postref_fix_mosaic:
                # Already tried the fix once - this lattice is hopeless.
                Chatter.write('Negative mosaic spread - stopping integration')
                raise BadLatticeError('negative mosaic spread')
            # First occurrence: enable the fix and mark for a rerun.
            Chatter.write('Negative mosaic spread - rerunning integration')
            self.set_integrater_done(False)
            self._mosflm_postref_fix_mosaic = True
示例5: integrate
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def integrate(self):
    '''Actually perform integration until we think we are done...

    Drives the prepare/do state machine; a BadLatticeError at either
    stage eliminates the current indexing solution and resets the
    integrater so the whole cycle restarts with the next solution.
    '''
    while not self.get_integrater_finish_done():
        while not self.get_integrater_done():
            while not self.get_integrater_prepare_done():
                Debug.write('Preparing to do some integration...')
                self.set_integrater_prepare_done(True)

                # if this raises an exception, perhaps the autoindexing
                # solution has too high symmetry. if this the case, then
                # perform a self._intgr_indexer.eliminate() - this should
                # reset the indexing system
                try:
                    self._integrate_prepare()
                except BadLatticeError as e:
                    Journal.banner('eliminated this lattice', size=80)
                    Chatter.write('Rejecting bad lattice %s' % str(e))
                    self._intgr_refiner.eliminate()
                    self._integrater_reset()

            # FIXME x1698 - may be the case that _integrate() returns the
            # raw intensities, _integrate_finish() returns intensities
            # which may have been adjusted or corrected. See #1698 below.
            Debug.write('Doing some integration...')
            self.set_integrater_done(True)

            template = self.get_integrater_sweep().get_template()
            if self._intgr_sweep_name:
                if PhilIndex.params.xia2.settings.show_template:
                    Chatter.banner('Integrating %s (%s)' %
                                   (self._intgr_sweep_name, template))
                else:
                    Chatter.banner('Integrating %s' %
                                   (self._intgr_sweep_name))

            try:
                #1698
                self._intgr_hklout_raw = self._integrate()
            except BadLatticeError as e:
                Chatter.write('Rejecting bad lattice %s' % str(e))
                Journal.banner('eliminated this lattice', size=80)
                self._intgr_refiner.eliminate()
                self._integrater_reset()
示例6: run
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def run():
    """Command-line entry point: print help when requested, otherwise
    verify the environment, logging any failure to 'xia2.error'."""
    from libtbx.utils import Sorry  # kept from original; presumably used downstream

    if len(sys.argv) < 2 or '-help' in sys.argv or '--help' in sys.argv:
        help()
        sys.exit()

    try:
        check_environment()
    except Exception as e:
        # Portable exception syntax; ensure the error file is closed.
        with open('xia2.error', 'w') as fp:
            traceback.print_exc(file=fp)
        Chatter.write('Status: error "%s"' % str(e))
示例7: eliminate
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def eliminate(self, indxr_print=True):
    '''Eliminate the highest currently allowed lattice.

    :param indxr_print: if True, report the eliminated solution on Chatter.
    :raises RuntimeError: if only a single candidate solution remains.
    '''
    if len(self._sorted_list) <= 1:
        raise RuntimeError('cannot eliminate only solution')
    if indxr_print:
        Chatter.write('Eliminating indexing solution:')
        Chatter.write(self.repr()[0])
    # Drop the top-ranked (highest-symmetry) solution from the list.
    self._sorted_list = self._sorted_list[1:]
示例8: _index_prepare
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def _index_prepare(self):
    """Run spot-finding for this sweep, then report the per-image spot
    counts as an ASCII-art plot on the Chatter stream."""
    Chatter.banner('Spotfinding %s' % self.get_indexer_sweep_name())
    super(XDSIndexerII, self)._index_prepare()

    from dials.array_family import flex
    from dials.util.ascii_art import spot_counts_per_image_plot

    # Convert the XDS SPOT.XDS output into a DIALS reflection pickle so the
    # standard plotting helper can consume it.
    pickle_path = spot_xds_to_reflection_pickle(
        self._indxr_payload['SPOT.XDS'],
        working_directory=self.get_working_directory())
    reflections = flex.reflection_table.from_pickle(pickle_path)
    Chatter.write(spot_counts_per_image_plot(reflections), strip=False)
示例9: which
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def which(pgm, debug=False):
    """Python equivalent of the Unix 'which' command.

    Walk PATH and return the first existing, executable match for *pgm*,
    or None if there is no match.

    http://stackoverflow.com/questions/9877462/is-there-a-python-equivalent-to-the-which-command
    """
    # FIXME this will not work on Windows as you need to check that there is a
    # .bat or a .exe extension
    # FIXME also this is implemented in Driver/DriverHelper.py:executable_exists

    # os.getenv returns None when PATH is unset - fall back to '' so the
    # split below cannot raise AttributeError.
    path = os.getenv('PATH') or ''
    for entry in path.split(os.pathsep):  # os.pathsep is the canonical spelling
        candidate = os.path.join(entry, pgm)
        if debug:
            Chatter.write('Seeking %s' % candidate)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    # Explicit None for "not found" (the original fell off the end).
    return None
示例10: run
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def run(self):
    """Assemble the dials.refine command line, execute it and verify
    that the refined reflections and experiments files were produced.

    :raises RuntimeError: if dials.refine produced no output files, or
        if it reported too few reflections to refine.
    """
    from xia2.Handlers.Streams import Chatter, Debug
    Debug.write('Running dials.refine')
    self.clear_command_line()

    # Positional inputs: experiments then indexed reflections.
    self.add_command_line(self._experiments_filename)
    self.add_command_line(self._indexed_filename)
    self.add_command_line('scan_varying=%s' % self._scan_varying)
    if self._close_to_spindle_cutoff is not None:
        self.add_command_line(
            'close_to_spindle_cutoff=%f' % self._close_to_spindle_cutoff)
    if self._outlier_algorithm:
        self.add_command_line('outlier.algorithm=%s' % self._outlier_algorithm)

    # Output file names carry the xpid so parallel jobs do not collide.
    self._refined_experiments_filename = os.path.join(
        self.get_working_directory(),
        '%s_refined_experiments.json' % self.get_xpid())
    self.add_command_line(
        'output.experiments=%s' % self._refined_experiments_filename)
    self._refined_filename = os.path.join(
        self.get_working_directory(), '%s_refined.pickle' % self.get_xpid())
    self.add_command_line('output.reflections=%s' % self._refined_filename)

    if self._reflections_per_degree is not None:
        self.add_command_line(
            'reflections_per_degree=%i' % self._reflections_per_degree)
    if self._interval_width_degrees is not None:
        # Same smoother interval for both unit cell and orientation.
        self.add_command_line(
            'unit_cell.smoother.interval_width_degrees=%i' % self._interval_width_degrees)
        self.add_command_line(
            'orientation.smoother.interval_width_degrees=%i' % self._interval_width_degrees)
    if self._detector_fix:
        self.add_command_line('detector.fix=%s' % self._detector_fix)
    if self._beam_fix:
        self.add_command_line('beam.fix=%s' % self._beam_fix)
    if self._phil_file is not None:
        self.add_command_line('%s' % self._phil_file)

    self.start()
    self.close_wait()

    if not os.path.isfile(self._refined_filename) or \
       not os.path.isfile(self._refined_experiments_filename):
        Chatter.write(
            "DIALS did not refine the data, see log file for more details:\n %s" % self.get_log_file())
        raise RuntimeError('data not refined')
    for record in self.get_all_output():
        if 'Sorry: Too few reflections to' in record:
            raise RuntimeError(record.strip())
    self.check_for_errors()
    return
示例11: make_logfile_html
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def make_logfile_html(logfile):
    """Render the loggraph tables found in *logfile* as reStructuredText
    with embedded c3.js charts, accumulating the output in 'rst'.

    Returns None immediately when the log file contains no loggraph
    tables. Errors while rendering an individual table are reported on
    Chatter/Debug and that table is skipped.
    """
    tables = extract_loggraph_tables(logfile)
    if not tables:
        return

    rst = []
    for table in tables:
        try:
            #for graph_name, html in table_to_google_charts(table).iteritems():
            for graph_name, html in table_to_c3js_charts(table).iteritems():
                #rst.append('.. __%s:\n' %graph_name)
                rst.append('.. raw:: html')
                rst.append('\n '.join(html.split('\n')))
        except Exception as e:
            # 'except StandardError, e' was Python-2-only; Exception is the
            # closest portable equivalent.
            Chatter.write('=' * 80)
            Chatter.write('Error (%s) while processing table' % str(e))
            Chatter.write(" '%s'" % table.title)
            Chatter.write('in %s' % logfile)
            Chatter.write('=' * 80)
            Debug.write('Exception raised while processing log file %s, table %s' % (logfile, table.title))
            Debug.write(traceback.format_exc())
示例12: run_one_sweep
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def run_one_sweep(args):
    """Integrate a single sweep, typically inside a parallel worker job.

    :param args: 3-tuple (sweep, failover, job_type). When job_type is
        set, the driver type is switched and the output streams are
        cached for later replay in the parent process.

    With failover enabled a failure is logged and the sweep is dropped;
    otherwise the original exception propagates.
    """
    from xia2.Handlers.Streams import Debug

    assert len(args) == 3
    s, failover, job_type = args

    if job_type:
        DriverFactory.set_driver_type(job_type)
        Chatter.cache()
        Debug.cache()

    try:
        s.get_integrater_intensities()
    except Exception as e:
        if failover:
            Chatter.write('Processing sweep %s failed: %s' %
                          (s.get_name(), str(e)))
            s = None
        else:
            raise
示例13: _integrate
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def _integrate(self):
'''Actually do the integration - in XDS terms this will mean running
DEFPIX and INTEGRATE to measure all the reflections.'''
experiment = self._intgr_refiner.get_refined_experiment_list(
self.get_integrater_epoch())[0]
crystal_model = experiment.crystal
self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()
images_str = '%d to %d' % tuple(self._intgr_wedge)
cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' %tuple(self._intgr_refiner_cell)
if len(self._fp_directory) <= 50:
dirname = self._fp_directory
else:
dirname = '...%s' % self._fp_directory[-46:]
Journal.block(
'integrating', self._intgr_sweep_name, 'XDS',
{'images':images_str,
'cell':cell_str,
'lattice':self._intgr_refiner.get_refiner_lattice(),
'template':self._fp_template,
'directory':dirname,
'resolution':'%.2f' % self._intgr_reso_high})
first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])
defpix = self.Defpix()
# pass in the correct data
for file in ['X-CORRECTIONS.cbf',
'Y-CORRECTIONS.cbf',
'BKGINIT.cbf',
'XPARM.XDS']:
defpix.set_input_data_file(file, self._xds_data_files[file])
defpix.set_data_range(self._intgr_wedge[0],
self._intgr_wedge[1])
if self.get_integrater_high_resolution() > 0.0 and \
self.get_integrater_user_resolution():
Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
self.get_integrater_high_resolution())
defpix.set_resolution_high(self.get_integrater_high_resolution())
defpix.set_resolution_low(self.get_integrater_low_resolution())
elif self.get_integrater_low_resolution():
Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
self.get_integrater_low_resolution())
defpix.set_resolution_high(0.0)
defpix.set_resolution_low(self.get_integrater_low_resolution())
defpix.run()
# and gather the result files
for file in ['BKGPIX.cbf',
'ABS.cbf']:
self._xds_data_files[file] = defpix.get_output_data_file(file)
integrate = self.Integrate()
if self._xds_integrate_parameters:
integrate.set_updates(self._xds_integrate_parameters)
# decide what images we are going to process, if not already
# specified
if not self._intgr_wedge:
images = self.get_matching_images()
self.set_integrater_wedge(min(images),
max(images))
first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])
integrate.set_data_range(self._intgr_wedge[0],
self._intgr_wedge[1])
for file in ['X-CORRECTIONS.cbf',
'Y-CORRECTIONS.cbf',
'BLANK.cbf',
'BKGPIX.cbf',
'GAIN.cbf']:
integrate.set_input_data_file(file, self._xds_data_files[file])
if self._xds_data_files.has_key('GXPARM.XDS'):
Debug.write('Using globally refined parameters')
integrate.set_input_data_file(
'XPARM.XDS', self._xds_data_files['GXPARM.XDS'])
integrate.set_refined_xparm()
else:
integrate.set_input_data_file(
'XPARM.XDS', self._xds_data_files['XPARM.XDS'])
integrate.run()
self._intgr_per_image_statistics = integrate.get_per_image_statistics()
Chatter.write(self.show_per_image_statistics())
#.........这里部分代码省略.........
示例14: _scale
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
#.........这里部分代码省略.........
'absorption':self._scalr_correct_absorption,
'tails':self._scalr_correct_partiality,
'decay':self._scalr_correct_decay
})
else:
Journal.block(
'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
{'scaling model':'default'})
sc = self._updated_aimless()
sc.set_hklin(self._prepared_reflections)
sc.set_intensities(PhilIndex.params.ccp4.aimless.intensities)
sc.set_chef_unmerged(True)
sc.set_new_scales_file('%s.scales' % self._scalr_xname)
user_resolution_limits = { }
for epoch in epochs:
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
intgr = si.get_integrater()
if intgr.get_integrater_user_resolution():
dmin = intgr.get_integrater_high_resolution()
if not user_resolution_limits.has_key((dname, sname)):
user_resolution_limits[(dname, sname)] = dmin
elif dmin < user_resolution_limits[(dname, sname)]:
user_resolution_limits[(dname, sname)] = dmin
start, end = si.get_batch_range()
if (dname, sname) in self._scalr_resolution_limits:
resolution = self._scalr_resolution_limits[(dname, sname)]
sc.add_run(start, end, exclude = False,
resolution = resolution, name = sname)
else:
sc.add_run(start, end, name = sname)
sc.set_hklout(os.path.join(self.get_working_directory(),
'%s_%s_scaled_test.mtz' % \
(self._scalr_pname, self._scalr_xname)))
if self.get_scaler_anomalous():
sc.set_anomalous()
# what follows, sucks
if Flags.get_failover():
try:
sc.scale()
except RuntimeError, e:
es = str(e)
if 'bad batch' in es or \
'negative scales run' in es or \
'no observations' in es:
# first ID the sweep from the batch no
batch = int(es.split()[-1])
epoch = self._identify_sweep_epoch(batch)
sweep = self._scalr_integraters[
epoch].get_integrater_sweep()
# then remove it from my parent xcrystal
self.get_scaler_xcrystal().remove_sweep(sweep)
# then remove it from the scaler list of intergraters
# - this should really be a scaler interface method
del(self._scalr_integraters[epoch])
# then tell the user what is happening
Chatter.write(
'Sweep %s gave negative scales - removing' % \
sweep.get_name())
# then reset the prepare, do, finish flags
self.set_scaler_prepare_done(False)
self.set_scaler_done(False)
self.set_scaler_finish_done(False)
# and return
return
else:
raise e
示例15: _scale_prepare
# 需要导入模块: from xia2.Handlers.Streams import Chatter [as 别名]
# 或者: from xia2.Handlers.Streams.Chatter import write [as 别名]
def _scale_prepare(self):
'''Perform all of the preparation required to deliver the scaled
data. This should sort together the reflection files, ensure that
they are correctly indexed (via pointless) and generally tidy
things up.'''
# acknowledge all of the programs we are about to use...
Citations.cite('pointless')
Citations.cite('aimless')
Citations.cite('ccp4')
# ---------- GATHER ----------
self._sweep_handler = SweepInformationHandler(self._scalr_integraters)
Journal.block(
'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
{'working directory':self.get_working_directory()})
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
exclude_sweep = False
for sweep in PhilIndex.params.xia2.settings.sweep:
if sweep.id == sname and sweep.exclude:
exclude_sweep = True
break
if exclude_sweep:
self._sweep_handler.remove_epoch(epoch)
Debug.write('Excluding sweep %s' %sname)
else:
Journal.entry({'adding data from':'%s/%s/%s' % \
(xname, dname, sname)})
# gather data for all images which belonged to the parent
# crystal - allowing for the fact that things could go wrong
# e.g. epoch information not available, exposure times not in
# headers etc...
for e in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(e)
assert is_mtz_file(si.get_reflections())
p, x = self._sweep_handler.get_project_info()
self._scalr_pname = p
self._scalr_xname = x
# verify that the lattices are consistent, calling eliminate if
# they are not N.B. there could be corner cases here
need_to_return = False
multi_sweep_indexing = \
PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing
if len(self._sweep_handler.get_epochs()) > 1:
if multi_sweep_indexing and not self._scalr_input_pointgroup:
pointless_hklins = []
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
md = self._factory.Mtzdump()
md.set_hklin(hklin)
md.dump()
batches = md.get_batches()
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
datasets = md.get_datasets()
Debug.write('In reflection file %s found:' % hklin)
for d in datasets:
Debug.write('... %s' % d)
dataset_info = md.get_dataset_info(datasets[0])
from xia2.lib.bits import nifty_power_of_ten
Debug.write('Biggest sweep has %d batches' % max_batches)
max_batches = nifty_power_of_ten(max_batches)
counter = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
hklin = self._prepare_pointless_hklin(
#.........这里部分代码省略.........