This page collects typical usage examples of the Python class libtbx.containers.OrderedDict. If you have been wondering what the OrderedDict class does, how to use it, or what working code with it looks like, the curated class examples below may help.
15 code examples of the OrderedDict class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: bravais_lattice_to_space_groups

def bravais_lattice_to_space_groups(chiral_only=True):
  from cctbx import sgtbx
  from cctbx.sgtbx import bravais_types
  from libtbx.containers import OrderedDict
  bravais_lattice_to_sg = OrderedDict()
  for sgn in range(230):
    sg = sgtbx.space_group_info(number=sgn+1).group()
    if (not chiral_only) or (sg.is_chiral()):
      bravais_lattice = bravais_types.bravais_lattice(group=sg)
      bravais_lattice_to_sg.setdefault(str(bravais_lattice), [])
      bravais_lattice_to_sg[str(bravais_lattice)].append(sg)
  return bravais_lattice_to_sg
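For context, a minimal usage sketch (assuming a working cctbx installation; the loop body is illustrative, not verified output):

# Hypothetical usage -- assumes cctbx is installed and importable.
table = bravais_lattice_to_space_groups(chiral_only=True)
for lattice, space_groups in table.items():
  # print each Bravais lattice symbol with its space-group count
  print("%s: %d space groups" % (lattice, len(space_groups)))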
Example 2: unique_beams

def unique_beams(self):
  ''' Iterate through unique beams. '''
  from dxtbx.imageset import ImageSweep
  from libtbx.containers import OrderedDict
  obj = OrderedDict()
  for iset in self._imagesets:
    if isinstance(iset, ImageSweep):
      obj[iset.get_beam()] = None
    else:
      for i in range(len(iset)):
        obj[iset.get_beam(i)] = None
  return obj.keys()
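The pattern above -- storing objects as keys mapped to None -- uses OrderedDict as an insertion-ordered set: duplicates collapse onto the same key and first-seen order is preserved. A self-contained sketch of the idiom, using the standard-library collections.OrderedDict as a stand-in for the libtbx version:

from collections import OrderedDict

def unique_in_order(items):
  # duplicates collapse onto the same key; insertion order is kept
  seen = OrderedDict()
  for item in items:
    seen[item] = None
  return list(seen.keys())

assert unique_in_order([3, 1, 3, 2, 1]) == [3, 1, 2]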
Example 3: cctbx_data_structures_from_cif

class cctbx_data_structures_from_cif(object):
  def __init__(self,
               file_object=None,
               file_path=None,
               cif_model=None,
               data_structure_builder=None,
               data_block_name=None,
               base_array_info=None,
               **kwds):
    assert file_object is None or cif_model is None
    if data_structure_builder is None:
      data_structure_builders = (
        builders.miller_array_builder, builders.crystal_structure_builder)
    else:
      assert data_structure_builder in (
        builders.miller_array_builder, builders.crystal_structure_builder)
      data_structure_builders = (data_structure_builder,)
    self.xray_structures = OrderedDict()
    self.miller_arrays = OrderedDict()
    if cif_model is None:
      cif_model = reader(file_path=file_path, file_object=file_object).model()
    if not len(cif_model):
      raise Sorry("No data block found in CIF")
    if data_block_name is not None and data_block_name not in cif_model:
      if file_path is None:
        msg = 'Unknown CIF data block name: "%s"' % data_block_name
      else:
        msg = 'Unknown CIF data block name "%s" in file: "%s"' % (
          data_block_name, file_path)
      raise RuntimeError(msg)
    errors = []
    wavelengths = {}
    for key, block in cif_model.items():
      if data_block_name is not None and key != data_block_name: continue
      for builder in data_structure_builders:
        if builder == builders.crystal_structure_builder:
          if '_atom_site_fract_x' in block or '_atom_site_Cartn_x' in block:
            self.xray_structures.setdefault(key, builder(block).structure)
        elif builder == builders.miller_array_builder:
          block_wavelengths = builders.get_wavelengths(block)
          if block_wavelengths is not None:
            wavelengths = block_wavelengths
          if base_array_info is not None:
            base_array_info = base_array_info.customized_copy(labels=[key])
          if ('_refln_index_h' in block or '_refln.index_h' in block or
              '_diffrn_refln' in block):
            self.miller_arrays.setdefault(
              key, builder(block, base_array_info=base_array_info,
                           wavelengths=wavelengths).arrays())
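A hedged sketch of how this class might be driven (assuming iotbx/cctbx are importable; "example.cif" is a placeholder file name, and the attribute access is inferred from the constructor above, not from documented API):

# Hypothetical usage -- "example.cif" is a placeholder.
structures = cctbx_data_structures_from_cif(file_path="example.cif")
for block_name, xs in structures.xray_structures.items():
  xs.show_summary()  # one xray structure per qualifying CIF data block
for block_name, arrays in structures.miller_arrays.items():
  print(block_name, len(arrays))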
Example 4: image_data_cache

class image_data_cache(object):
  def __init__(self, imageset, size=10):
    self.imageset = imageset
    self.size = size
    self._image_data = OrderedDict()

  def __getitem__(self, i):
    image_data = self._image_data.get(i)
    if image_data is None:
      image_data = self.imageset.get_raw_data(i)
      if len(self._image_data) >= self.size:
        # remove the oldest entry in the cache
        del self._image_data[self._image_data.keys()[0]]
      self._image_data[i] = image_data
    return image_data
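Note that self._image_data.keys()[0] only works under Python 2, where keys() returns a list; on modern OrderedDicts the same FIFO eviction is spelled popitem(last=False). A self-contained Python 3 sketch of the same fixed-size cache (fifo_cache and the load callable are illustrative names, not part of the original code):

from collections import OrderedDict

class fifo_cache(object):
  """Fixed-size cache that evicts the oldest inserted entry."""
  def __init__(self, load, size=10):
    self._load = load  # callable mapping key -> value
    self._size = size
    self._data = OrderedDict()

  def __getitem__(self, key):
    value = self._data.get(key)
    if value is None:
      value = self._load(key)
      if len(self._data) >= self._size:
        self._data.popitem(last=False)  # drop the oldest insertion
      self._data[key] = value
    return value

cache = fifo_cache(load=lambda i: i * i, size=3)
assert cache[2] == 4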
Example 5: _unique_detectors_dict

def _unique_detectors_dict(self):
  ''' Returns an ordered dictionary of detector objects. '''
  from dxtbx.imageset import ImageSweep
  from libtbx.containers import OrderedDict
  obj = OrderedDict()
  for iset in self._imagesets:
    if isinstance(iset, ImageSweep):
      obj[iset.get_detector()] = None
    else:
      for i in range(len(iset)):
        obj[iset.get_detector(i)] = None
  detector_id = 0
  for detector in obj.keys():
    obj[detector] = detector_id
    detector_id = detector_id + 1
  return obj
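The manual detector_id counter in the second loop can be expressed with enumerate; a sketch of just that numbering step, with string stand-ins for the detector objects:

from collections import OrderedDict

detectors = OrderedDict.fromkeys(['det_a', 'det_b', 'det_c'])
for detector_id, detector in enumerate(detectors):
  detectors[detector] = detector_id
assert list(detectors.values()) == [0, 1, 2]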
Example 6: unique_scans

def unique_scans(self):
  ''' Iterate through unique scans. '''
  from dxtbx.imageset import ImageSweep
  from libtbx.containers import OrderedDict
  obj = OrderedDict()
  for iset in self._imagesets:
    if isinstance(iset, ImageSweep):
      obj[iset.get_scan()] = None
    else:
      for i in range(len(iset)):
        try:
          model = iset.get_scan(i)
          if model is not None:
            obj[model] = None
        except Exception:
          pass
  return obj.keys()
Example 7: exercise_odict

def exercise_odict():
  from libtbx.containers import OrderedDict as odict
  d = odict([('banana', 3), ('apple', 4), ('pear', 1)])
  d.setdefault('orange', 2)
  assert d.has_key('orange')
  assert d['orange'] == 2
  assert d.keys() == ['banana', 'apple', 'pear', 'orange']
  assert d.values() == [3, 4, 1, 2]
  d = odict.fromkeys(('b', 'c', 'a'))
  assert d.keys() == ['b', 'c', 'a']
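This test is Python 2 specific: has_key() is gone in Python 3, and keys()/values() return views rather than lists. A sketch of the equivalent checks against the standard collections.OrderedDict:

from collections import OrderedDict as odict

def exercise_odict_py3():
  d = odict([('banana', 3), ('apple', 4), ('pear', 1)])
  d.setdefault('orange', 2)
  assert 'orange' in d  # replaces d.has_key('orange')
  assert d['orange'] == 2
  assert list(d.keys()) == ['banana', 'apple', 'pear', 'orange']
  assert list(d.values()) == [3, 4, 1, 2]
  d = odict.fromkeys(('b', 'c', 'a'))
  assert list(d.keys()) == ['b', 'c', 'a']

exercise_odict_py3()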
Example 8: __init__

def __init__(self, header=None, data=None):
  self._columns = OrderedDict()
  self.keys_lower = {}
  if header is not None:
    for key in header:
      self.setdefault(key, flex.std_string())
    if data is not None:
      # the number of data items must be an exact multiple of the number of headers
      assert len(data) % len(header) == 0, "Wrong number of data items for loop"
      n_rows = len(data)//len(header)
      n_columns = len(header)
      for i in range(n_rows):
        self.add_row([data[i*n_columns+j] for j in range(n_columns)])
  elif header is None and data is not None:
    assert isinstance(data, dict) or isinstance(data, OrderedDict)
    self.add_columns(data)
    self.keys_lower = dict(
      [(key.lower(), key) for key in self._columns.keys()])
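A sketch of the two construction paths this __init__ accepts, assuming the surrounding iotbx.cif.model module (flex and the loop class itself) is importable; the tag names are invented for illustration:

# Hypothetical usage -- tag names are invented.
# Row-major flat data: 4 items / 2 headers = 2 rows.
l1 = loop(header=('_demo.id', '_demo.value'),
          data=['1', 'a', '2', 'b'])
# Column-wise construction from a dict of equal-length columns.
l2 = loop(data={'_demo.id': ['1', '2'], '_demo.value': ['a', 'b']})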
Example 9: __init__

def __init__(self, unmerged_intensities, batches_all, n_bins=20, d_min=None,
             id_to_batches=None):
  sel = unmerged_intensities.sigmas() > 0
  unmerged_intensities = unmerged_intensities.select(sel)
  batches_all = batches_all.select(sel)
  unmerged_intensities.setup_binner(n_bins=n_bins)
  unmerged_intensities.show_summary()
  self.unmerged_intensities = unmerged_intensities
  self.merged_intensities = unmerged_intensities.merge_equivalents().array()
  separate = separate_unmerged(
    unmerged_intensities, batches_all, id_to_batches=id_to_batches)
  self.intensities = separate.intensities
  self.batches = separate.batches
  run_id_to_batch_id = separate.run_id_to_batch_id
  self.individual_merged_intensities = OrderedDict()
  for k in self.intensities.keys():
    self.intensities[k] = self.intensities[k].resolution_filter(d_min=d_min)
    self.batches[k] = self.batches[k].resolution_filter(d_min=d_min)
    self.individual_merged_intensities[k] = self.intensities[k].merge_equivalents().array()
  if run_id_to_batch_id is not None:
    labels = run_id_to_batch_id.values()
  else:
    labels = None
  racc = self.relative_anomalous_cc()
  if racc is not None:
    self.plot_relative_anomalous_cc(racc, labels=labels)
  correlation_matrix, linkage_matrix = self.compute_correlation_coefficient_matrix()
  self._cluster_dict = self.to_dict(correlation_matrix, linkage_matrix)
  self.plot_cc_matrix(correlation_matrix, linkage_matrix, labels=labels)
  self.write_output()
Example 10: __init__

def __init__(self, xinfo_file, sweep_ids=None, sweep_ranges=None):
  '''Initialise myself from an input .xinfo file.'''
  # first initialise all of the data structures which will hold the
  # information...
  self._project = None
  self._crystals = OrderedDict()
  if sweep_ids is not None:
    sweep_ids = [s.lower() for s in sweep_ids]
  if sweep_ranges is not None:
    assert sweep_ids is not None
    assert len(sweep_ids) == len(sweep_ranges)
  self._sweep_ids = sweep_ids
  self._sweep_ranges = sweep_ranges
  # read the contents of the xinfo file
  self._parse_project(xinfo_file)
  self._validate()
  return
Example 11: sort

def sort(self, recursive=False, key=None, reverse=False):
  self.blocks = OrderedDict(sorted(self.blocks.items(), key=key, reverse=reverse))
  if recursive:
    for b in self.blocks.values():
      b.sort(recursive=recursive, reverse=reverse)
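Rebuilding the OrderedDict from sorted(...) is the standard way to reorder one, since entries cannot be reordered in place. The idiom in isolation:

from collections import OrderedDict

blocks = OrderedDict([('b', 2), ('c', 3), ('a', 1)])
# sorted() over items() orders by key by default; key= and reverse=
# pass straight through, exactly as in sort() above.
blocks = OrderedDict(sorted(blocks.items()))
assert list(blocks.keys()) == ['a', 'b', 'c']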
Example 12: loop

class loop(DictMixin):
  def __init__(self, header=None, data=None):
    self._columns = OrderedDict()
    self.keys_lower = {}
    if header is not None:
      for key in header:
        self.setdefault(key, flex.std_string())
      if data is not None:
        # the number of data items must be an exact multiple of the number of headers
        assert len(data) % len(header) == 0, "Wrong number of data items for loop"
        n_rows = len(data)//len(header)
        n_columns = len(header)
        for i in range(n_rows):
          self.add_row([data[i*n_columns+j] for j in range(n_columns)])
    elif header is None and data is not None:
      assert isinstance(data, dict) or isinstance(data, OrderedDict)
      self.add_columns(data)
      self.keys_lower = dict(
        [(key.lower(), key) for key in self._columns.keys()])

  def __setitem__(self, key, value):
    if not re.match(tag_re, key):
      raise Sorry("%s is not a valid data name" % key)
    if len(self) > 0:
      assert len(value) == self.size()
    if not isinstance(value, flex.std_string):
      for flex_numeric_type in (flex.int, flex.double):
        if isinstance(value, flex_numeric_type):
          value = value.as_string()
        else:
          try:
            value = flex_numeric_type(value).as_string()
          except TypeError:
            continue
          else:
            break
      if not isinstance(value, flex.std_string):
        value = flex.std_string(value)
    # value must be a mutable type
    assert hasattr(value, '__setitem__')
    self._columns[key] = value
    self.keys_lower[key.lower()] = key

  def __getitem__(self, key):
    return self._columns[self.keys_lower[key.lower()]]

  def __delitem__(self, key):
    del self._columns[self.keys_lower[key.lower()]]
    del self.keys_lower[key.lower()]

  def keys(self):
    return self._columns.keys()

  def __repr__(self):
    return repr(OrderedDict(self.iteritems()))

  def name(self):
    return common_substring(self.keys()).rstrip('_').rstrip('.')

  def size(self):
    size = 0
    for column in self.values():
      size = max(size, len(column))
    return size

  def n_rows(self):
    size = 0
    for column in self.values():
      size = max(size, len(column))
    return size

  def n_columns(self):
    return len(self.keys())

  def add_row(self, row, default_value="?"):
    if isinstance(row, dict):
      for key in self:
        if key in row:
          self[key].append(str(row[key]))
        else:
          self[key].append(default_value)
    else:
      assert len(row) == len(self)
      for i, key in enumerate(self):
        self[key].append(str(row[i]))

  def add_column(self, key, values):
    if self.size() != 0:
      assert len(values) == self.size()
    self[key] = values
    self.keys_lower[key.lower()] = key

  def add_columns(self, columns):
    assert isinstance(columns, dict) or isinstance(columns, OrderedDict)
    for key, value in columns.iteritems():
      self.add_column(key, value)

  def update_column(self, key, values):
    assert type(key) == type(""), "first argument is column key string"
    if self.size() != 0:
#......... (rest of the code omitted) .........
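The keys_lower mapping gives the loop case-insensitive key lookup while preserving each tag's original spelling. The same pattern reduced to its essentials (the class name and the tags are illustrative, not from the original):

from collections import OrderedDict

class case_insensitive_dict(object):
  def __init__(self):
    self._columns = OrderedDict()
    self.keys_lower = {}  # lower-cased key -> originally spelled key

  def __setitem__(self, key, value):
    self._columns[key] = value
    self.keys_lower[key.lower()] = key

  def __getitem__(self, key):
    return self._columns[self.keys_lower[key.lower()]]

d = case_insensitive_dict()
d['_refln.F_meas_au'] = [1.0, 2.0]
assert d['_REFLN.f_meas_au'] == [1.0, 2.0]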
Example 13: XInfo

class XInfo(object):
  '''A class to represent all of the input to the xia2dpa system, with
  enough information to allow structure solution, as parsed from a
  .xinfo file, an example of which is in the source code.'''

  def __init__(self, xinfo_file, sweep_ids=None, sweep_ranges=None):
    '''Initialise myself from an input .xinfo file.'''
    # first initialise all of the data structures which will hold the
    # information...
    self._project = None
    self._crystals = OrderedDict()
    if sweep_ids is not None:
      sweep_ids = [s.lower() for s in sweep_ids]
    if sweep_ranges is not None:
      assert sweep_ids is not None
      assert len(sweep_ids) == len(sweep_ranges)
    self._sweep_ids = sweep_ids
    self._sweep_ranges = sweep_ranges
    # read the contents of the xinfo file
    self._parse_project(xinfo_file)
    self._validate()
    return

  def get_output(self):
    '''Generate a string representation of the project.'''
    text = 'Project %s\n' % self._project
    for crystal in self._crystals.keys():
      text += 'Crystal %s\n' % crystal
      text += '%s\n' % self._crystals[crystal].get_output()
    # remove a trailing newline...
    return text[:-1]

  def get_project(self):
    return self._project

  def get_crystals(self):
    return self._crystals

  def _validate(self):
    '''Validate the structure of this object, ensuring that
    everything looks right... raise an exception if I find something
    wrong.'''
    return True

  def _parse_project(self, xinfo_file):
    '''Parse & validate the contents of the .xinfo file. This parses the
    project element (i.e. the whole thing...)'''
    project_records = []
    for r in open(xinfo_file, 'r').readlines():
      record = r.strip()
      if not record:
        pass
      elif record[0] == '!' or record[0] == '#':
        pass
      else:
        # then it may contain something useful...
        project_records.append(record)
    # so now we have loaded the whole file into memory, stripping
    # out the crud... let's look for something useful
    for i in range(len(project_records)):
      record = project_records[i]
      if 'BEGIN PROJECT' in record:
        self._project = record.replace('BEGIN PROJECT', '').strip()
      if 'END PROJECT' in record:
        if not self._project == record.replace('END PROJECT', '').strip():
          raise RuntimeError('error parsing END PROJECT record')
      # next look for crystals
      if 'BEGIN CRYSTAL ' in record:
        crystal_records = [record]
        while True:
          i += 1
          record = project_records[i]
          crystal_records.append(record)
          if 'END CRYSTAL ' in record:
            break
        self._parse_crystal(crystal_records)
      # that's everything, because parse_crystal handles
      # the rest...
    return
#......... (rest of the code omitted) .........
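One subtlety in _parse_project: the inner i += 1 does not advance the outer for loop, so records inside a crystal block are scanned a second time by the outer loop (harmlessly, since they contain no BEGIN markers). A self-contained sketch of the same BEGIN/END scanning written with an explicit while loop instead, on an invented record list:

records = [
  'BEGIN PROJECT demo',
  'BEGIN CRYSTAL xtal1',
  'WAVELENGTH 0.9795',
  'END CRYSTAL xtal1',
  'END PROJECT demo',
]

crystal_blocks = []
i = 0
while i < len(records):
  if records[i].startswith('BEGIN CRYSTAL'):
    block = []
    while not records[i].startswith('END CRYSTAL'):
      block.append(records[i])
      i += 1
    block.append(records[i])  # include the END CRYSTAL record
    crystal_blocks.append(block)
  i += 1
assert len(crystal_blocks) == 1 and len(crystal_blocks[0]) == 3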
Example 14: __init__

def __init__(self, imageset, size=10):
  self.imageset = imageset
  self.size = size
  self._image_data = OrderedDict()
Example 15: multi_crystal_analysis

class multi_crystal_analysis(object):
  def __init__(self, unmerged_intensities, batches_all, n_bins=20, d_min=None,
               id_to_batches=None):
    sel = unmerged_intensities.sigmas() > 0
    unmerged_intensities = unmerged_intensities.select(sel)
    batches_all = batches_all.select(sel)
    unmerged_intensities.setup_binner(n_bins=n_bins)
    unmerged_intensities.show_summary()
    self.unmerged_intensities = unmerged_intensities
    self.merged_intensities = unmerged_intensities.merge_equivalents().array()
    separate = separate_unmerged(
      unmerged_intensities, batches_all, id_to_batches=id_to_batches)
    self.intensities = separate.intensities
    self.batches = separate.batches
    run_id_to_batch_id = separate.run_id_to_batch_id
    self.individual_merged_intensities = OrderedDict()
    for k in self.intensities.keys():
      self.intensities[k] = self.intensities[k].resolution_filter(d_min=d_min)
      self.batches[k] = self.batches[k].resolution_filter(d_min=d_min)
      self.individual_merged_intensities[k] = self.intensities[k].merge_equivalents().array()
    if run_id_to_batch_id is not None:
      labels = run_id_to_batch_id.values()
    else:
      labels = None
    racc = self.relative_anomalous_cc()
    if racc is not None:
      self.plot_relative_anomalous_cc(racc, labels=labels)
    correlation_matrix, linkage_matrix = self.compute_correlation_coefficient_matrix()
    self._cluster_dict = self.to_dict(correlation_matrix, linkage_matrix)
    self.plot_cc_matrix(correlation_matrix, linkage_matrix, labels=labels)
    self.write_output()

  def to_dict(self, correlation_matrix, linkage_matrix):
    from scipy.cluster import hierarchy
    tree = hierarchy.to_tree(linkage_matrix, rd=False)
    leaves_list = hierarchy.leaves_list(linkage_matrix)
    d = {}

    # http://w3facility.org/question/scipy-dendrogram-to-json-for-d3-js-tree-visualisation/
    # https://gist.github.com/mdml/7537455
    def add_node(node):
      if node.is_leaf(): return
      cluster_id = node.get_id() - len(linkage_matrix) - 1
      row = linkage_matrix[cluster_id]
      d[cluster_id+1] = {
        'datasets': [i+1 for i in sorted(node.pre_order())],
        'height': row[2],
      }
      # Recursively add the current node's children
      if node.left: add_node(node.left)
      if node.right: add_node(node.right)

    add_node(tree)
    return d

  def relative_anomalous_cc(self):
    if self.unmerged_intensities.anomalous_flag():
      d_min = min([ma.d_min() for ma in self.intensities.values()])
      racc = flex.double()
      full_set_anom_diffs = self.merged_intensities.anomalous_differences()
      for i_wedge in self.individual_merged_intensities.keys():
        ma_i = self.individual_merged_intensities[i_wedge].resolution_filter(d_min=d_min)
        anom_i = ma_i.anomalous_differences()
        anom_cc = anom_i.correlation(
          full_set_anom_diffs, assert_is_similar_symmetry=False).coefficient()
        racc.append(anom_cc)
      return racc

  def plot_relative_anomalous_cc(self, racc, labels=None):
    perm = flex.sort_permutation(racc)
    fig = pyplot.figure(dpi=1200, figsize=(16,12))
    pyplot.bar(range(len(racc)), list(racc.select(perm)))
    if labels is None:
      labels = ["%.0f" % (j+1) for j in perm]
    assert len(labels) == len(racc)
    pyplot.xticks([i+0.5 for i in range(len(racc))], labels)
    locs, labels = pyplot.xticks()
    pyplot.setp(labels, rotation=70)
    pyplot.xlabel("Dataset")
    pyplot.ylabel("Relative anomalous correlation coefficient")
    fig.savefig("racc.png")

  def compute_correlation_coefficient_matrix(self):
    from scipy.cluster import hierarchy
    import scipy.spatial.distance as ssd
    correlation_matrix = flex.double(
      flex.grid(len(self.intensities), len(self.intensities)))
#......... (rest of the code omitted) .........
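compute_correlation_coefficient_matrix is truncated above, but its imports show the intent: fill a pairwise correlation-coefficient matrix, convert it to a distance matrix, and hand it to scipy's hierarchical clustering. A hedged sketch of that pipeline with plain numpy arrays standing in for the flex grid (the 1 - CC distance is one common choice, not necessarily what the truncated code does):

import numpy as np
from scipy.cluster import hierarchy
import scipy.spatial.distance as ssd

# Illustrative stand-in: symmetric correlation matrix for 4 datasets.
cc = np.array([[1.0, 0.9, 0.2, 0.1],
               [0.9, 1.0, 0.3, 0.2],
               [0.2, 0.3, 1.0, 0.8],
               [0.1, 0.2, 0.8, 1.0]])

dist = ssd.squareform(1.0 - cc, checks=False)  # condensed distance vector
linkage_matrix = hierarchy.linkage(dist)
labels = hierarchy.fcluster(linkage_matrix, t=2, criterion='maxclust')
print(labels)  # two clusters: datasets {1, 2} and {3, 4}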