本文整理汇总了Python中hyperspy.misc.utils.DictionaryTreeBrowser类的典型用法代码示例。如果您正苦于以下问题:Python DictionaryTreeBrowser类的具体用法?Python DictionaryTreeBrowser怎么用?Python DictionaryTreeBrowser使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DictionaryTreeBrowser类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: emi_reader
def emi_reader(filename, dump_xml=False, verbose=False, **kwds):
    """Read an FEI ``.emi`` file together with its numbered ``.ser`` files.

    Parameters
    ----------
    filename : str
        Path to the ``.emi`` file.
    dump_xml : bool
        When True, write each embedded ``<ObjectInfo>`` XML chunk to a
        ``<basename>-object-<i>.xml`` file next to the input.
    verbose : bool
        When True, print the name of each ``.ser`` file as it is opened.

    Returns
    -------
    list of dict
        One dictionary per successfully read ``.ser`` file, with the
        matching XML chunk merged into its ``original_metadata``.
    """
    # TODO: recover the tags from the emi file. It is easy: just look for
    # <ObjectInfo> and </ObjectInfo>. It is standard xml :)
    objects = get_xml_info_from_emi(filename)
    filename = os.path.splitext(filename)[0]
    if dump_xml is True:
        for i, obj in enumerate(objects):
            with open(filename + '-object-%s.xml' % i, 'w') as f:
                f.write(obj)
    # Sort for a deterministic read order: glob() order is
    # filesystem-dependent (the sibling reader already sorts).
    ser_files = sorted(glob(filename + '_[0-9].ser'))
    sers = []
    for f in ser_files:
        if verbose is True:
            # Python 3 print function; the original used the Python 2
            # print statement, a syntax error on Python 3.
            print("Opening", f)
        try:
            sers.append(ser_reader(f, objects))
        except IOError:  # Probably a single spectrum that we don't support
            continue
        # The trailing "_<n>" in the .ser name selects the XML chunk.
        index = int(os.path.splitext(f)[0].split("_")[-1]) - 1
        op = DictionaryTreeBrowser(sers[-1]['original_metadata'])
        emixml2dtb(ET.fromstring(objects[index]), op)
        sers[-1]['original_metadata'] = op.as_dictionary()
    return sers
示例2: emi_reader
def emi_reader(filename, dump_xml=False, **kwds):
    """Read an .emi file plus its numbered .ser companions and merge each
    embedded XML chunk into the matching series' original_metadata."""
    # TODO: recover the tags from the emi file. It is easy: just look for
    # <ObjectInfo> and </ObjectInfo>. It is standard xml :)
    # xml chunks are identified using UUID; if we can find how these UUIDs
    # are generated, it will be possible to match the corresponding ser
    # file and add the detector information to the metadata.
    objects = get_xml_info_from_emi(filename)
    basename = os.path.splitext(filename)[0]
    if dump_xml is True:
        for chunk_no, xml_chunk in enumerate(objects):
            with open(basename + '-object-%s.xml' % chunk_no, 'w') as out:
                out.write(xml_chunk)
    results = []
    for ser_file in sorted(glob(basename + '_[0-9].ser')):
        _logger.info("Opening %s", ser_file)
        try:
            results.append(ser_reader(ser_file, objects))
        except IOError:
            # Probably a single spectrum that we don't support.
            continue
        # The trailing "_<n>" of the .ser name selects the XML chunk.
        chunk_index = int(os.path.splitext(ser_file)[0].split("_")[-1]) - 1
        tree = DictionaryTreeBrowser(results[-1]['original_metadata'])
        emixml2dtb(ET.fromstring(objects[chunk_index]), tree)
        results[-1]['original_metadata'] = tree.as_dictionary()
    return results
示例3: setUp
def setUp(self):
    """Create a LocalStrategy and an artificial SAMFire whose model
    carries a constant chisq map of fives."""
    self.shape = (5, 7)
    self.s = LocalStrategy('test diffusion strategy')
    self.samf = create_artificial_samfire(self.shape)
    model_tree = DictionaryTreeBrowser()
    model_tree.set_item('chisq.data', np.full(self.shape, 5.))
    self.samf.model = model_tree
示例4: setUp
def setUp(self):
    """Build a fake model exposing red_chisq data and a goodness-of-fit
    test with tolerance 0.9."""
    self.shape = (7, 15)
    fake_model = DictionaryTreeBrowser()
    fake_model.set_item("red_chisq.data", np.ones(self.shape))
    # Two pixels deviate from the uniform value of 1.0.
    fake_model.red_chisq.data[3, 5] = 0.8
    fake_model.red_chisq.data[2, 5] = 2.0
    self.m = fake_model
    # Imported locally — a module-level import crashes nosetools.
    from hyperspy.samfire_utils.goodness_of_fit_tests.red_chisq import \
        red_chisq_test as rct
    self.t = rct(0.9)
示例5: _get_example
def _get_example(date, time, time_zone=None):
    """Return (metadata, datetime, iso_string) built from *date*/*time*,
    optionally localised to *time_zone*."""
    md = DictionaryTreeBrowser({'General': {'date': date, 'time': time}})
    iso = '%sT%s' % (date, time)
    if time_zone:
        md.set_item('General.time_zone', time_zone)
        dt = parser.parse(iso).replace(tzinfo=tz.gettz(time_zone))
        # Re-derive the ISO string so it carries the UTC offset.
        iso = dt.isoformat()
    else:
        dt = parser.parse(iso)
    return md, dt, iso
示例6: __init__
def __init__(self, model, workers=None, setup=True, **kwargs):
    """Initialise SAMFire state for *model*.

    Parameters
    ----------
    model : object
        The model to fit; ``model.axes_manager.navigation_shape`` sets
        the shape of the marker map.
    workers : int or None
        Number of worker processes; defaults to ``cpu_count() - 1``
        (never fewer than 1).
    setup : bool
        When True (or when extra keyword arguments are passed),
        ``self._setup(**kwargs)`` is called before refreshing the
        database.
    **kwargs
        Forwarded to ``self._setup``.
    """
    # constants:
    if workers is None:
        workers = max(1, cpu_count() - 1)
    self.model = model
    self.metadata = DictionaryTreeBrowser()
    self._scale = 1.0
    # Marker value semantics (per navigation pixel):
    # -1 -> done pixel, use
    # -2 -> done, ignore when diffusion
    # 0 -> bad fit/no info
    # >0 -> select when turn comes
    self.metadata.add_node('marker')
    self.metadata.add_node('goodness_test')
    # Marker map is in array (reversed navigation) order; every pixel
    # starts at _scale, i.e. "selectable".
    marker = np.empty(self.model.axes_manager.navigation_shape[::-1])
    marker.fill(self._scale)
    self.metadata.marker = marker
    # Default strategy stack: local (reduced chi-squared) first, then
    # global (histogram).
    self.strategies = StrategyList(self)
    self.strategies.append(ReducedChiSquaredStrategy())
    self.strategies.append(HistogramStrategy())
    self._active_strategy_ind = 0
    self.update_every = max(10, workers * 2)  # some sensible number....
    # Imported here rather than at module level (matches the local-import
    # style used elsewhere in this code base).
    from hyperspy.samfire_utils.fit_tests import red_chisq_test
    self.metadata.goodness_test = red_chisq_test(tolerance=1.0)
    self.metadata._gt_dump = None
    from hyperspy.samfire_utils.samfire_kernel import single_kernel
    self.single_kernel = single_kernel
    self._workers = workers
    if len(kwargs) or setup:
        self._setup(**kwargs)
    self.refresh_database()
示例7: test_update_date_time_in_metadata
def test_update_date_time_in_metadata():
    import locale
    md = DictionaryTreeBrowser({'General': {}})
    # in case of iso, the exact time is lost, only the time offset is kept
    from_iso1 = dtt.update_date_time_in_metadata(iso1, md.deepcopy())
    assert_deep_almost_equal(from_iso1.General.date, md1.General.date)
    assert_deep_almost_equal(from_iso1.General.time, md1.General.time)
    assert_deep_almost_equal(from_iso1.General.time_zone, 'UTC')
    from_dt1 = dtt.update_date_time_in_metadata(dt1, md.deepcopy())
    assert_deep_almost_equal(from_dt1.General.date, md1.General.date)
    assert_deep_almost_equal(from_dt1.General.time, md1.General.time)
    # The time-zone display name is locale-dependent; only check it for
    # English locales.
    if locale.getlocale()[0] in ['en_GB', 'en_US']:
        assert from_dt1.General.time_zone in ('UTC',
                                              'Coordinated Universal Time')
    from_iso2 = dtt.update_date_time_in_metadata(iso2, md.deepcopy())
    assert_deep_almost_equal(from_iso2.General.date, md2.General.date)
    assert_deep_almost_equal(from_iso2.General.time, md2.General.time)
    assert_deep_almost_equal(from_iso2.General.time_zone, '-05:00')
    # The remaining cases compare whole metadata dictionaries.
    for value, expected in ((dt2, md2), (iso3, md3), (dt3, md3)):
        updated = dtt.update_date_time_in_metadata(value, md.deepcopy())
        assert_deep_almost_equal(updated.as_dictionary(),
                                 expected.as_dictionary())
示例8: file_reader
#.........这里部分代码省略.........
if 'depth-scale' in rpl_info:
scales[idepth] = rpl_info['depth-scale']
# ev-per-chan is the only calibration supported by the original ripple
# format
elif 'ev-per-chan' in rpl_info:
scales[idepth] = rpl_info['ev-per-chan']
if 'depth-origin' in rpl_info:
origins[idepth] = rpl_info['depth-origin']
if 'depth-units' in rpl_info:
units[idepth] = rpl_info['depth-units']
if 'depth-name' in rpl_info:
names[idepth] = rpl_info['depth-name']
if 'width-origin' in rpl_info:
origins[iwidth] = rpl_info['width-origin']
if 'width-scale' in rpl_info:
scales[iwidth] = rpl_info['width-scale']
if 'width-units' in rpl_info:
units[iwidth] = rpl_info['width-units']
if 'width-name' in rpl_info:
names[iwidth] = rpl_info['width-name']
if 'height-origin' in rpl_info:
origins[iheight] = rpl_info['height-origin']
if 'height-scale' in rpl_info:
scales[iheight] = rpl_info['height-scale']
if 'height-units' in rpl_info:
units[iheight] = rpl_info['height-units']
if 'height-name' in rpl_info:
names[iheight] = rpl_info['height-name']
mp = DictionaryTreeBrowser({
'General': {'original_filename': os.path.split(filename)[1],
'date': rpl_info['date'],
'time': rpl_info['time']},
"Signal": {'signal_type': rpl_info['signal'],
'record_by': record_by},
})
if 'convergence-angle' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.convergence_angle',
rpl_info['convergence-angle'])
if 'tilt-stage' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.tilt_stage',
rpl_info['tilt-stage'])
if 'collection-angle' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EELS.' +
'collection_angle',
rpl_info['collection-angle'])
if 'beam-energy' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.beam_energy',
rpl_info['beam-energy'])
if 'elevation-angle' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EDS.elevation_angle',
rpl_info['elevation-angle'])
if 'azimuth-angle' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EDS.azimuth_angle',
rpl_info['azimuth-angle'])
if 'energy-resolution' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EDS.' +
'energy_resolution_MnKa',
rpl_info['energy-resolution'])
if 'detector-peak-width-ev' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EDS.' +
'energy_resolution_MnKa',
rpl_info['detector-peak-width-ev'])
if 'live-time' in rpl_info:
mp.set_item('Acquisition_instrument.TEM.Detector.EDS.live_time',
rpl_info['live-time'])
axes = []
index_in_array = 0
for i in range(3):
if sizes[i] > 1:
axes.append({
'size': sizes[i],
'index_in_array': index_in_array,
'name': names[i],
'scale': scales[i],
'offset': origins[i],
'units': units[i],
})
index_in_array += 1
dictionary = {
'data': data.squeeze(),
'axes': axes,
'metadata': mp.as_dictionary(),
'original_metadata': rpl_info
}
return [dictionary, ]
示例9: parse_msa_string
def parse_msa_string(string, filename=None):
"""Parse an EMSA/MSA file content.
Parameters
----------
string: string or file object
It must comply with the EMSA/MSA standard.
filename: string or None
The filename.
Returns:
--------
file_data_list: list
The list contains a dictionary that contains the parsed
information. It can be used to create a `:class:Signal`
using `:func:hyperspy.io.dict2signal`.
"""
if not hasattr(string, "readlines"):
string = string.splitlines()
parameters = {}
mapped = DictionaryTreeBrowser({})
y = []
# Read the keywords
data_section = False
for line in string:
if data_section is False:
if line[0] == "#":
try:
key, value = line.split(': ')
value = value.strip()
except ValueError:
key = line
value = None
key = key.strip('#').strip()
if key != 'SPECTRUM':
parameters[key] = value
else:
data_section = True
else:
# Read the data
if line[0] != "#" and line.strip():
if parameters['DATATYPE'] == 'XY':
xy = line.replace(',', ' ').strip().split()
y.append(float(xy[1]))
elif parameters['DATATYPE'] == 'Y':
data = [
float(i) for i in line.replace(',', ' ').strip().split()]
y.extend(data)
# We rewrite the format value to be sure that it complies with the
# standard, because it will be used by the writer routine
parameters['FORMAT'] = "EMSA/MAS Spectral Data File"
# Convert the parameters to the right type and map some
# TODO: the msa format seems to support specifying the units of some
# parameters. We should add this feature here
for parameter, value in parameters.items():
# Some parameters names can contain the units information
# e.g. #AZIMANGLE-dg: 90.
if '-' in parameter:
clean_par, units = parameter.split('-')
clean_par, units = clean_par.strip(), units.strip()
else:
clean_par, units = parameter, None
if clean_par in keywords:
try:
parameters[parameter] = keywords[clean_par]['dtype'](value)
except:
# Normally the offending misspelling is a space in the scientific
# notation, e.g. 2.0 E-06, so we try to correct for it
try:
parameters[parameter] = keywords[clean_par]['dtype'](
value.replace(' ', ''))
except:
_logger.exception(
"The %s keyword value, %s could not be converted to "
"the right type", parameter, value)
if keywords[clean_par]['mapped_to'] is not None:
mapped.set_item(keywords[clean_par]['mapped_to'],
parameters[parameter])
if units is not None:
mapped.set_item(keywords[clean_par]['mapped_to'] +
'_units', units)
# The data parameter needs some extra care
# It is necessary to change the locale to US english to read the date
# keyword
loc = locale.getlocale(locale.LC_TIME)
# Setting locale can raise an exception because
# their name depends on library versions, platform etc.
try:
if os_name == 'posix':
locale.setlocale(locale.LC_TIME, ('en_US', 'utf8'))
elif os_name == 'windows':
locale.setlocale(locale.LC_TIME, 'english')
try:
H, M = time.strptime(parameters['TIME'], "%H:%M")[3:5]
mapped.set_item('General.time', datetime.time(H, M))
#.........这里部分代码省略.........
示例10: create_artificial_samfire
def create_artificial_samfire(shape):
    """Return a DictionaryTreeBrowser mimicking the attribute layout of a
    SAMFire object (running_pixels, model, metadata.marker, _scale)."""
    samf = DictionaryTreeBrowser()
    samf.add_node('running_pixels')
    samf.running_pixels = []
    samf.add_node('model')
    samf.add_node('metadata')
    samf.metadata.add_node('marker')
    # All-zero marker: no pixel selected or done yet.
    samf.metadata.marker = np.zeros(shape)
    samf.add_node('_scale')
    samf._scale = 1.0
    return samf
示例11: setUp
def setUp(self):
    """Attach a 5x7 red_chisq map (values 0..34) to a fake model and hand
    it to a ReducedChiSquaredWeight."""
    self.w = ReducedChiSquaredWeight()
    fake_model = DictionaryTreeBrowser()
    fake_model.add_node('red_chisq.data')
    fake_model.red_chisq.data = np.arange(35).reshape((5, 7))
    self.w.model = fake_model
示例12: Fitting
class Samfire:
"""Smart Adaptive Multidimensional Fitting (SAMFire) object
SAMFire is a more robust way of fitting multidimensional datasets. By
extracting starting values for each pixel from already fitted pixels,
SAMFire stops the fitting algorithm from getting lost in the parameter
space by always starting close to the optimal solution.
SAMFire only picks starting parameters and the order the pixels (in the
navigation space) are fitted, and does not provide any new minimisation
algorithms.
Attributes
----------
model : Model instance
The complete model
optional_components : list
A list of components that can be switched off at some pixels if it
returns a better Akaike's Information Criterion with correction (AICc)
workers : int
The number of processes that will perform the fitting in parallel
pool : samfire_pool instance
A proxy object that manages either multiprocessing or ipyparallel pool
strategies : strategy list
A list of strategies that will be used to select pixel fitting order
and calculate required starting parameters. Strategies come in two
"flavours" - local and global. Local strategies spread the starting
values to the nearest pixels and forces certain pixel fitting order.
Global strategies look for clusters in parameter values, and suggests
most frequent values. Global strategy do not depend on pixel fitting
order, hence it is randomised.
metadata : dictionary
A dictionary for important samfire parameters
active_strategy : strategy
The currently active strategy from the strategies list
update_every : int
If a segmenter strategy is running, updates the histograms every time
update_every good fits are found.
plot_every : int
When running, samfire plots results every time plot_every good fits are
found.
save_every : int
When running, samfire saves results every time save_every good fits are
found.
Methods
-------
start
start SAMFire
stop
stop SAMFire
plot
force plot of currently selected active strategy
refresh_database
refresh current active strategy database. No previous structure is
preserved
backup
backs up the current version of the model
change_strategy
changes strategy to a new one. Certain rules apply
append
appends strategy to the strategies list
extend
extends strategies list
remove
removes strategy from strategies list
update
updates the current model with values, received from a worker
log
if _log exists, logs the arguments to the list.
generate_values
creates a generator to calculate values to be sent to the workers
"""
# Class-level defaults; several are re-bound per instance in __init__.
__active_strategy_ind = 0  # index of the active strategy in `strategies`
_progressbar = None  # presumably a progress-bar handle while running — TODO confirm
pool = None  # samfire_pool proxy managing the worker pool (see class docstring)
_figure = None  # presumably the plot figure handle — TODO confirm
# NOTE(review): mutable class-level defaults below are shared across
# instances unless re-bound in __init__ — verify that is intended.
optional_components = []  # components that may be switched off per pixel
running_pixels = []  # pixels currently being fitted by workers
plot_every = 0  # plot results every `plot_every` good fits
save_every = np.nan  # save results every `save_every` good fits
_workers = None  # number of worker processes
_args = None  # NOTE(review): purpose not visible in this chunk — confirm
count = 0  # NOTE(review): counter semantics not visible in this chunk
def __init__(self, model, workers=None, setup=True, **kwargs):
# constants:
if workers is None:
workers = max(1, cpu_count() - 1)
self.model = model
self.metadata = DictionaryTreeBrowser()
self._scale = 1.0
# -1 -> done pixel, use
# -2 -> done, ignore when diffusion
# 0 -> bad fit/no info
#.........这里部分代码省略.........
示例13: __init__
def __init__(self, imdict, file, order="C", record_by=None):
    """Wrap the raw DM image dictionary for attribute-style access."""
    self.imdict = DictionaryTreeBrowser(imdict)
    self.file = file
    # Fall back to C (row-major) ordering for any falsy `order`.
    self._order = order or "C"
    self._record_by = record_by
示例14: ImageObject
class ImageObject(object):
def __init__(self, imdict, file, order="C", record_by=None):
    """Store the open file handle and wrap *imdict* in a tree browser."""
    self.imdict = DictionaryTreeBrowser(imdict)
    self.file = file
    # A falsy `order` (None, "") degrades to row-major "C".
    self._order = order or "C"
    self._record_by = record_by
@property
def shape(self):
    """Shape of the image data in numpy order.

    DM stores dimensions as X, Y, Z...; reverse them for numpy indexing.
    """
    dims = self.imdict.ImageData.Dimensions
    return tuple(d[1] for d in dims)[::-1]
# For some image stacks created using plugins in Digital Micrograph the
# metadata under Calibrations.Dimension may not reflect the actual
# dimensions in the dataset, which stopped such images from loading.
# To let HyperSpy load these files, the offsets, scales and units
# properties below pad any missing dimensions with "dummy" values.
@property
def offsets(self):
    """Axis offsets in numpy order, padded with 0.0 for missing dims."""
    dims = self.imdict.ImageData.Calibrations.Dimension
    pad = len(self.shape) - len(dims)
    raw_origins = np.array([d[1].Origin for d in dims])
    raw_origins = np.append(raw_origins, (0.0,) * pad)
    # DM stores origins with the opposite sign and in unscaled units.
    return -1 * raw_origins[::-1] * self.scales
@property
def scales(self):
    """Axis scales in numpy order, padded with 1.0 for missing dims."""
    dims = self.imdict.ImageData.Calibrations.Dimension
    pad = len(self.shape) - len(dims)
    raw_scales = np.array([d[1].Scale for d in dims])
    raw_scales = np.append(raw_scales, (1.0,) * pad)
    return raw_scales[::-1]
@property
def units(self):
    """Axis units in numpy order; falsy/missing entries become ""."""
    dims = self.imdict.ImageData.Calibrations.Dimension
    pad = len(self.shape) - len(dims)
    unit_list = [d[1].Units if d[1].Units else "" for d in dims]
    unit_list += [""] * pad
    return tuple(unit_list[::-1])
@property
def names(self):
    """Axis names: an energy axis for EELS/EDS data, the rest x, y, z."""
    axis_names = [t.Undefined] * len(self.shape)
    free = list(range(len(self.shape)))
    units = self.units
    if self.signal_type == "EELS" and "eV" in units:
        axis_names[free.pop(units.index("eV"))] = "Energy loss"
    elif self.signal_type in ("EDS", "EDX") and "keV" in units:
        axis_names[free.pop(units.index("keV"))] = "Energy"
    # Remaining axes are labelled x, y, z starting from the last one.
    for axis_index, label in zip(free[::-1], ("x", "y", "z")):
        axis_names[axis_index] = label
    return axis_names
@property
def title(self):
    """The image Name, or "" when it is missing or falsy.

    The final fallback also covers a Name that holds an empty list;
    see https://github.com/hyperspy/hyperspy/issues/1937
    """
    name = self.imdict.get_item("Name", "")
    return name if name else ""
@property
def record_by(self):
if self._record_by is not None:
return self._record_by
if len(self.scales) == 1:
return "spectrum"
elif (('ImageTags.Meta_Data.Format' in self.imdict and
self.imdict.ImageTags.Meta_Data.Format in ("Spectrum image",
"Spectrum")) or (
"ImageTags.spim" in self.imdict)) and len(self.scales) == 2:
return "spectrum"
else:
return "image"
@property
def to_spectrum(self):
    """True when a >2D dataset is tagged as a spectrum image in DM."""
    tagged = (('ImageTags.Meta_Data.Format' in self.imdict and
               self.imdict.ImageTags.Meta_Data.Format == "Spectrum image") or
              "ImageTags.spim" in self.imdict)
    return bool(tagged and len(self.scales) > 2)
@property
def order(self):
    # Memory-layout flag stored at construction time (defaults to "C").
    return self._order
@property
def intensity_calibration(self):
    """Brightness calibration dict with Units normalised to a string."""
    calibration = self.imdict.ImageData.Calibrations.Brightness.as_dictionary()
    # Normalise a falsy Units entry (e.g. None) to an empty string.
    calibration['Units'] = calibration['Units'] or ""
    return calibration
#.........这里部分代码省略.........