This page collects typical usage examples of Dataset.a['roi_feature_ids'] from the Python module mvpa2.datasets. Strictly speaking it is not a method but an entry in a Dataset's .a attributes collection, assigned and read with dict-style indexing. If you are wondering what Dataset.a['roi_feature_ids'] holds, how to set it, or what real-world usage looks like, the curated code examples below may help. You can also look further into the containing class, mvpa2.datasets.Dataset, for more usage examples.
Three code examples of Dataset.a['roi_feature_ids'] are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
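As a minimal, self-contained sketch (not taken from the examples below) of what the attribute itself does, roughly:

# Minimal sketch: store ROI feature ids on a result dataset and read them back
import numpy as np
from mvpa2.datasets import Dataset

res = Dataset(np.atleast_1d(0.42))    # e.g. a scalar measure wrapped as a dataset
res.a['roi_feature_ids'] = [3, 7, 8]  # ids of the features that formed the ROI
print(res.a.roi_feature_ids)          # -> [3, 7, 8]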
Example 1: _proc_block
# Required import: from mvpa2.datasets import Dataset [as alias]
# Attribute used: Dataset.a['roi_feature_ids'] (set on Dataset instances; not itself importable)
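The excerpts below also rely on names imported elsewhere in the surrounding PyMVPA module; the imports are presumably along these lines (the exact module paths are an assumption, not shown in the excerpts):

import tempfile
import numpy as np
import mvpa2                                    # for mvpa2.seed() in Example 2
from mvpa2.base import debug, warning           # logging helpers used throughout
from mvpa2.base.hdf5 import h5save              # hdf5 results backend
from mvpa2.base.dataset import is_datasetlike   # duck-type check for datasets
from mvpa2.datasets import Dataset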
def _proc_block(self, block, ds, measure):
    """Little helper to capture the parts of the computation that can be
    parallelized
    """
    if __debug__:
        debug_slc_ = 'SLC_' in debug.active
        debug('SLC',
              "Starting computing block for %i elements" % len(block))
    if self.ca.is_enabled('roi_sizes'):
        roi_sizes = []
    else:
        roi_sizes = None
    results = []
    # put rois around all features in the dataset and compute the
    # measure within them
    for i, f in enumerate(block):
        # retrieve the feature ids of all features in the ROI from the
        # query engine
        roi_fids = self._queryengine[f]
        if __debug__ and debug_slc_:
            debug('SLC_', 'For %r query returned ids %r' % (f, roi_fids))
        # slice the dataset
        roi = ds[:, roi_fids]
        if self.__add_center_fa:
            # add fa to indicate ROI seed if requested
            roi_seed = np.zeros(roi.nfeatures, dtype='bool')
            roi_seed[roi_fids.index(f)] = True
            roi.fa[self.__add_center_fa] = roi_seed
        # compute the datameasure and store in results
        res = measure(roi)
        if self.ca.is_enabled('roi_feature_ids'):
            if not is_datasetlike(res):
                res = Dataset(np.atleast_1d(res))
            # add roi feature ids to intermediate result dataset for later
            # aggregation
            res.a['roi_feature_ids'] = roi_fids
        results.append(res)
        # store the size of the roi dataset
        if roi_sizes is not None:
            roi_sizes.append(roi.nfeatures)
        if __debug__:
            debug('SLC', "Doing %i ROIs: %i (%i features) [%i%%]"
                  % (len(block),
                     f + 1,
                     roi.nfeatures,
                     float(i + 1) / len(block) * 100), cr=True)
    return results, roi_sizes
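Each element of the returned results list is a dataset whose .a collection carries the stored ids. A hypothetical follow-up (the sl instance and the block/ds/measure variables are assumed, not part of the example above) could aggregate them like this:

# Hypothetical aggregation over the per-ROI result datasets
results, roi_sizes = sl._proc_block(block, ds, measure)
all_roi_fids = [res.a.roi_feature_ids for res in results]  # one id list per ROI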
Example 2: _proc_block
# Required import: from mvpa2.datasets import Dataset [as alias]
# Attribute used: Dataset.a['roi_feature_ids'] (set on Dataset instances; not itself importable)
def _proc_block(self, block, ds, measure, seed=None, iblock='main'):
    """Little helper to capture the parts of the computation that can be
    parallelized

    Parameters
    ----------
    seed
      RNG seed. Should be provided e.g. in child process invocations
      to guarantee that they all seed differently to not keep generating
      the same sequences due to reusing the same copy of numpy's RNG
    iblock
      Critical for generating non-colliding temp filenames in case
      of hdf5 backend. Otherwise RNGs of different processes might
      collide in their temporary file names leading to problems.
    """
    if seed is not None:
        mvpa2.seed(seed)
    if __debug__:
        debug_slc_ = 'SLC_' in debug.active
        debug('SLC',
              "Starting computing block for %i elements" % len(block))
    results = []
    store_roi_feature_ids = self.ca.is_enabled('roi_feature_ids')
    store_roi_sizes = self.ca.is_enabled('roi_sizes')
    store_roi_center_ids = self.ca.is_enabled('roi_center_ids')
    assure_dataset = any([store_roi_feature_ids,
                          store_roi_sizes,
                          store_roi_center_ids])
    # put rois around all features in the dataset and compute the
    # measure within them
    for i, f in enumerate(block):
        # retrieve the feature ids of all features in the ROI from the
        # query engine
        roi_specs = self._queryengine[f]
        if __debug__ and debug_slc_:
            debug('SLC_', 'For %r query returned roi_specs %r'
                  % (f, roi_specs))
        if is_datasetlike(roi_specs):
            # TODO: unittest
            assert(len(roi_specs) == 1)
            roi_fids = roi_specs.samples[0]
        else:
            roi_fids = roi_specs
        # slice the dataset
        roi = ds[:, roi_fids]
        if is_datasetlike(roi_specs):
            for n, v in roi_specs.fa.iteritems():
                roi.fa[n] = v
        if self.__add_center_fa:
            # add fa to indicate ROI seed if requested
            roi_seed = np.zeros(roi.nfeatures, dtype='bool')
            if f in roi_fids:
                roi_seed[roi_fids.index(f)] = True
            else:
                warning("Center feature attribute id %s not found" % f)
            roi.fa[self.__add_center_fa] = roi_seed
        # compute the datameasure and store in results
        res = measure(roi)
        if assure_dataset and not is_datasetlike(res):
            res = Dataset(np.atleast_1d(res))
        if store_roi_feature_ids:
            # add roi feature ids to intermediate result dataset for later
            # aggregation
            res.a['roi_feature_ids'] = roi_fids
        if store_roi_sizes:
            res.a['roi_sizes'] = roi.nfeatures
        if store_roi_center_ids:
            res.a['roi_center_ids'] = f
        results.append(res)
        if __debug__:
            debug('SLC', "Doing %i ROIs: %i (%i features) [%i%%]"
                  % (len(block),
                     f + 1,
                     roi.nfeatures,
                     float(i + 1) / len(block) * 100), cr=True)
    if self.results_postproc_fx:
        if __debug__:
            debug('SLC', "Post-processing %d results in proc_block using %s"
                  % (len(results), self.results_postproc_fx))
        results = self.results_postproc_fx(results)
    if self.results_backend == 'native':
        pass  # nothing special
    elif self.results_backend == 'hdf5':
        # store results in a temporary file and return a filename
        results_file = tempfile.mktemp(prefix=self.tmp_prefix,
                                       suffix='-%s.hdf5' % iblock)
        if __debug__:
            debug('SLC', "Storing results into %s" % results_file)
        h5save(results_file, results)
#......... rest of the code omitted here .........
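When the hdf5 backend is selected, the block's results are written to a temporary file and (as Example 3 shows in full) a filename is returned instead of a list. A sketch of the caller-side reload, assuming PyMVPA's h5load counterpart to h5save:

# Sketch: reload per-ROI results stored by the hdf5 backend
from mvpa2.base.hdf5 import h5load
results = h5load(results_file)  # back to the list of per-ROI datasets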
Example 3: _proc_block
# Required import: from mvpa2.datasets import Dataset [as alias]
# Attribute used: Dataset.a['roi_feature_ids'] (set on Dataset instances; not itself importable)
def _proc_block(self, block, ds, measure, iblock='main'):
    """Little helper to capture the parts of the computation that can be
    parallelized

    Parameters
    ----------
    iblock
      Critical for generating non-colliding temp filenames in case
      of hdf5 backend. Otherwise RNGs of different processes might
      collide in their temporary file names leading to problems.
    """
    if __debug__:
        debug_slc_ = 'SLC_' in debug.active
        debug('SLC',
              "Starting computing block for %i elements" % len(block))
    if self.ca.is_enabled('roi_sizes'):
        roi_sizes = []
    else:
        roi_sizes = None
    results = []
    # put rois around all features in the dataset and compute the
    # measure within them
    for i, f in enumerate(block):
        # retrieve the feature ids of all features in the ROI from the
        # query engine
        roi_fids = self._queryengine[f]
        if __debug__ and debug_slc_:
            debug('SLC_', 'For %r query returned ids %r' % (f, roi_fids))
        # slice the dataset
        roi = ds[:, roi_fids]
        if self.__add_center_fa:
            # add fa to indicate ROI seed if requested
            roi_seed = np.zeros(roi.nfeatures, dtype='bool')
            roi_seed[roi_fids.index(f)] = True
            roi.fa[self.__add_center_fa] = roi_seed
        # compute the datameasure and store in results
        res = measure(roi)
        if self.ca.is_enabled('roi_feature_ids'):
            if not is_datasetlike(res):
                res = Dataset(np.atleast_1d(res))
            # add roi feature ids to intermediate result dataset for later
            # aggregation
            res.a['roi_feature_ids'] = roi_fids
        results.append(res)
        # store the size of the roi dataset
        if roi_sizes is not None:
            roi_sizes.append(roi.nfeatures)
        if __debug__:
            debug('SLC', "Doing %i ROIs: %i (%i features) [%i%%]"
                  % (len(block),
                     f + 1,
                     roi.nfeatures,
                     float(i + 1) / len(block) * 100), cr=True)
    if self.results_backend == 'native':
        pass  # nothing special
    elif self.results_backend == 'hdf5':
        # store results in a temporary file and return a filename
        results_file = tempfile.mktemp(prefix=self.tmp_prefix,
                                       suffix='-%s.hdf5' % iblock)
        if __debug__:
            debug('SLC', "Storing results into %s" % results_file)
        h5save(results_file, results)
        if __debug__:
            debug('SLC_', "Results stored")
        results = results_file
    else:
        raise RuntimeError("Must not reach this point")
    return results, roi_sizes
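Putting the two backends together, a caller of this variant might unwrap the return value along these lines (a hedged sketch; sl and the other names are assumed, not part of the example above):

# Hypothetical caller-side unwrapping of both result backends
from mvpa2.base.hdf5 import h5load

results, roi_sizes = sl._proc_block(block, ds, measure)
if isinstance(results, str):      # hdf5 backend returned a temp filename
    results = h5load(results)
for res in results:
    print(res.a.roi_feature_ids)  # the attribute this page documents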