This article collects typical usage examples of the Python function mvpa2.base.debug. If you have been wondering what exactly the Python debug function does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
A total of 15 code examples of the debug function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
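Before the examples, here is a minimal sketch of how debug is typically imported and switched on. It is illustrative only: the import path follows the module named above, the 'MAP' target and the message text are placeholders, and enabling targets through debug.active (or the MVPA_DEBUG environment variable) is inferred from how the examples below consult debug.active, not quoted from PyMVPA's documentation.
from mvpa2.base import debug   # the helper all examples below revolve around

if __debug__:
    # Illustrative: enable the 'MAP' debug target so that debug('MAP', ...) calls
    # actually emit output; targets can also be preselected via the MVPA_DEBUG
    # environment variable before importing mvpa2.
    debug.active += ['MAP']
    # A message is printed only while its target id is active.
    debug('MAP', "mapped data of shape %s" % ((10, 2),))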
Example 1: _train
def _train(self, samples):
    """Train PrototypeMapper
    """
    self._proj = np.hstack([similarity.computed(samples, self.prototypes) for similarity in self.similarities])
    if __debug__:
        debug("MAP", "projected data of shape %s: %s " % (self._proj.shape, self._proj))
Example 2: label_voxel
def label_voxel(self, c, levels = None):
    if self.__referenceLevel is None:
        warning("You did not provide what level to use "
                "for reference. Assigning 0th level -- '%s'"
                % (self._levels[0],))
        self.set_reference_level(0)
        # return self.__referenceAtlas.label_voxel(c, levels)
    c = self._check_range(c)
    # obtain coordinates of the closest voxel
    cref = self._data[ self.__referenceLevel.indexes, c[0], c[1], c[2] ]
    dist = norm( (cref - c) * self.voxdim )
    if __debug__:
        debug('ATL__', "Closest referenced point for %r is "
              "%r at distance %3.2f" % (c, cref, dist))
    if (self.distance - dist) >= 1e-3:  # neglect everything smaller
        result = self.__referenceAtlas.label_voxel(cref, levels)
        result['voxel_referenced'] = c
        result['distance'] = dist
    else:
        result = self.__referenceAtlas.label_voxel(c, levels)
        if __debug__:
            debug('ATL__', "Closest referenced point is "
                  "further than desired distance %.2f" % self.distance)
        result['voxel_referenced'] = None
        result['distance'] = 0
    return result
Example 3: _suppress_scipy_warnings
def _suppress_scipy_warnings():
    # Infiltrate warnings if necessary
    numpy_ver = versions['numpy']
    scipy_ver = versions['scipy']
    # There are way too many deprecation warnings spat out at the
    # user. Let's assume that they should be fixed by scipy 0.7.0 time
    if not __debug__ or (__debug__ and 'PY' not in debug.active):
        filter_lines = []
        if "0.6.0" <= scipy_ver and scipy_ver < "0.7.0" \
           and numpy_ver > "1.1.0":
            if __debug__:
                debug('EXT', "Setting up filters for numpy DeprecationWarnings "
                      "regarding scipy < 0.7.0")
            filter_lines += [
                ('NumpyTest will be removed in the next release.*',
                 DeprecationWarning),
                ('PyArray_FromDims: use PyArray_SimpleNew.',
                 DeprecationWarning),
                ('PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.',
                 DeprecationWarning),
                # Trick re.match, since the warnings module compiles patterns without re.DOTALL
                ('[\na-z \t0-9]*The original semantics of histogram is scheduled to be.*'
                 '[\na-z \t0-9]*', Warning) ]
        if scipy_ver >= "0.15":
            filter_lines += [("`scipy.weave` is deprecated, use `weave` instead!",
                              DeprecationWarning)]
        if scipy_ver >= "0.16":
            # scipy deprecated it but statsmodels still import it for now
            filter_lines += [("`scipy.linalg.calc_lwork` is deprecated!",
                              DeprecationWarning)]
        for f, w in filter_lines:
            warnings.filterwarnings('ignore', f, w)
Example 4: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]
    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace
    residuals = None
    if self.ca['residual_errors'].enabled:
        residuals = np.zeros((1, len(datasets)))
        self.ca.residual_errors = Dataset(samples=residuals)
    # start from original input datasets again
    for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
        if __debug__:
            debug('HPAL_', "Level 3: ds #%i" % i)
        # retrain mapper on final common space
        ds_new.sa[m.get_space()] = self.commonspace
        m.train(ds_new)
        # remove common space attribute again to save on memory
        del ds_new.sa[m.get_space()]
        if residuals is not None:
            # obtain final projection
            data_mapped = m.forward(ds_new.samples)
            residuals[0, i] = np.linalg.norm(data_mapped - self.commonspace)
    return mappers
Example 5: _prepredict
def _prepredict(self, dataset):
    """Functionality prior to prediction
    """
    if not ('notrain2predict' in self.__tags__):
        # check if classifier was trained if that is needed
        if not self.trained:
            raise FailedToPredictError(
                "Classifier %s wasn't yet trained, therefore can't "
                "predict" % self)
        nfeatures = dataset.nfeatures #data.shape[1]
        # check if number of features is the same as in the data
        # it was trained on
        if nfeatures != self.__trainednfeatures:
            raise ValueError, \
                  "Classifier %s was trained on data with %d features, " % \
                  (self, self.__trainednfeatures) + \
                  "thus can't predict for %d features" % nfeatures
    if self.params.retrainable:
        if not self.__changedData_isset:
            self.__reset_changed_data()
            _changedData = self._changedData
            data = np.asanyarray(dataset.samples)
            _changedData['testdata'] = \
                self.__was_data_changed('testdata', data)
            if __debug__:
                debug('CLF_', "prepredict: Obtained _changedData is %s",
                      (_changedData,))
Example 6: _untrain
def _untrain(self):
    if __debug__:
        debug("FS_", "Untraining combined FS: %s" % self)
    for fs in self.__selectors:
        fs.untrain()
    # ask base class to do its untrain
    super(CombinedFeatureSelection, self)._untrain()
Example 7: newfunc
def newfunc(*arg, **kwargs):
    nfailed, i = 0, 0  # define i just in case
    for i in xrange(niter):
        try:
            ret = func(*arg, **kwargs)
            if i + 1 - nfailed >= niter - nfailures:
                # so we know already that we wouldn't go over
                # nfailures
                break
        except AssertionError, e:
            nfailed += 1
            if __debug__:
                debug("TEST", "Upon %i-th run, test %s failed with %s", (i, func.__name__, e))
            if nfailed > nfailures:
                if __debug__:
                    debug(
                        "TEST",
                        "Ran %s %i times. Got %d failures, "
                        "while was allowed %d "
                        "-- re-throwing the last failure %s",
                        (func.__name__, i + 1, nfailed, nfailures, e),
                    )
                exc_info = sys.exc_info()
                raise exc_info[1], None, exc_info[2]
Example 8: forward
def forward(self, data):
    """Map data from input to output space.

    Parameters
    ----------
    data : Dataset-like, (at least 2D)-array-like
      Typically this is a `Dataset`, but it might also be a plain data
      array, or even something completely different(TM) that is supported
      by a subclass' implementation. If such an object is Dataset-like it
      is handled by a dedicated method that also transforms dataset
      attributes if necessary. If an array-like is passed, it has to be
      at least two-dimensional, with the first axis separating samples
      or observations. For single samples `forward1()` might be more
      appropriate.
    """
    if is_datasetlike(data):
        if __debug__:
            debug('MAP', "Forward-map %s-shaped dataset through '%s'."
                  % (data.shape, self))
        return self._forward_dataset(data)
    else:
        if hasattr(data, 'ndim') and data.ndim < 2:
            raise ValueError(
                'Mapper.forward() only support mapping of data with '
                'at least two dimensions, where the first axis '
                'separates samples/observations. Consider using '
                'Mapper.forward1() instead.')
        if __debug__:
            debug('MAP', "Forward-map data through '%s'." % (self))
        return self._forward_data(data)
Example 9: _forward_dataset
def _forward_dataset(self, dataset):
    """Forward-map a dataset.

    This is a private method that can be reimplemented in derived
    classes. The default implementation forward-maps the dataset samples
    and returns a new dataset that is a shallow copy of the input with
    the mapped samples.

    Parameters
    ----------
    dataset : Dataset-like
    """
    if __debug__:
        debug('MAP_', "Forward-map %s-shaped samples in dataset with '%s'."
              % (dataset.samples.shape, self))
    msamples = self._forward_data(dataset.samples)
    if __debug__:
        debug('MAP_', "Make shallow copy of to-be-forward-mapped dataset "
              "and assigned forward-mapped samples ({sf}a_filters: "
              "%s, %s, %s)." % (self._sa_filter, self._fa_filter,
                                self._a_filter))
    mds = dataset.copy(deep=False,
                       sa=self._sa_filter,
                       fa=self._fa_filter,
                       a=self._a_filter)
    mds.samples = msamples
    return mds
Example 10: _forward_dataset
def _forward_dataset(self, dataset):
    # invoke the superclass' _forward_dataset, which in turn calls
    # _forward_data of this class
    mds = super(FlattenMapper, self)._forward_dataset(dataset)
    # attribute collection needs to have a new length check
    mds.fa.set_length_check(mds.nfeatures)
    # we need to duplicate all existing feature attributes, as each original
    # feature is now spread across the new feature axis
    # take all "additional" axes after the actual feature axis and count
    # elements per sample -- if no such axis exists this will be 1
    for k in dataset.fa:
        if __debug__:
            debug("MAP_", "Forward-mapping fa '%s'." % k)
        attr = dataset.fa[k].value
        # the maximum number of axes to flatten in the attr
        if not self.__maxdims is None:
            maxdim = min(len(self.__origshape), self.__maxdims)
        else:
            maxdim = len(self.__origshape)
        multiplier = mds.nfeatures / np.prod(attr.shape[:maxdim])
        if __debug__:
            debug("MAP_", "Broadcasting fa '%s' %s %d times" % (k, attr.shape, multiplier))
        # broadcast as many times as necessary to get 'matching dimensions'
        bced = np.repeat(attr, multiplier, axis=0)
        # now reshape as many dimensions as the mapper knows about
        mds.fa[k] = bced.reshape((-1,) + bced.shape[maxdim:])
    # if there is no inspace return immediately
    if self.get_space() is None:
        return mds
    # otherwise create the coordinates as feature attributes
    else:
        mds.fa[self.get_space()] = list(np.ndindex(dataset.samples[0].shape))
        return mds
Example 11: __reverse_single_level
def __reverse_single_level(self, wp):
    # local bindings
    level_paths = self.__level_paths
    # define wavelet packet to use
    WP = pywt.WaveletPacket(
        data=None, wavelet=self._wavelet,
        mode=self._mode, maxlevel=self.__level)
    # prepare storage
    signal_shape = wp.shape[:1] + self._inshape[1:]
    signal = np.zeros(signal_shape)
    Ntime_points = self._intimepoints
    for indexes in _get_indexes(signal_shape,
                                self._dim):
        if __debug__:
            debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
        for path, level_data in zip(level_paths, wp[indexes]):
            WP[path] = level_data
        signal[indexes] = WP.reconstruct(True)[:Ntime_points]
    return signal
Example 12: _call
def _call(self, dataset):
    analyzers = []
    # create analyzers
    for clf in self.clf.clfs:
        if self.__analyzer is None:
            analyzer = clf.get_sensitivity_analyzer(**(self._slave_kwargs))
            if analyzer is None:
                raise ValueError, \
                      "Wasn't able to figure basic analyzer for clf %r" % \
                      (clf,)
            if __debug__:
                debug("SA", "Selected analyzer %r for clf %r" % \
                      (analyzer, clf))
        else:
            # XXX shallow copy should be enough...
            analyzer = copy.copy(self.__analyzer)
        # assign corresponding classifier
        analyzer.clf = clf
        # if clf was trained already - don't train again
        if clf.trained:
            analyzer._force_train = False
        analyzers.append(analyzer)
    self.__combined_analyzer.analyzers = analyzers
    # XXX not sure if we don't want to call directly ._call(dataset) to avoid
    # double application of transformers/combiners, after all we are just
    # 'proxying' here to combined_analyzer...
    # YOH: decided -- lets call ._call
    return self.__combined_analyzer._call(dataset)
Example 13: _train
def _train(self, dataset):
    """Select the most important features

    Parameters
    ----------
    dataset : Dataset
      used to compute sensitivity maps
    """
    # optionally train the analyzer first
    if self.__train_analyzer:
        self.__sensitivity_analyzer.train(dataset)
    sensitivity = self.__sensitivity_analyzer(dataset)
    """Compute the sensitivity map."""
    self.ca.sensitivity = sensitivity
    # Select features to preserve
    selected_ids = self.__feature_selector(sensitivity)
    if __debug__:
        debug("FS_", "Sensitivity: %s Selected ids: %s" %
              (sensitivity, selected_ids))
    # XXX not sure if it really has to be sorted
    selected_ids.sort()
    # announce desired features to the underlying slice mapper
    self._safe_assign_slicearg(selected_ids)
    # and perform its own training
    super(SensitivityBasedFeatureSelection, self)._train(dataset)
Example 14: _get_selected_ids
def _get_selected_ids(self, dataset):
    """Given a dataset actually select the features

    Returns
    -------
    indexes of the selected features
    """
    # optionally train the analyzer first
    if self.__train_analyzer:
        self.__sensitivity_analyzer.train(dataset)
    sensitivity = self.__sensitivity_analyzer(dataset)
    """Compute the sensitivity map."""
    self.ca.sensitivity = sensitivity
    # Select features to preserve
    selected_ids = self.__feature_selector(sensitivity)
    if __debug__:
        debug("FS_", "Sensitivity: %s Selected ids: %s" %
              (sensitivity, selected_ids))
    # XXX not sure if it really has to be sorted
    selected_ids.sort()
    return selected_ids
Example 15: _call
def _call(self, dataset=None):
    """Extract weights from SMLR classifier.

    SMLR always has weights available, so nothing has to be computed here.
    """
    clf = self.clf
    # transpose to have the number of features on the second axis
    # (as usual)
    weights = clf.weights.T
    if __debug__:
        debug('SMLR',
              "Extracting weights for %d-class SMLR" %
              (len(weights) + 1) +
              "Result: min=%f max=%f" %\
              (np.min(weights), np.max(weights)))
    # limit the labels to the number of sensitivity sets, to deal
    # with the case of `fit_all_weights=False`
    ds = Dataset(weights,
                 sa={clf.get_space(): clf._ulabels[:len(weights)]})
    if clf.params.has_bias:
        ds.sa['biases'] = clf.biases
    return ds