This article collects typical usage examples of the mvpa.base.debug function in Python: what debug does, how it is called, and how it is used in real code. The 15 hand-picked examples below are drawn from open source projects and are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
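Before the examples, here is a minimal sketch of the idiom they all share. It assumes a standard PyMVPA installation; the guard on __debug__ lets python -O strip the calls entirely, and debug.active is treated here as the mutable list of enabled targets that Examples 2 and 12 test membership against:

from mvpa.base import debug

if __debug__:
    # enable a debug target (here "SA", one of the ids used below);
    # with no matching active target, debug() calls are silent no-ops
    debug.active += ["SA"]
    debug("SA", "Processing %d items" % 10)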
Example 1: _call
def _call(self, dataset):
    sensitivities = []
    for ind, analyzer in enumerate(self.__analyzers):
        if __debug__:
            debug("SA", "Computing sensitivity for SA#%d:%s" %
                  (ind, analyzer))
        sensitivity = analyzer(dataset)
        sensitivities.append(sensitivity)

    if __debug__:
        debug("SA",
              "Returning combined using %s sensitivity across %d items" %
              (self.__combiner, len(sensitivities)))

    # TODO Simplify if we go Dataset-only
    if len(sensitivities) == 1:
        sensitivities = np.asanyarray(sensitivities[0])
    else:
        if isinstance(sensitivities[0], AttrDataset):
            smerged = None
            for i, s in enumerate(sensitivities):
                s.sa['splits'] = np.repeat(i, len(s))
                if smerged is None:
                    smerged = s
                else:
                    smerged.append(s)
            sensitivities = smerged
        else:
            sensitivities = \
                Dataset(sensitivities,
                        sa={'splits': np.arange(len(sensitivities))})
    self.ca.sensitivities = sensitivities
    return sensitivities
Example 2: _train
def _train(self, samples):
    """Determine the projection matrix onto the SVD components from
    a 2D samples x feature data matrix.
    """
    X = np.asmatrix(samples)
    X = self._demean_data(X)

    # singular value decomposition
    U, SV, Vh = np.linalg.svd(X, full_matrices=0)

    # store the final matrix with the new basis vectors to project the
    # features onto the SVD components. And store its .H right away to
    # avoid computing it in forward()
    self._proj = Vh.H

    # also store singular values of all components
    self._sv = SV

    if __debug__:
        # SV.nonzero() returns a tuple of index arrays, hence the [0]
        debug("MAP", "SVD was done on %s and obtained %d SVs " %
              (samples, len(SV)) + " (%d non-0, max=%f)" %
              (len(SV.nonzero()[0]), SV[0]))
        # .norm might be somewhat expensive to compute
        if "MAP_" in debug.active:
            debug("MAP_", "Mixing matrix has %s shape and norm=%f" %
                  (self._proj.shape, np.linalg.norm(self._proj)))
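Note the second debug call above: computing the matrix norm is itself expensive, so Example 2 checks debug.active before doing the work rather than handing the message to debug() unconditionally. A minimal standalone sketch of the same gating pattern, reusing the "MAP_" target from the example:

import numpy as np
from mvpa.base import debug

proj = np.random.randn(100, 100)
# compute the (relatively expensive) norm only if somebody is
# actually listening on the "MAP_" target
if __debug__ and "MAP_" in debug.active:
    debug("MAP_", "Mixing matrix has %s shape and norm=%f"
          % (proj.shape, np.linalg.norm(proj)))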
Example 3: _call
def _call(self, dataset):
    sensitivities = []
    for ind, analyzer in enumerate(self.__analyzers):
        if __debug__:
            debug("SA", "Computing sensitivity for SA#%d:%s" %
                  (ind, analyzer))
        sensitivity = analyzer(dataset)
        sensitivities.append(sensitivity)

    if __debug__:
        debug("SA",
              "Returning %d sensitivities from %s" %
              (len(sensitivities), self.__class__.__name__))

    sa_attr = self._sa_attr
    if isinstance(sensitivities[0], AttrDataset):
        smerged = None
        for i, s in enumerate(sensitivities):
            s.sa[sa_attr] = np.repeat(i, len(s))
            if smerged is None:
                smerged = s
            else:
                smerged.append(s)
        sensitivities = smerged
    else:
        sensitivities = \
            Dataset(sensitivities,
                    sa={sa_attr: np.arange(len(sensitivities))})
    self.ca.sensitivities = sensitivities
    return sensitivities
Example 4: _prepredict
def _prepredict(self, dataset):
    """Functionality prior prediction
    """
    if 'notrain2predict' not in self.__tags__:
        # check if classifier was trained if that is needed
        if not self.trained:
            raise ValueError(
                "Classifier %s wasn't yet trained, therefore can't "
                "predict" % self)
        nfeatures = dataset.nfeatures  # data.shape[1]
        # check if number of features is the same as in the data
        # it was trained on
        if nfeatures != self.__trainednfeatures:
            raise ValueError(
                "Classifier %s was trained on data with %d features, " %
                (self, self.__trainednfeatures) +
                "thus can't predict for %d features" % nfeatures)

    if self.params.retrainable:
        if not self.__changedData_isset:
            self.__reset_changed_data()
            _changedData = self._changedData
            data = np.asanyarray(dataset.samples)
            _changedData['testdata'] = \
                self.__was_data_changed('testdata', data)
            if __debug__:
                debug('CLF_', "prepredict: Obtained _changedData is %s"
                      % (_changedData))
Example 5: _set
def _set(self, val):
    if __debug__ and __mvpadebug__:
        # Since this gets called quite often, don't convert values
        # to strings here; rely on passing them within msgargs
        debug("COL", "Setting %(self)s to %(val)s ",
              msgargs={"self": self, "val": val})
    self._value = val
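The msgargs keyword here is the complement of the previous gating pattern: instead of skipping the message, it defers the %(self)s-style interpolation so that no string conversion happens for inactive targets. A hedged sketch, assuming (as the comment above implies) that debug() performs the interpolation only when the message is actually emitted:

from mvpa.base import debug

value = range(1000)
# str(value) is never built here -- interpolation of %(val)s is
# left to debug(), which skips it when "COL" is inactive
debug("COL", "Setting parameter to %(val)s", msgargs={"val": value})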
Example 6: _SLcholesky_autoreg
def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
    """Simple wrapper around cholesky to incrementally regularize the
    matrix until successful computation.

    For `nsteps` we boost the diagonal 10-fold each time from the
    'epsilon' of the respective dtype. If None -- would proceed until
    reaching 1.
    """
    if nsteps is None:
        nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
    result = None
    for step in xrange(nsteps):
        epsilon_value = (10**step) * np.finfo(C.dtype).eps
        epsilon = epsilon_value * np.eye(C.shape[0])
        try:
            result = SLcholesky(C + epsilon, lower=True)
        except SLAError as e:
            warning("Cholesky decomposition led to failure: %s. "
                    "As requested, performing auto-regularization but "
                    "for better control you might prefer to regularize "
                    "yourself by providing lm parameter to GPR" % e)
            if step < nsteps - 1:
                if __debug__:
                    debug("GPR", "Failed to obtain cholesky on "
                          "auto-regularization step %d value %g. Got %s."
                          " Boosting lambda more to reg. C."
                          % (step, epsilon_value, e))
                continue
            else:
                raise
        # success -- return the factorization from the first step that worked
        return result
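The regularization idea is independent of GPR: when a symmetric matrix is numerically singular, adding a progressively larger multiple of machine epsilon to the diagonal usually lets the Cholesky factorization go through. A standalone sketch of the same scheme with plain NumPy/SciPy (a minimal variant for illustration, not the PyMVPA implementation; SLcholesky/SLAError above appear to be aliases for these SciPy/NumPy equivalents):

import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import cholesky

def cholesky_autoreg(C, nsteps=16):
    """Retry Cholesky, boosting a diagonal jitter 10-fold per attempt."""
    for step in range(nsteps):
        jitter = (10 ** step) * np.finfo(C.dtype).eps
        try:
            return cholesky(C + jitter * np.eye(C.shape[0]), lower=True)
        except LinAlgError:
            if step == nsteps - 1:
                raise

# np.ones((3, 3)) is rank-1 and fails a plain cholesky(); the jitter
# added above makes the factorization succeed
L = cholesky_autoreg(np.ones((3, 3)))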
Example 7: _recon_customobj_customrecon
def _recon_customobj_customrecon(hdf, memo):
    """Reconstruct a custom object from HDF using a custom reconstructor"""
    # we found something that has some special idea about how it wants
    # to be reconstructed
    mod_name = hdf.attrs['module']
    recon_name = hdf.attrs['recon']
    if mod_name == '__builtin__':
        raise NotImplementedError(
            "Built-in reconstructors are not supported (yet). "
            "Got: '%s'" % recon_name)

    if __debug__:
        debug('HDF5', "Load from custom reconstructor '%s.%s' [%s]"
              % (mod_name, recon_name, hdf.name))
    # turn names into definitions
    mod = __import__(mod_name, fromlist=[recon_name])
    recon = mod.__dict__[recon_name]

    if 'rcargs' in hdf:
        recon_args_hdf = hdf['rcargs']
        if __debug__:
            debug('HDF5', "Load reconstructor args in [%s]"
                  % recon_args_hdf.name)
        recon_args = _hdf_tupleitems_to_obj(recon_args_hdf, memo)
    else:
        recon_args = ()

    # reconstruct
    obj = recon(*recon_args)
    # TODO Handle potentially available state settings
    return obj
Example 8: __init__
def __init__(self, name=None, enabled=True, doc="State variable"):
    CollectableAttribute.__init__(self, name, doc)
    self._isenabled = enabled
    self._defaultenabled = enabled
    if __debug__:
        debug("STV",
              "Initialized new state variable %s " % name + repr(self))
Example 9: _set
def _set(self, val, init=False):
    different_value = self._value != val
    isarray = isinstance(different_value, np.ndarray)
    if self._ro and not init:
        raise RuntimeError(
            "Attempt to set read-only parameter %s to %s"
            % (self.name, val))
    if (isarray and np.any(different_value)) or \
       ((not isarray) and different_value):
        if __debug__:
            debug("COL",
                  "Parameter: setting %s to %s " % (str(self), val))
        if not isarray:
            if hasattr(self, 'min') and val < self.min:
                raise ValueError(
                    "Minimal value for parameter %s is %s. Got %s" %
                    (self.name, self.min, val))
            if hasattr(self, 'max') and val > self.max:
                raise ValueError(
                    "Maximal value for parameter %s is %s. Got %s" %
                    (self.name, self.max, val))
            if hasattr(self, 'choices') and (val not in self.choices):
                raise ValueError(
                    "Valid choices for parameter %s are %s. Got %s" %
                    (self.name, self.choices, val))
        self._value = val
        # Set 'isset' only if not called from initialization routine
        self._isset = not init
    elif __debug__:
        debug("COL",
              "Parameter: not setting %s since value is the same"
              % (str(self)))
Example 10: __new__
def __new__(cls, *args, **kwargs):
    if len(args) > 0:
        if len(kwargs) > 0:
            raise ValueError(
                "Do not mix positional and keyword arguments. "
                "Use a single positional argument -- filename, "
                "or any number of keyword arguments, without having "
                "filename specified")
        if len(args) == 1 and isinstance(args[0], basestring):
            filename = args[0]
            args = args[1:]
            if __debug__:
                debug('IOH', 'Undigging hamster from %s' % filename)
            # compressed or not -- that is the question
            if filename.endswith('.gz'):
                f = gzip.open(filename)
            else:
                f = open(filename)
            result = cPickle.load(f)
            if not isinstance(result, Hamster):
                warning("Loaded other than Hamster class from %s" % filename)
            return result
        else:
            raise ValueError(
                "Hamster accepts only a single positional "
                "argument and it must be a filename. Got %d "
                "arguments" % (len(args),))
    else:
        return object.__new__(cls)
Example 11: dump
def dump(self, filename, compresslevel='auto'):
    """Bury the hamster into the file

    Parameters
    ----------
    filename : str
      Name of the target file. When writing to a compressed file the
      filename gets a '.gz' extension if not already specified. This
      is necessary as the constructor uses the extension to decide
      whether it loads from a compressed or uncompressed file.
    compresslevel : 'auto' or int
      Compression level setting passed to gzip. When set to
      'auto', if filename ends with '.gz' `compresslevel` is set
      to 5, 0 otherwise. However, when `compresslevel` is set to
      0 gzip is bypassed completely and everything is written to
      an uncompressed file.
    """
    if compresslevel == 'auto':
        compresslevel = (0, 5)[int(filename.endswith('.gz'))]
    if compresslevel > 0 and not filename.endswith('.gz'):
        filename += '.gz'

    if __debug__:
        debug('IOH', 'Burying hamster into %s' % filename)
    if compresslevel == 0:
        f = open(filename, 'w')
    else:
        f = gzip.open(filename, 'w', compresslevel)
    cPickle.dump(self, f)
    f.close()
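Examples 10 and 11 together give a full save/load round trip. A hedged usage sketch: the filename and keyword calling conventions follow the two methods above, while the import path and the assumption that keyword values come back as attributes of the loaded object are mine, not something the excerpts show.

from mvpa.misc.io.hamster import Hamster  # import path assumed

h = Hamster(results=[1, 2, 3], note="pilot run")
h.dump('/tmp/session.gz')              # '.gz' suffix selects gzip storage
restored = Hamster('/tmp/session.gz')  # single positional arg == load path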
Example 12: _call
def _call(self, dataset):
    # local bindings
    analyzer = self.__analyzer
    insplit_index = self.__insplit_index

    sensitivities = []
    self.splits = splits = []
    store_splits = self.states.isEnabled("splits")

    for ind, split in enumerate(self.__splitter(dataset)):
        ds = split[insplit_index]
        if __debug__ and "SA" in debug.active:
            debug("SA", "Computing sensitivity for split %d on "
                  "dataset %s using %s" % (ind, ds, analyzer))
        sensitivity = analyzer(ds)
        sensitivities.append(sensitivity)
        if store_splits:
            splits.append(split)

    self.sensitivities = sensitivities
    if __debug__:
        debug("SA",
              "Returning sensitivities combined using %s across %d items "
              "generated by splitter %s" %
              (self.__combiner, len(sensitivities), self.__splitter))

    if self.__combiner is not None:
        sensitivities = self.__combiner(sensitivities)
    else:
        # assure that we have an ndarray on output
        sensitivities = N.asarray(sensitivities)
    return sensitivities
Example 13: __reverseSingleLevel
def __reverseSingleLevel(self, wp):
    # local bindings
    level_paths = self.__level_paths

    # define wavelet packet to use
    WP = pywt.WaveletPacket(
        data=None, wavelet=self._wavelet,
        mode=self._mode, maxlevel=self.__level)

    # prepare storage
    signal_shape = wp.shape[:1] + self.getInSize()
    signal = N.zeros(signal_shape)
    Ntime_points = self._intimepoints
    for indexes in _getIndexes(signal_shape,
                               self._dim):
        if __debug__:
            debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
        for path, level_data in zip(level_paths, wp[indexes]):
            WP[path] = level_data
        signal[indexes] = WP.reconstruct(True)[:Ntime_points]

    return signal
Example 14: train
def train(self, dataset):
    """Train classifier on a dataset

    Shouldn't be overridden in subclasses unless explicitly needed
    to do so
    """
    if dataset.nfeatures == 0 or dataset.nsamples == 0:
        raise DegenerateInputError(
            "Cannot train classifier on degenerate data %s" % dataset)
    if __debug__:
        debug("CLF", "Training classifier %(clf)s on dataset %(dataset)s",
              msgargs={'clf': self, 'dataset': dataset})

    self._pretrain(dataset)

    # remember the time when started training
    t0 = time.time()
    if dataset.nfeatures > 0:
        result = self._train(dataset)
    else:
        warning("Trying to train on dataset with no features present")
        if __debug__:
            debug("CLF",
                  "No features present for training, no actual training "
                  "is called")
        result = None
    self.ca.training_time = time.time() - t0
    self._posttrain(dataset)
    return result
Example 15: __init__
def __init__(self, clf, labels=None, confusion_state="training_stats",
             **kwargs):
    """Initialization.

    Parameters
    ----------
    clf : Classifier
      Either trained or untrained classifier
    confusion_state
      Id of the conditional attribute which stores `ConfusionMatrix`
    labels : list
      if provided, should be a set of labels to add on top of the
      ones present in testdata
    """
    ClassifierError.__init__(self, clf, labels, **kwargs)

    self.__confusion_state = confusion_state
    """What state to extract from"""

    if not clf.ca.has_key(confusion_state):
        raise ValueError(
            "Conditional attribute %s is not defined for classifier %r" %
            (confusion_state, clf))
    if not clf.ca.is_enabled(confusion_state):
        if __debug__:
            debug('CERR', "Forcing state %s to be enabled for %r" %
                  (confusion_state, clf))
        clf.ca.enable(confusion_state)