This article collects typical usage examples of the Python function mvpa.base.warning. If you are wondering what warning does, how to call it, or what real-world calls look like, the hand-picked code examples below should help.
The following presents 15 code examples of the warning function, listed in order of popularity.
Example 1: _SLcholesky_autoreg

def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
    """Simple wrapper around cholesky to incrementally regularize the
    matrix until successful computation.

    For `nsteps` we boost diagonal 10-fold each time from the
    'epsilon' of the respective dtype. If None -- would proceed until
    reaching 1.
    """
    if nsteps is None:
        nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
    result = None
    for step in xrange(nsteps):
        epsilon_value = (10**step) * np.finfo(C.dtype).eps
        epsilon = epsilon_value * np.eye(C.shape[0])
        try:
            result = SLcholesky(C + epsilon, lower=True)
        except SLAError, e:
            warning("Cholesky decomposition lead to failure: %s. "
                    "As requested, performing auto-regularization but "
                    "for better control you might prefer to regularize "
                    "yourself by providing lm parameter to GPR" % e)
            if step < nsteps-1:
                if __debug__:
                    debug("GPR", "Failed to obtain cholesky on "
                          "auto-regularization step %d value %g. Got %s."
                          " Boosting lambda more to reg. C."
                          % (step, epsilon_value, e))
                continue
            else:
                raise
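The same auto-regularization idea can be tried standalone with plain NumPy. The sketch below is illustrative only (the function name and the use of numpy.linalg.cholesky are my own choices, not mvpa code): it boosts the diagonal by growing multiples of the dtype epsilon until the factorization succeeds.

import numpy as np

def cholesky_autoreg(C, nsteps=None):
    """Attempt a Cholesky factorization, adding (10**step)*eps to the
    diagonal until it succeeds. Standalone sketch of the wrapper above."""
    if nsteps is None:
        # enough steps to grow the jitter from dtype eps up to about 1
        nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
    for step in range(nsteps):
        jitter = (10 ** step) * np.finfo(C.dtype).eps
        try:
            return np.linalg.cholesky(C + jitter * np.eye(C.shape[0]))
        except np.linalg.LinAlgError:
            if step == nsteps - 1:
                raise   # give up once the jitter has grown to ~1

# usage: a rank-deficient covariance matrix that plain cholesky rejects
X = np.random.randn(5, 3)
C = X.dot(X.T)                      # 5x5 but only rank 3
L = cholesky_autoreg(C)
print("factorization succeeded, L has shape", L.shape)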
Example 2: __init__

def __init__(self, samples=None, **kwargs):
    """Initialize EEPDataset.

    :Parameters:
      samples: Filename (string) of a EEP binary file or an `EEPBin`
               object
    """
    # dataset props defaults
    dt = t0 = channelids = None

    # default way to use the constructor: with filename
    if not samples is None:
        if isinstance(samples, str):
            # open the eep file
            try:
                eb = EEPBin(samples)
            except RuntimeError, e:
                warning("ERROR: EEPDatasets: Cannot open samples file %s" \
                        % samples) # should we make also error?
                raise e
        elif isinstance(samples, EEPBin):
            # nothing special
            eb = samples
        else:
            raise ValueError, \
                  "EEPDataset constructor takes the filename of an " \
                  "EEP file or a EEPBin object as 'samples' argument."
        samples = eb.data
        dt = eb.dt
        channelids = eb.channels
        t0 = eb.t0
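This example (and Examples 6 and 9 below) follows a common pattern: accept either a filename or an already constructed object, and if opening the file fails, emit a warning that adds context before re-raising the original error. A minimal standalone sketch of that dispatch, using NumPy and the standard warnings module as stand-ins for EEPBin and mvpa.base.warning:

import warnings
import numpy as np

def load_samples(samples):
    """Accept a filename or an ndarray (hypothetical helper sketching the
    dispatch-and-warn pattern of EEPDataset.__init__ above)."""
    if isinstance(samples, str):
        try:
            data = np.load(samples)        # stand-in for EEPBin(samples)
        except OSError:
            # add context for the user, then let the original error propagate
            warnings.warn("ERROR: Cannot open samples file %s" % samples)
            raise
    elif isinstance(samples, np.ndarray):
        data = samples                     # nothing special to do
    else:
        raise ValueError("expected a filename or an ndarray, got %r"
                         % type(samples))
    return data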
Example 3: test_confusion_based_error

def test_confusion_based_error(self, l_clf):
    train = datasets['uni2medium']
    train = train[train.sa.train == 1]
    # to check if we fail to classify for 3 labels
    test3 = datasets['uni3medium']
    test3 = test3[test3.sa.train == 1]
    err = ConfusionBasedError(clf=l_clf)
    terr = TransferMeasure(l_clf, Splitter('train', attr_values=[1,1]),
                           postproc=BinaryFxNode(mean_mismatch_error,
                                                 'targets'))

    self.failUnlessRaises(UnknownStateError, err, None)
    """Shouldn't be able to access the state yet"""

    l_clf.train(train)
    e, te = err(None), terr(train)
    te = np.asscalar(te)
    self.failUnless(abs(e-te) < 1e-10,
        msg="ConfusionBasedError (%.2g) should be equal to TransferError "
            "(%.2g) on traindataset" % (e, te))

    # this will print nasty WARNING but it is ok -- it is just checking code
    # NB warnings are not printed while doing whole testing
    warning("Don't worry about the following warning.")
    if 'multiclass' in l_clf.__tags__:
        self.failIf(terr(test3) is None)

    # try copying the beast
    terr_copy = copy(terr)
Example 4: _getUniqueLengthNCombinations_binary

def _getUniqueLengthNCombinations_binary(L, n=None, sort=True):
    """Find all subsets of data

    :Parameters:
      L : list
        list of unique ids
      n : None or int
        If None, all possible subsets are returned. If n is specified (int),
        then only the ones of the length n are returned
      sort : bool
        Either to sort the resultant sequence

    Adopted from Alex Martelli:
    http://mail.python.org/pipermail/python-list/2001-January/067815.html
    """
    N = len(L)
    if N > 20 and n == 1:
        warning("getUniqueLengthNCombinations_binary should not be used for "
                "large N")
    result = []
    for X in range(2**N):
        x = [ L[i] for i in range(N) if X & (1L<<i) ]
        if n is None or len(x) == n:
            # yield x # if we wanted to use it as a generator
            result.append(x)
    result.sort()
    # if __debug__ and n is not None:
    #     # verify the result
    #     # would need scipy... screw it
    #     assert(len(result) == ...)
    return result
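For comparison, itertools.combinations produces the same subsets without scanning all 2**N bit patterns, which is exactly the cost the warning above guards against. A standalone sketch (not mvpa code):

from itertools import combinations

def unique_combinations(L, n=None):
    """Subsets of L: every length if n is None, otherwise only length n.
    Standalone sketch of the helper above."""
    if n is not None:
        return [list(c) for c in combinations(L, n)]
    return [list(c) for r in range(len(L) + 1) for c in combinations(L, r)]

print(unique_combinations(['a', 'b', 'c'], n=2))
# [['a', 'b'], ['a', 'c'], ['b', 'c']]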
Example 5: train

def train(self, dataset):
    """Train classifier on a dataset

    Shouldn't be overridden in subclasses unless explicitly needed
    to do so
    """
    if dataset.nfeatures == 0 or dataset.nsamples == 0:
        raise DegenerateInputError, \
              "Cannot train classifier on degenerate data %s" % dataset
    if __debug__:
        debug("CLF", "Training classifier %(clf)s on dataset %(dataset)s",
              msgargs={'clf':self, 'dataset':dataset})

    self._pretrain(dataset)

    # remember the time when started training
    t0 = time.time()

    if dataset.nfeatures > 0:
        result = self._train(dataset)
    else:
        warning("Trying to train on dataset with no features present")
        if __debug__:
            debug("CLF",
                  "No features present for training, no actual training " \
                  "is called")
        result = None

    self.ca.training_time = time.time() - t0
    self._posttrain(dataset)
    return result
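train() is a template method: the base class handles the degenerate-data check, the timing, and the no-features warning, while subclasses implement _train() (plus optional _pretrain/_posttrain hooks). A stripped-down standalone skeleton of that structure, with illustrative names rather than the mvpa API:

import time
import warnings

class TrainableBase(object):
    """Minimal template-method skeleton (illustrative, not mvpa code)."""

    def train(self, X):
        # X is assumed to be a 2-D samples-by-features array
        if len(X) == 0:
            raise ValueError("cannot train on an empty dataset")
        self._pretrain(X)
        t0 = time.time()
        if X.shape[1] > 0:
            result = self._train(X)
        else:
            warnings.warn("no features present, skipping actual training")
            result = None
        self.training_time = time.time() - t0
        self._posttrain(X)
        return result

    # hooks for subclasses
    def _pretrain(self, X):
        pass

    def _train(self, X):
        raise NotImplementedError

    def _posttrain(self, X):
        pass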
Example 6: _load_anynifti

def _load_anynifti(src, ensure=False, enforce_dim=None):
    """Load/access NIfTI data from files or instances.

    Parameters
    ----------
    src : str or NiftiImage
      Filename of a NIfTI image or a `NiftiImage` instance.
    ensure : bool, optional
      If True, throw ValueError exception if cannot be loaded.
    enforce_dim : int or None
      If not None, it is the dimensionality of the data to be enforced,
      commonly 4D for the data, and 3D for the mask in case of fMRI.

    Returns
    -------
    NiftiImage or None
      If the source is not supported None is returned.

    Raises
    ------
    ValueError
      If there is a problem with data (variable dimensionality) or
      failed to load data and ensure=True.
    """
    nifti = None

    # figure out what type
    if isinstance(src, str):
        # open the nifti file
        try:
            nifti = NiftiImage(src)
        except RuntimeError, e:
            warning("ERROR: Cannot open NIfTI file %s" % src)
            raise e
Example 7: __init__

def __init__(self, **kwargs):
    """Initialize an SMLR classifier.
    """

    """
    TODO:
     # Add in likelihood calculation
     # Add kernels, not just direct methods.
    """

    # init base class first
    Classifier.__init__(self, **kwargs)

    if _cStepwiseRegression is None and self.params.implementation == 'C':
        warning('SMLR: C implementation is not available.'
                ' Using pure Python one')
        self.params.implementation = 'Python'

    # pylint friendly initializations
    self._ulabels = None
    """Unique labels from the training set."""
    self.__weights_all = None
    """Contains all weights including bias values"""
    self.__weights = None
    """Just the weights, without the biases"""
    self.__biases = None
    """The biases, will remain none if has_bias is False"""
Example 8: _setdebug

def _setdebug(obj, partname):
    """Helper to set level of debugging output for SG

    :Parameters:
      obj
        In SG debug output seems to be set per every object
      partname : basestring
        For what kind of object we are talking about... could be automated
        later on (TODO)
    """
    debugname = "SG_%s" % partname.upper()

    switch = {True: (shogun.Kernel.M_DEBUG, 'M_DEBUG', "enable"),
              False: (shogun.Kernel.M_ERROR, 'M_ERROR', "disable")}

    key = __debug__ and debugname in debug.active

    sglevel, slevel, progressfunc = switch[key]

    if __debug__:
        debug("SG_", "Setting verbosity for shogun.%s instance: %s to %s" %
              (partname, `obj`, slevel))
    obj.io.set_loglevel(sglevel)
    try:
        exec "obj.io.%s_progress()" % progressfunc
    except:
        warning("Shogun version installed has no way to enable progress" +
                " reports")
Example 9: getNiftiFromAnySource

def getNiftiFromAnySource(src, ensure=False, enforce_dim=None):
    """Load/access NIfTI data from files or instances.

    :Parameters:
      src: str | NiftiImage
        Filename of a NIfTI image or a `NiftiImage` instance.
      ensure : bool
        If True, throw ValueError exception if cannot be loaded.
      enforce_dim : int or None
        If not None, it is the dimensionality of the data to be enforced,
        commonly 4D for the data, and 3D for the mask in case of fMRI.

    :Returns:
      NiftiImage | None
        If the source is not supported None is returned.
    """
    nifti = None

    # figure out what type
    if isinstance(src, str):
        # open the nifti file
        try:
            nifti = NiftiImage(src)
        except RuntimeError, e:
            warning("ERROR: NiftiDatasets: Cannot open NIfTI file %s" \
                    % src)
            raise e
Example 10: labelVoxel

def labelVoxel(self, c, levels = None):
    if self.__referenceLevel is None:
        warning("You did not provide what level to use "
                "for reference. Assigning 0th level -- '%s'"
                % (self._levels_dict[0],))
        self.setReferenceLevel(0)
        # return self.__referenceAtlas.labelVoxel(c, levels)

    c = self._checkRange(c)

    # obtain coordinates of the closest voxel
    cref = self._data[ self.__referenceLevel.indexes, c[2], c[1], c[0] ]
    dist = norm( (cref - c) * self.voxdim )
    if __debug__:
        debug('ATL__', "Closest referenced point for %s is "
              "%s at distance %3.2f" % (`c`, `cref`, dist))
    if (self.distance - dist) >= 1e-3: # neglect everything smaller
        result = self.__referenceAtlas.labelVoxel(cref, levels)
        result['voxel_referenced'] = c
        result['distance'] = dist
    else:
        result = self.__referenceAtlas.labelVoxel(c, levels)
        if __debug__:
            debug('ATL__', "Closest referenced point is "
                  "further than desired distance %.2f" % self.distance)
        result['voxel_referenced'] = None
        result['distance'] = 0
    return result
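The distance test above works in physical units: the voxel-index difference is scaled by the voxel dimensions before taking the norm. A tiny NumPy illustration with made-up numbers:

import numpy as np

c      = np.array([10, 12, 8])        # queried voxel (index coordinates)
cref   = np.array([11, 12, 9])        # closest referenced voxel
voxdim = np.array([2.0, 2.0, 2.5])    # voxel size in mm along each axis

dist_mm = np.linalg.norm((cref - c) * voxdim)
print(dist_mm)                        # about 3.20 mm for these values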
Example 11: _get_increments

def _get_increments(self, ndim):
    """Creates a list of increments for a given dimensionality

    RF: lame yoh just cut-pasted and tuned up because everything
        depends on ndim...
    """
    # Set element_sizes
    element_sizes = self._element_sizes
    if element_sizes is None:
        element_sizes = np.ones(ndim)
    else:
        if (ndim != len(element_sizes)):
            raise ValueError, \
                  "Dimensionality mismatch: element_sizes %s provided " \
                  "to constructor had %i dimensions, whenever queried " \
                  "coordinate had %i" \
                  % (element_sizes, len(element_sizes), ndim)
    center = np.zeros(ndim)
    element_sizes = np.asanyarray(element_sizes)
    # What range for each dimension
    erange = np.ceil(self._radius / element_sizes).astype(int)

    tentative_increments = np.array(list(np.ndindex(tuple(erange*2 + 1)))) \
                           - erange
    # Filter out the ones beyond the "sphere"
    res = array([x for x in tentative_increments
                 if self._inner_radius
                 < self._distance_func(x * element_sizes, center)
                 <= self._radius])

    if not len(res):
        warning("%s defines no neighbors" % self)
    return res
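The neighborhood construction can be reproduced standalone: enumerate an integer box spanning ceil(radius / element_size) steps per axis and keep the offsets whose scaled Euclidean length falls inside the sphere. A minimal NumPy sketch (not the mvpa Sphere class):

import numpy as np

def sphere_increments(radius, element_sizes, inner_radius=0.0):
    """Integer offsets whose physical distance from the center lies in
    (inner_radius, radius], mirroring the filtering logic above."""
    element_sizes = np.asarray(element_sizes, dtype=float)
    erange = np.ceil(radius / element_sizes).astype(int)
    offsets = np.array(list(np.ndindex(tuple(erange * 2 + 1)))) - erange
    dist = np.sqrt(((offsets * element_sizes) ** 2).sum(axis=1))
    return offsets[(dist > inner_radius) & (dist <= radius)]

print(len(sphere_increments(1, [1, 1, 1])))   # 6 face neighbors, center excluded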
Example 12: _precall

def _precall(self, testdataset, trainingdataset=None):
    """Generic part which trains the classifier if necessary
    """
    if not trainingdataset is None:
        if self.__train:
            # XXX can be pretty annoying if triggered inside an algorithm
            # where it cannot be switched of, but retraining might be
            # intended or at least not avoidable.
            # Additonally is_trained docs say:
            #   MUST BE USED WITH CARE IF EVER
            #
            # switching it off for now
            #if self.__clf.is_trained(trainingdataset):
            #    warning('It seems that classifier %s was already trained' %
            #            self.__clf + ' on dataset %s. Please inspect' \
            #            % trainingdataset)
            if self.ca.is_enabled('training_stats'):
                self.__clf.ca.change_temporarily(
                    enable_ca=['training_stats'])
            self.__clf.train(trainingdataset)
            if self.ca.is_enabled('training_stats'):
                self.ca.training_stats = \
                    self.__clf.ca.training_stats
                self.__clf.ca.reset_changed_temporarily()

    if self.__clf.ca.is_enabled('trained_targets') \
           and not self.__clf.__is_regression__ \
           and not testdataset is None:
        newlabels = set(testdataset.sa[self.clf.get_space()].unique) \
                    - set(self.__clf.ca.trained_targets)
        if len(newlabels)>0:
            warning("Classifier %s wasn't trained to classify labels %s" %
                    (self.__clf, newlabels) +
                    " present in testing dataset. Make sure that you have" +
                    " not mixed order/names of the arguments anywhere")
Example 13: _call

def _call(self, ds):
    # local binding
    generator = self._generator
    node = self._node
    ca = self.ca
    space = self.get_space()
    concat_as = self._concat_as

    if self.ca.is_enabled("stats") and (not node.ca.has_key("stats") or
                                        not node.ca.is_enabled("stats")):
        warning("'stats' conditional attribute was enabled, but "
                "the assigned node '%s' either doesn't support it, "
                "or it is disabled" % node)

    # precharge conditional attributes
    ca.datasets = []

    # run the node on all generated datasets
    results = []
    for i, sds in enumerate(generator.generate(ds)):
        if ca.is_enabled("datasets"):
            # store dataset in ca
            ca.datasets.append(sds)
        # run the beast
        result = node(sds)
        # callback
        if not self._callback is None:
            self._callback(data=sds, node=node, result=result)
        # subclass postprocessing
        result = self._repetition_postcall(sds, node, result)
        if space:
            # XXX maybe try to get something more informative from the
            # processing node (e.g. in 0.5 it used to be 'chunks'->'chunks'
            # to indicate what was trained and what was tested. Now it is
            # more tricky, because `node` could be anything
            result.set_attr(space, (i,))
        # store
        results.append(result)

        if ca.is_enabled("stats") and node.ca.has_key("stats") \
           and node.ca.is_enabled("stats"):
            if not ca.is_set('stats'):
                # create empty stats container of matching type
                ca.stats = node.ca['stats'].value.__class__()
            # harvest summary stats
            ca['stats'].value.__iadd__(node.ca['stats'].value)

    # charge condition attribute
    self.ca.repetition_results = results

    # stack all results into a single Dataset
    if concat_as == 'samples':
        results = vstack(results)
    elif concat_as == 'features':
        results = hstack(results)
    else:
        raise ValueError("Unknown concatenation mode '%s'" % concat_as)
    # no need to store the raw results, since the Measure class will
    # automatically store them in a CA
    return results
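The final concat_as branch maps onto NumPy's stacking semantics: per-repetition results become new rows ('samples') or new columns ('features'). mvpa's vstack/hstack operate on Dataset objects, but the axis logic is the same, as this toy example shows:

import numpy as np

results = [np.array([[0.12, 0.08]]),    # result of repetition 0
           np.array([[0.10, 0.11]])]    # result of repetition 1

print(np.vstack(results).shape)         # (2, 2): stacked as samples
print(np.hstack(results).shape)         # (1, 4): stacked as features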
Example 14: seed

def seed(random_seed):
    if __debug__:
        debug('SG', "Seeding shogun's RNG with %s" % random_seed)
    try:
        # reuse the same seed for shogun
        shogun.Library.Math_init_random(random_seed)
    except Exception, e:
        warning('Shogun cannot be seeded due to %s' % (e,))
Example 15: corr_error_prob

def corr_error_prob(predicted, target):
    """Computes p-value of correlation between the target and the predicted
    values.
    """
    from mvpa.base import warning
    warning("p-value for correlation is implemented only when scipy is "
            "available. Bogus value -1.0 is returned otherwise")
    return -1.0
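When SciPy is available, the real p-value comes from scipy.stats.pearsonr; the snippet above is only the fallback for installations without SciPy. A sketch of what the SciPy-backed variant amounts to (the exact mvpa implementation is not shown on this page, so treat this as an approximation):

import numpy as np
from scipy.stats import pearsonr

def corr_error_prob(predicted, target):
    """p-value of the correlation between predictions and targets."""
    return pearsonr(np.asarray(predicted), np.asarray(target))[1]

print(corr_error_prob([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]))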