This page collects typical usage examples of the warning function from the Python module mvpa2.base. If you are wondering what warning does, how it is called, or what real-world usage looks like, the hand-picked snippets below should help.
The following sections show 15 code examples of the warning function, sorted by popularity by default.
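All of the examples share one pattern: import warning from mvpa2.base and call it with a message string, often built with %-formatting. A minimal, hedged sketch of that pattern -- the empty-input check itself is made up for illustration:

import numpy as np
from mvpa2.base import warning

def summarize(samples):
    samples = np.asanyarray(samples)
    if samples.size == 0:
        # report a suspicious but non-fatal condition without raising
        warning("Received an empty samples array -- summary will be empty")
        return None
    return samples.mean()

summarize([])    # emits the warning and returns None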
Example 1: _verified_reverse1
def _verified_reverse1(mapper, onesample):
"""Replacement of Mapper.reverse1 with safety net
This function can be called instead of a direct call to a mapper's
``reverse1()``. It wraps a single sample into a dummy axis and calls
``reverse()``. Afterwards it verifies that the first axis of the
returned array has one item only, otherwise it will issue a warning.
This function is useful in any context where it is critical to ensure
that reverse mapping a single sample, yields exactly one sample -- which
isn't guaranteed due to the flexible nature of mappers.
Parameters
----------
mapper : Mapper instance
onesample : array-like
Single sample (in terms of the supplied mapper).
Returns
-------
array
Shape matches a single sample in terms of the mappers input space.
"""
dummy_axis_sample = np.asanyarray(onesample)[None]
rsample = mapper.reverse(dummy_axis_sample)
if not len(rsample) == 1:
warning("Reverse mapping single sample yielded multiple -- can lead to unintended behavior!")
return rsample[0]
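A hedged usage sketch for the helper above; BuggyMapper is a made-up stand-in (not a PyMVPA class) whose reverse() returns more than one sample, so the safety-net warning fires:

import numpy as np

class BuggyMapper(object):
    """Hypothetical mapper whose reverse() duplicates its input."""
    def reverse(self, data):
        return np.vstack((data, data))

sample = np.arange(4)
restored = _verified_reverse1(BuggyMapper(), sample)   # warns, then returns the first row
assert restored.shape == (4,)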
Example 2: _get_increments
def _get_increments(self, ndim):
"""Creates a list of increments for a given dimensionality
RF: lame yoh just cut-pasted and tuned up because everything
depends on ndim...
"""
# Set element_sizes
element_sizes = self._element_sizes
if element_sizes is None:
element_sizes = np.ones(ndim)
else:
if (ndim != len(element_sizes)):
raise ValueError(
    "Dimensionality mismatch: element_sizes %s provided "
    "to the constructor had %i dimensions, whereas the queried "
    "coordinate had %i"
    % (element_sizes, len(element_sizes), ndim))
center = np.zeros(ndim)
element_sizes = np.asanyarray(element_sizes)
# What range for each dimension
erange = np.ceil(self._radius / element_sizes).astype(int)
tentative_increments = np.array(list(np.ndindex(tuple(erange*2 + 1)))) \
- erange
# Filter out the ones beyond the "sphere"
res = np.array([x for x in tentative_increments
if self._inner_radius
< self._distance_func(x * element_sizes, center)
<= self._radius])
if not len(res):
warning("%s defines no neighbors" % self)
return res
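For reference, the core idea above -- enumerate integer offsets and keep those whose distance from the center stays within the radius -- can be sketched standalone in NumPy (Euclidean distance and unit element sizes are simplifying assumptions of this illustration):

import numpy as np

def sphere_increments(radius, ndim):
    # all integer offsets whose Euclidean norm does not exceed `radius`
    erange = int(np.ceil(radius))
    offsets = np.array(list(np.ndindex((2 * erange + 1,) * ndim))) - erange
    return offsets[np.linalg.norm(offsets, axis=1) <= radius]

print(len(sphere_increments(1, 3)))   # 7: the center voxel plus its 6 face neighbors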
Example 3: _check_cosmo_dataset
def _check_cosmo_dataset(cosmo):
'''
Helper function to ensure a cosmo input for cosmo_dataset is valid.
Currently does two things:
(1) raise an error if there are no samples
(2) issue a warning if samples have very large or very small values. A use
case is certain MEEG datasets with very small sample values
(on the order of 1e-25), which can affect some classifiers
'''
samples = cosmo.get('samples', None)
if samples is None:
raise KeyError("Missing field .samples in %s" % cosmo)
# check for extreme values
warn_for_extreme_values_decimals = 10
# ignore NaNs and infinity
nonzero_msk = np.logical_and(np.isfinite(samples), samples != 0)
if np.any(nonzero_msk):
max_nonzero = np.max(np.abs(samples[nonzero_msk]))
# see how many decimals in the largest absolute value
decimals_nonzero = np.log10(max_nonzero)
if abs(decimals_nonzero) > warn_for_extreme_values_decimals:
msg = (
'Samples have extreme values, maximum absolute value is %s; '
'This may affect some analyses. Consider scaling the samples, '
'e.g. by a factor of 10**%d ' % (
max_nonzero, -decimals_nonzero))
warning(msg)
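A hedged illustration of input that trips the extreme-value branch above; the dict and its values are made up (MEEG-like samples around 1e-25):

import numpy as np

cosmo = {'samples': np.random.randn(8, 16) * 1e-25}
_check_cosmo_dataset(cosmo)      # emits the "extreme values" warning
# _check_cosmo_dataset({})       # would raise KeyError: missing 'samples'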
Example 4: append
def append(self, other):
"""This method should not be used and will be removed in the future"""
warning(
"AttrDataset.append() is deprecated and will be removed. "
"Instead of ds.append(x) use: ds = vstack((ds, x), a=0)"
)
if not self.nfeatures == other.nfeatures:
raise DatasetError("Cannot merge datasets, because the number of features does not match.")
if not sorted(self.sa.keys()) == sorted(other.sa.keys()):
raise DatasetError(
    "Cannot merge dataset. This dataset's samples "
    "attributes %s cannot be mapped into the other "
    "set %s" % (self.sa.keys(), other.sa.keys())
)
# concat the samples as well
self.samples = np.concatenate((self.samples, other.samples), axis=0)
# tell the collection the new desired length of all attributes
self.sa.set_length_check(len(self.samples))
# concat all samples attributes
for k, v in other.sa.iteritems():
self.sa[k].value = np.concatenate((self.sa[k].value, v.value), axis=0)
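The deprecation warning above already names the replacement; a hedged sketch of that recommended call (the toy Dataset construction is an assumption, and the a=0 argument simply follows the message in the snippet):

import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.base.dataset import vstack

ds1 = Dataset(np.zeros((2, 3)))
ds2 = Dataset(np.ones((2, 3)))
merged = vstack((ds1, ds2), a=0)   # preferred over ds1.append(ds2)
print(len(merged))                 # 4 samples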
Example 5: _forward_data
def _forward_data(self, data):
params = self.params
try:
mapped = filtfilt(self.__iir_num,
self.__iir_denom,
data,
axis=params.axis,
padtype=params.padtype,
padlen=params.padlen)
except TypeError:
# we have an ancient scipy, do manually
# but it will only support 2d arrays
if params.axis == 0:
data = data.T
if params.axis > 1:
raise ValueError("this version of scipy does not "
"support nd-arrays for filtfilt()")
if not (params['padlen'].is_default and params['padtype'].is_default):
warning("this version of scipy.signal.filtfilt() does not "
"support `padlen` and `padtype` arguments -- ignoring "
"them")
mapped = [filtfilt(self.__iir_num,
self.__iir_denom,
x)
for x in data]
mapped = np.array(mapped)
if params.axis == 0:
mapped = mapped.T
return mapped
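The mapper above delegates to scipy.signal.filtfilt; a standalone sketch of that underlying call with an arbitrary Butterworth design (the filter order and cutoff are chosen only for illustration):

import numpy as np
from scipy.signal import butter, filtfilt

b, a = butter(4, 0.2)                     # 4th-order low-pass, normalized cutoff 0.2
data = np.random.randn(200, 3)            # time points x channels
smoothed = filtfilt(b, a, data, axis=0)   # zero-phase filtering along the time axis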
Example 6: __init__
def __init__(self, **kwargs):
"""Initialize an SMLR classifier.
"""
"""
TODO:
# Add in likelihood calculation
# Add kernels, not just direct methods.
"""
# init base class first
Classifier.__init__(self, **kwargs)
if _cStepwiseRegression is None and self.params.implementation == 'C':
warning('SMLR: C implementation is not available.'
' Using pure Python one')
self.params.implementation = 'Python'
# pylint friendly initializations
self._ulabels = None
"""Unique labels from the training set."""
self.__weights_all = None
"""Contains all weights including bias values"""
self.__weights = None
"""Just the weights, without the biases"""
self.__biases = None
"""The biases, will remain None if has_bias is False"""
Example 7: to_npz
def to_npz(self, filename, compress=True):
"""Save dataset to a .npz file storing all fa/sa/a which are ndarrays
Parameters
----------
filename : str
compress : bool, optional
If True, savez_compressed is used
"""
savez = np.savez_compressed if compress else np.savez
if not filename.endswith('.npz'):
filename += '.npz'
entries = {'samples': self.samples}
skipped = []
for c in ('a', 'fa', 'sa'):
col = getattr(self, c)
for k in col:
v = col[k].value
e = '%s.%s' % (c, k)
if isinstance(v, np.ndarray):
entries[e] = v
else:
skipped.append(e)
if skipped:
warning("Skipping %s since not ndarrays" % (', '.join(skipped)))
return savez(filename, **entries)
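A hedged round-trip sketch; the toy Dataset and the string attribute are assumptions chosen to exercise both the saved and the skipped-with-a-warning paths:

import numpy as np
from mvpa2.datasets.base import Dataset

ds = Dataset(np.random.randn(4, 6))
ds.a['note'] = 'toy data'             # not an ndarray -> reported as skipped
ds.to_npz('/tmp/toy')                 # writes /tmp/toy.npz
samples = np.load('/tmp/toy.npz')['samples']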
Example 8: run
def run(args):
if not args.store is None and args.output is None:
raise ValueError("--output is required for result storage")
if not args.data is None:
dss = [arg2ds(d) for d in args.data]
if len(dss):
# convenience short-cut
ds = dss[0]
try:
import nose.tools as nt
except ImportError:
pass
for expr in args.eval:
if expr == '-':
exec sys.stdin
elif os.path.isfile(expr):
execfile(expr, globals(), locals())
else:
exec expr
if not args.store is None:
out = {}
for var in args.store:
try:
out[var] = locals()[var]
except KeyError:
warning("'%s' not found in local name space -- skipped." % var)
if len(out):
ds2hdf5(out, args.output, compression=args.hdf5_compression)
Example 9: stability_assurance
def stability_assurance(cdf):
if __debug__ and 'CHECK_STABILITY' in debug.active:
cdf_min, cdf_max = np.min(cdf), np.max(cdf)
if cdf_min < 0 or cdf_max > 1.0:
s = ('', ' for %s' % name)[int(name is not None)]
warning('Stability check of cdf %s failed%s. Min=%s, max=%s' % \
(cdf_func, s, cdf_min, cdf_max))
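The check itself is plain NumPy and easy to reproduce outside the debug machinery (the cdf values below are made up to trigger it):

import numpy as np
from mvpa2.base import warning

cdf = np.array([-0.02, 0.10, 0.75, 1.00])    # a numerically unstable cdf estimate
if cdf.min() < 0 or cdf.max() > 1.0:
    warning('Stability check of cdf failed. Min=%s, max=%s' % (cdf.min(), cdf.max()))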
Example 10: _SLcholesky_autoreg
def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
"""Simple wrapper around cholesky to incrementally regularize the
matrix until successful computation.
For `nsteps` we boost diagonal 10-fold each time from the
'epsilon' of the respective dtype. If None -- would proceed until
reaching 1.
"""
if nsteps is None:
nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
result = None
for step in xrange(nsteps):
epsilon_value = (10**step) * np.finfo(C.dtype).eps
epsilon = epsilon_value * np.eye(C.shape[0])
try:
result = SLcholesky(C + epsilon, lower=True)
except SLAError as e:
    warning("Cholesky decomposition led to failure: %s. "
            "As requested, performing auto-regularization but "
            "for better control you might prefer to regularize "
            "yourself by providing the lm parameter to GPR" % e)
if step < nsteps-1:
if __debug__:
debug("GPR", "Failed to obtain cholesky on "
"auto-regularization step %d value %g. Got %s."
" Boosting lambda more to reg. C."
% (step, epsilon_value, e))
continue
else:
raise
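The same auto-regularization idea sketched directly against scipy.linalg, without PyMVPA's debug plumbing (the matrix and the step count are arbitrary):

import numpy as np
from scipy.linalg import cholesky, LinAlgError

def cholesky_autoreg(C, nsteps=16):
    """Retry Cholesky with the diagonal boosted by growing multiples of eps."""
    for step in range(nsteps):
        jitter = (10 ** step) * np.finfo(C.dtype).eps * np.eye(C.shape[0])
        try:
            return cholesky(C + jitter, lower=True)
        except LinAlgError:
            if step == nsteps - 1:
                raise

L = cholesky_autoreg(np.ones((3, 3)))   # singular input, plain cholesky would fail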
Example 11: _check
def _check(self):
'''ensures that different fields are sort of consistent'''
fields = ['_v', '_f', '_nv', '_nf']
if not all(hasattr(self, field) for field in fields):
raise Exception("Incomplete surface!")
if self._v.shape != (self._nv, 3):
raise Exception("Wrong shape for vertices")
if self._f.shape != (self._nf, 3):
raise Exception("Wrong shape for faces")
# see if all faces have a corresponding node.
# actually this would not invalidate the surface, so
# we only give a warning
unqf = np.unique(self._f)
if unqf.size != self._nv:
from mvpa2.base import warning
warning("Count mismatch for face range (%d!=%d), "
"faces without node: %r" % (unqf.size, self._nv,
len(set(range(self._nv)) - set(unqf))))
if np.any(unqf != np.arange(self._nv)):
from mvpa2.base import warning
warning("Missing values in faces")
Example 12: handle_arg
def handle_arg(arg):
"""Helper which would read in SpatialImage if necessary
"""
if arg is None:
return arg
if isinstance(arg, basestring):
arg = nb.load(arg)
argshape = arg.get_shape()
# Assure that we have 3D (at least)
if len(argshape)<3:
arg = nb.Nifti1Image(
arg.get_data().reshape(argshape + (1,)*(3-len(argshape))),
arg.get_affine(),
arg.get_header())
else:
argshape = arg.shape
if len(argshape) == 4:
if argshape[-1] > 1:
warning("For now plot_lightbox can handle only 3d data, but 4d data was "
        "provided. Plotting only the first volume")
if isinstance(arg, SpatialImage):
arg = nb.Nifti1Image(arg.get_data()[..., 0], arg.get_affine(), arg.get_header())
else:
arg = arg[..., 0]
elif len(argshape) != 3:
raise ValueError("For now just handling 3D volumes")
return arg
Example 13: label_voxel
def label_voxel(self, c, levels = None):
if self.__referenceLevel is None:
warning("You did not provide what level to use "
"for reference. Assigning 0th level -- '%s'"
% (self._levels[0],))
self.set_reference_level(0)
# return self.__referenceAtlas.label_voxel(c, levels)
c = self._check_range(c)
# obtain coordinates of the closest voxel
cref = self._data[ self.__referenceLevel.indexes, c[0], c[1], c[2] ]
dist = norm( (cref - c) * self.voxdim )
if __debug__:
debug('ATL__', "Closest referenced point for %r is "
"%r at distance %3.2f" % (c, cref, dist))
if (self.distance - dist) >= 1e-3: # neglect everything smaller
result = self.__referenceAtlas.label_voxel(cref, levels)
result['voxel_referenced'] = c
result['distance'] = dist
else:
result = self.__referenceAtlas.label_voxel(c, levels)
if __debug__:
debug('ATL__', "Closest referenced point is "
"further than desired distance %.2f" % self.distance)
result['voxel_referenced'] = None
result['distance'] = 0
return result
Example 14: __init__
def __init__(self, generator, queryengine, errorfx=mean_mismatch_error,
indexsum=None,
reuse_neighbors=False,
splitter=None,
**kwargs):
"""Initialize the base class for "naive" searchlight classifiers
Parameters
----------
generator : `Generator`
Some `Generator` to prepare partitions for cross-validation.
It must not change "targets", thus e.g. no AttributePermutator's
errorfx : func, optional
Functor that computes a scalar error value from the vectors of
desired and predicted values (e.g. subclass of `ErrorFunction`).
indexsum : ('sparse', 'fancy'), optional
Which method to use to compute sums over arbitrary columns. 'fancy'
corresponds to regular fancy indexing over columns, whereas with
'sparse' a product of sparse matrices is used (usually faster, so it
is the default if `scipy` is available).
reuse_neighbors : bool, optional
Compute neighbors information only once, thus allowing for
efficient reuse on subsequent calls where dataset's feature
attributes remain the same (e.g. during permutation testing)
splitter : Splitter, optional
Splitter that will be used to split partitioned datasets. If None is
specified, the standard splitter operating on partitions will be used
"""
# init base class first
BaseSearchlight.__init__(self, queryengine, **kwargs)
self._errorfx = errorfx
self._generator = generator
self._splitter = splitter
# TODO: move into _call since resetting over default None
# obscures __repr__
if indexsum is None:
if externals.exists('scipy'):
indexsum = 'sparse'
else:
indexsum = 'fancy'
else:
if indexsum == 'sparse' and not externals.exists('scipy'):
warning("Scipy.sparse isn't available so taking 'fancy' as "
"'indexsum' method.")
indexsum = 'fancy'
self._indexsum = indexsum
if not self.nproc in (None, 1):
raise NotImplementedError(
    "For now only nproc=1 (or None for "
    "autodetection) is supported by GNBSearchlight")
self.__pb = None # statistics per each block/label
self.__reuse_neighbors = reuse_neighbors
# Storage to be used for neighborhood information
self.__roi_fids = None
Example 15: train
def train(self, ds):
"""
The default implementation calls ``_pretrain()``, ``_train()``, and
finally ``_posttrain()``.
Parameters
----------
ds : Dataset
Training dataset.
Returns
-------
None
"""
got_ds = is_datasetlike(ds)
# TODO remove first condition if all Learners get only datasets
if got_ds and (ds.nfeatures == 0 or len(ds) == 0):
raise DegenerateInputError(
"Cannot train learner on degenerate data %s" % ds)
if __debug__:
debug(
"LRN",
"Training learner %(lrn)s on dataset %(dataset)s",
msgargs={'lrn': self, 'dataset': ds})
self._pretrain(ds)
# remember the time when started training
t0 = time.time()
if got_ds:
# things might have happened during pretraining
if ds.nfeatures > 0:
self._train(ds)
else:
warning("Trying to train on dataset with no features present")
if __debug__:
debug("LRN",
"No features present for training, no actual training "
"is called")
else:
# in this case we claim to have no idea and simply try to train
self._train(ds)
# store timing
self.ca.training_time = time.time() - t0
# and post-proc
self._posttrain(ds)
# finally flag as trained
self._set_trained()
if __debug__:
debug(
"LRN",
"Finished training learner %(lrn)s on dataset %(dataset)s",
msgargs={'lrn': self, 'dataset': ds})
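A hedged end-to-end sketch of the train() entry point described above; the kNN classifier and the toy Dataset are assumptions chosen only for illustration:

import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.clfs.knn import kNN

ds = Dataset(np.random.randn(10, 5), sa={'targets': [0, 1] * 5})
clf = kNN()
clf.train(ds)   # runs _pretrain(), _train(), and _posttrain() as shown above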