This article collects typical usage examples of the exists function from Python's mvpa.base.externals module. If you have been wondering what exists does, how to call it, or what real uses of it look like, the curated code examples below should help.
The following presents 15 code examples of the exists function, ranked by popularity by default.
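Before the individual examples, here is a minimal sketch (my own, not taken from PyMVPA's documentation) of the call patterns that recur below; the dependency name 'scipy' is used purely for illustration:

import numpy as np
from mvpa.base import externals

# Boolean probe: runs the registered check once and caches the outcome.
if externals.exists('scipy'):
    from scipy.stats import norm

# Hard requirement: raise instead of returning False when the dependency is
# missing (older code below spells this keyword `raiseException=True`).
externals.exists('scipy', raise_=True)

# Ignore the cached outcome and re-run the check.
externals.exists('scipy', force=True)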
Example 1: plot
def plot(self, label_index=0):
    """
    TODO: make it friendly to labels given by values?
          should we also treat labels_map?
    """
    externals.exists("pylab", raise_=True)
    import pylab as pl

    self._compute()

    labels = self._labels
    # select only rocs for the given label
    rocs = self.rocs[label_index]

    fig = pl.gcf()
    ax = pl.gca()

    pl.plot([0, 1], [0, 1], 'k:')

    for ROC in rocs:
        pl.plot(ROC.fp, ROC.tp, linewidth=1)

    pl.axis((0.0, 1.0, 0.0, 1.0))
    pl.axis('scaled')
    pl.title('Label %s. Mean AUC=%.2f' % (label_index, self.aucs[label_index]))

    pl.xlabel('False positive rate')
    pl.ylabel('True positive rate')
Example 2: _postcall
def _postcall(self, dataset, result):
    """Some postprocessing on the result
    """
    self.raw_result = result
    if not self.__transformer is None:
        if __debug__:
            debug("SA_", "Applying transformer %s" % self.__transformer)
        result = self.__transformer(result)

    # estimate the NULL distribution when functor is given
    if not self.__null_dist is None:
        if __debug__:
            debug("SA_", "Estimating NULL distribution using %s"
                  % self.__null_dist)

        # we need a matching datameasure instance, but we have to disable
        # the estimation of the null distribution in that child to prevent
        # infinite looping.
        measure = copy.copy(self)
        measure.__null_dist = None
        self.__null_dist.fit(measure, dataset)

        if self.states.isEnabled('null_t'):
            # get probability under NULL hyp, but also request
            # whether it belongs to the right tail
            null_prob, null_right_tail = \
                self.__null_dist.p(result, return_tails=True)
            self.null_prob = null_prob

            externals.exists('scipy', raiseException=True)
            from scipy.stats import norm

            # TODO: following logic should appear in NullDist,
            #       not here
            tail = self.null_dist.tail
            if tail == 'left':
                acdf = N.abs(null_prob)
            elif tail == 'right':
                acdf = 1.0 - N.abs(null_prob)
            elif tail in ['any', 'both']:
                acdf = 1.0 - N.clip(N.abs(null_prob), 0, 0.5)
            else:
                raise RuntimeError, 'Unhandled tail %s' % tail

            # We need to clip to avoid non-informative inf's ;-)
            # that happens due to lack of precision in mantissa
            # which is 11 bits in double. We could clip values
            # around 0 at as low as 1e-100 (correspond to z~=21),
            # but for consistency lets clip at 1e-16 which leads
            # to distinguishable value around p=1 and max z=8.2.
            # Should be sufficient range of z-values ;-)
            clip = 1e-16
            null_t = norm.ppf(N.clip(acdf, clip, 1.0 - clip))
            null_t[~null_right_tail] *= -1.0  # revert sign for negatives
            self.null_t = null_t  # store
        else:
            # get probability of result under NULL hypothesis if available
            # and don't request tail information
            self.null_prob = self.__null_dist.p(result)

    return result
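Example 2's conversion of NULL-distribution p-values into z-scores can be illustrated in isolation. The sketch below is not PyMVPA code; it only reuses the same scipy.stats.norm.ppf call and the same clipping trick, here for right-tailed p-values:

import numpy as np
from scipy.stats import norm

def p_to_z(null_prob, clip=1e-16):
    # Convert a right-tail p-value into a CDF value, clipped away from 0 and 1
    # so that norm.ppf does not return +/- inf due to limited float precision.
    acdf = 1.0 - np.abs(null_prob)
    return norm.ppf(np.clip(acdf, clip, 1.0 - clip))

# p_to_z(np.array([0.5, 0.05, 1e-30])) is roughly [0.0, 1.64, 8.2]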
Example 3: __init__
def __init__(self, sd=0, distribution='rdist', fpp=None, nbins=400, **kwargs):
    """L2-Norm the values, convert them to p-values of a given distribution.

    Parameters
    ----------
    sd : int
      Samples dimension (if len(x.shape) > 1) on which to operate
    distribution : string
      Which distribution to use. Known are: 'rdist' (later normal should
      be there as well)
    fpp : float
      At what p-value (both tails), if not None, to control for false
      positives. It would iteratively prune the tails (tentative real
      positives) until the empirical p-value becomes less than or equal
      to the numerical one.
    nbins : int
      Number of bins for the iterative pruning of positives

    WARNING: Highly experimental/slow/etc: no theoretical grounds have been
    presented in any paper, nor proven
    """
    externals.exists('scipy', raise_=True)
    ClassWithCollections.__init__(self, **kwargs)

    self.sd = sd
    if not (distribution in ['rdist']):
        raise ValueError, "Actually only rdist supported at the moment" \
              " got %s" % distribution
    self.distribution = distribution
    self.fpp = fpp
    self.nbins = nbins
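Example 14 further down exercises this constructor end to end. As a condensed, hedged sketch of the intended call pattern (the import path is assumed from the name of the test module, test_transformers, and may differ between PyMVPA versions; the input array is random illustration data):

import numpy as np
from mvpa.misc.transformers import DistPValue   # import path assumed

# fpp=None gives plain p-values; fpp=0.05 additionally prunes tentative true
# positives until the empirical false-positive proportion drops to 0.05.
dpv = DistPValue(fpp=0.05, nbins=400)
pvals = dpv(np.random.normal(size=(220, 2)))    # samples x features, cf. Example 14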
Example 4: __init__
def __init__(self, source):
    """Reader for MEG data from text files or file-like objects.

    Parameters
    ----------
    source : str or file-like
      Strings are assumed to be filenames (compressed if they carry a
      `.gz` suffix), while all other object types are treated as
      file-like objects.
    """
    self.ntimepoints = None
    self.timepoints = None
    self.nsamples = None
    self.channelids = []
    self.data = []
    self.samplingrate = None

    # open textfiles
    if isinstance(source, str):
        if source.endswith('.gz'):
            externals.exists('gzip', raise_=True)
            import gzip
            source = gzip.open(source, 'r')
        else:
            source = open(source, 'r')

    # read file
    for line in source:
        # split ID
        colon = line.find(':')

        # ignore lines without id
        if colon == -1:
            continue

        id = line[:colon]
        data = line[colon + 1:].strip()
        if id == 'Sample Number':
            timepoints = np.fromstring(data, dtype=int, sep='\t')
            # one more as it starts with zero
            self.ntimepoints = int(timepoints.max()) + 1
            self.nsamples = int(len(timepoints) / self.ntimepoints)
        elif id == 'Time':
            self.timepoints = np.fromstring(data,
                                            dtype=float,
                                            count=self.ntimepoints,
                                            sep='\t')
            self.samplingrate = self.ntimepoints \
                / (self.timepoints[-1] - self.timepoints[0])
        else:
            # load data
            self.data.append(
                np.fromstring(data, dtype=float, sep='\t').reshape(
                    self.nsamples, self.ntimepoints))
            # store id
            self.channelids.append(id)

    # reshape data from (channels x samples x timepoints) to
    # (samples x channels x timepoints)
    self.data = np.swapaxes(np.array(self.data), 0, 1)
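The per-line parsing in Example 4 rests on numpy.fromstring with a tab separator. A self-contained sketch of that single step (the sample line is invented; note that newer NumPy releases deprecate the text mode of fromstring, whereas the NumPy versions contemporary with this code accept it):

import numpy as np

# A made-up line in the same "<id>:<tab-separated values>" layout
line = "Time:\t0.000\t0.002\t0.004\t0.006"
colon = line.find(':')
channel_id = line[:colon]                 # 'Time'
values = np.fromstring(line[colon + 1:].strip(), dtype=float, sep='\t')
# values == array([0.   , 0.002, 0.004, 0.006])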
Example 5: _data2img
def _data2img(data, hdr=None, imgtype=None):
    # input data is t,x,y,z
    if externals.exists("nibabel"):
        # let's try whether we can get it done with nibabel
        import nibabel

        if imgtype is None:
            # default is NIfTI1
            itype = nibabel.Nifti1Image
        else:
            itype = imgtype

        if issubclass(itype, nibabel.spatialimages.SpatialImage) \
           and (hdr is None or hasattr(hdr, "get_data_dtype")):
            # we can handle the desired image type and hdr with nibabel
            # use of `None` for the affine should cause it to be pulled
            # from the header
            return itype(_get_xyzt_shaped(data), None, hdr)

    # otherwise continue and see if there is hope ....
    if externals.exists("nifti"):
        # maybe pynifti can help
        import nifti

        if imgtype is None:
            itype = nifti.NiftiImage
        else:
            itype = imgtype

        if issubclass(itype, nifti.NiftiImage) \
           and (hdr is None or isinstance(hdr, dict)):
            # pynifti wants it transposed
            return itype(_get_xyzt_shaped(data).T, hdr)

    raise RuntimeError(
        "Cannot convert data to an MRI image "
        "(backends: nibabel(%s), pynifti(%s)). Got hdr='%s', "
        "imgtype='%s'." % (externals.exists("nibabel"),
                           externals.exists("nifti"), hdr, imgtype))
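To make the nibabel branch of Example 5 concrete, here is a minimal standalone sketch, assuming nibabel is installed; the array shape and the identity affine are arbitrary illustration values and have nothing to do with PyMVPA's _get_xyzt_shaped helper:

import numpy as np
import nibabel

# Build a NIfTI1 image from an x,y,z-shaped array and a 4x4 affine.
data = np.zeros((4, 4, 4), dtype=np.float32)
img = nibabel.Nifti1Image(data, np.eye(4))
# img.shape == (4, 4, 4); img.get_data_dtype() == float32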
Example 6: testExternalsCorrect2ndInvocation
def testExternalsCorrect2ndInvocation(self):
    # always fails
    externals._KNOWN["checker2"] = "raise ImportError"

    self.failUnless(not externals.exists("checker2"),
                    msg="Should be False on 1st invocation")
    self.failUnless(not externals.exists("checker2"),
                    msg="Should be False on 2nd invocation as well")

    externals._KNOWN.pop("checker2")
Example 7: plot
def plot(self):
    """Plot correlation coefficients
    """
    externals.exists('pylab', raise_=True)
    import pylab as pl
    pl.plot(self['corrcoef'])
    pl.title('Auto-correlation of the sequence')
    pl.xlabel('Offset')
    pl.ylabel('Correlation Coefficient')
    pl.show()
Example 8: test_externals_correct2nd_invocation
def test_externals_correct2nd_invocation(self):
    # always fails
    externals._KNOWN['checker2'] = 'raise ImportError'

    self.failUnless(not externals.exists('checker2'),
                    msg="Should be False on 1st invocation")
    self.failUnless(not externals.exists('checker2'),
                    msg="Should be False on 2nd invocation as well")

    externals._KNOWN.pop('checker2')
Example 9: testExternalsNoDoubleInvocation
def testExternalsNoDoubleInvocation(self):
    # no external should be checked twice (unless specified
    # explicitly)
    class Checker(object):
        """Helper class to increment count of actual checks"""
        def __init__(self):
            self.checked = 0
        def check(self):
            self.checked += 1

    checker = Checker()

    externals._KNOWN["checker"] = "checker.check()"
    externals.__dict__["checker"] = checker
    externals.exists("checker")
    self.failUnlessEqual(checker.checked, 1)
    externals.exists("checker")
    self.failUnlessEqual(checker.checked, 1)
    externals.exists("checker", force=True)
    self.failUnlessEqual(checker.checked, 2)
    externals.exists("checker")
    self.failUnlessEqual(checker.checked, 2)

    # restore original externals
    externals.__dict__.pop("checker")
    externals._KNOWN.pop("checker")
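Example 9 also documents the extension mechanism behind exists(): an entry in externals._KNOWN maps a dependency name to a snippet of code that is executed once and then cached, unless force=True is passed. A minimal sketch with a made-up dependency name:

from mvpa.base import externals

# The registered string is executed by exists(); an ImportError (or any other
# failure) marks the dependency as absent.
externals._KNOWN['mydep'] = 'import mydep'

externals.exists('mydep')              # runs the probe, caches the result
externals.exists('mydep', force=True)  # re-runs the probe

externals._KNOWN.pop('mydep')          # clean up, as the tests above do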
Example 10: _reverse
def _reverse(self, data):
    if __debug__:
        debug('MAP', "Converting signal back using DWP")

    if self.__level is None:
        raise NotImplementedError
    else:
        if not externals.exists('pywt wp reconstruct'):
            raise NotImplementedError, \
                  "Reconstruction for a single level for versions of " \
                  "pywt < 0.1.7 (revision 103) is not supported"
        if not externals.exists('pywt wp reconstruct fixed'):
            warning("Reconstruction using available version of pywt might "
                    "result in incorrect data in the tails of the signal")
        return self.__reverseSingleLevel(data)
Example 11: test_chi_square_searchlight
def test_chi_square_searchlight(self):
    # only do partial to save time

    # Can't yet do this since test_searchlight isn't yet "under nose"
    #skip_if_no_external('scipy')
    if not externals.exists('scipy'):
        return

    from mvpa.misc.stats import chisquare
    transerror = TransferError(sample_clf_lin)
    cv = CrossValidatedTransferError(
            transerror,
            NFoldSplitter(cvtype=1),
            enable_ca=['confusion'])

    def getconfusion(data):
        cv(data)
        return chisquare(cv.ca.confusion.matrix)[0]

    sl = sphere_searchlight(getconfusion, radius=0,
                            center_ids=[3, 50])

    # run searchlight
    results = sl(self.dataset)
    self.failUnless(results.nfeatures == 2)
Example 12: skip_if_no_external
def skip_if_no_external(dep, ver_dep=None, min_version=None, max_version=None):
    """Raise SkipTest if external is missing

    Parameters
    ----------
    dep : string
      Name of the external
    ver_dep : string, optional
      If version checking should use a different key, e.g. shogun:rev.
      If not specified, `dep` will be used.
    min_version : None or string or tuple
      Minimal required version
    max_version : None or string or tuple
      Maximal required version
    """
    if not externals.exists(dep):
        raise SkipTest, \
              "External %s is not present thus tests battery skipped" % dep

    if ver_dep is None:
        ver_dep = dep

    if min_version is not None and externals.versions[ver_dep] < min_version:
        raise SkipTest, \
              "Minimal version %s of %s is required. Present version is %s" \
              ". Test was skipped." \
              % (min_version, ver_dep, externals.versions[ver_dep])

    if max_version is not None and externals.versions[ver_dep] > max_version:
        raise SkipTest, \
              "Maximal version %s of %s is required. Present version is %s" \
              ". Test was skipped." \
              % (max_version, ver_dep, externals.versions[ver_dep])
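A typical call site for this helper, echoing the commented-out line in Example 11, is the first statement of a test method; the test name, body, and version string below are invented for illustration:

def test_something_needing_scipy(self):
    # Skip (rather than fail) this test when scipy is absent or too old.
    skip_if_no_external('scipy', min_version='0.7')
    from scipy.stats import norm
    self.failUnless(abs(norm.cdf(0.0) - 0.5) < 1e-10)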
Example 13: testResampling
def testResampling(self):
    ds = EEPDataset(os.path.join(pymvpa_dataroot, 'eep.bin'),
                    labels=[1, 2], labels_map={1: 100, 2: 101})
    channelids = N.array(ds.channelids).copy()
    self.failUnless(N.round(ds.samplingrate) == 500.0)

    if not externals.exists('scipy'):
        return

    # should puke when called with nothing
    self.failUnlessRaises(ValueError, ds.resample)

    # now for real -- should divide nsamples into half
    rds = ds.resample(sr=250, inplace=False)

    # We should have not changed anything
    self.failUnless(N.round(ds.samplingrate) == 500.0)

    # by default do 'inplace' resampling
    ds.resample(sr=250)
    for d in [rds, ds]:
        self.failUnless(N.round(d.samplingrate) == 250)
        self.failUnless(d.nsamples == 2)
        self.failUnless(N.abs((d.dt - 1.0/250) / d.dt) < 1e-5)
        self.failUnless(N.all(d.channelids == channelids))
        # lets now see if we still have a mapper
        self.failUnless(d.O.shape == (2, len(channelids), 2))
        # and labels_map
        self.failUnlessEqual(d.labels_map, {1: 100, 2: 101})
Example 14: test_dist_p_value
def test_dist_p_value(self):
    """Basic testing of DistPValue"""
    if not externals.exists('scipy'):
        return

    ndb = 200
    ndu = 20
    nperd = 2
    pthr = 0.05
    Nbins = 400

    # Lets generate already normed data (on sphere) and add some nonbogus features
    datau = (np.random.normal(size=(nperd, ndb)))
    dist = np.sqrt((datau * datau).sum(axis=1))
    datas = (datau.T / dist.T).T
    tn = datax = datas[0, :]
    dataxmax = np.max(np.abs(datax))

    # now lets add true positive features
    tp = [-dataxmax * 1.1] * (ndu/2) + [dataxmax * 1.1] * (ndu/2)
    x = np.hstack((datax, tp))

    # lets add just pure normal to it
    x = np.vstack((x, np.random.normal(size=x.shape))).T

    for distPValue in (DistPValue(), DistPValue(fpp=0.05)):
        result = distPValue(x)
        self.failUnless((result >= 0).all())
        self.failUnless((result <= 1).all())

    if cfg.getboolean('tests', 'labile', default='yes'):
        self.failUnless(distPValue.ca.positives_recovered[0] > 10)
        self.failUnless((np.array(distPValue.ca.positives_recovered) +
                         np.array(distPValue.ca.nulldist_number) == ndb + ndu).all())
        self.failUnlessEqual(distPValue.ca.positives_recovered[1], 0)
Example 15: testChiSquareSearchlight
def testChiSquareSearchlight(self):
    # only do partial to save time
    if not externals.exists('scipy'):
        return

    from mvpa.misc.stats import chisquare
    transerror = TransferError(sample_clf_lin)
    cv = CrossValidatedTransferError(
            transerror,
            NFoldSplitter(cvtype=1),
            enable_states=['confusion'])

    def getconfusion(data):
        cv(data)
        return chisquare(cv.confusion.matrix)[0]

    # construct radius 1 searchlight
    sl = Searchlight(getconfusion, radius=1.0,
                     center_ids=[3, 50])

    # run searchlight
    results = sl(self.dataset)
    self.failUnless(len(results) == 2)