本文整理汇总了Python中mvpa2.base.verbose函数的典型用法代码示例。如果您正苦于以下问题:Python verbose函数的具体用法?Python verbose怎么用?Python verbose使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了verbose函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run(args):
    """Load per-subject NIfTI result maps, test them against chance level,
    and save the requested statistic map (t, p, or signed z) to a NIfTI file.
    """
    verbose(1, "Loading %d result files" % len(args.data))
    # TODO: support hdf5 datasets
    images = [nib.load(fname) for fname in args.data]
    samples = np.asarray([img.get_data() for img in images])
    if args.mask:
        # voxels where the mask is zero are excluded from the output
        out_of_mask = nib.load(args.mask).get_data() == 0
    else:
        # just take where no voxel had a value
        out_of_mask = np.sum(samples != 0, axis=0) == 0
    t, p = ttest_1samp(samples, popmean=args.chance_level, axis=0,
                       alternative=args.alternative)
    if args.stat == 'z':
        # convert p-values into z-scores, keeping the sign of the original t
        if args.alternative == 'two-sided':
            stat_map = stats.norm.isf(p / 2)
        else:
            stat_map = stats.norm.isf(p)
        stat_map = np.abs(stat_map) * np.sign(t)
    elif args.stat == 'p':
        stat_map = p
    elif args.stat == 't':
        stat_map = t
    else:
        raise ValueError('WTF you gave me? have no clue about %r' % (args.stat,))
    stat_map[out_of_mask] = 0
    verbose(1, "Saving to %s" % args.output)
    # reuse the first input's header for the output image geometry
    nib.Nifti1Image(stat_map, None, header=images[0].header).to_filename(args.output)
    return stat_map
示例2: test_verbose_below
def test_verbose_below(self):
    """Test if outputs at lower levels and indents
    by default with spaces
    """
    # a level-2 message is within the enabled verbosity in this test setup,
    # so it must be printed, indented once per level with the default blank
    verbose(2, self.msg)
    self.failUnlessEqual(self.sout.getvalue(),
                         "  %s\n" % self.msg)
示例3: test_verbose_indent
def test_verbose_indent(self):
    """Check that the configured indent symbol is used for indentation."""
    # switch the indent string and verify a level-2 message gets it twice
    verbose.indent = "."
    verbose(2, self.msg)
    expected = "..%s\n" % self.msg
    self.failUnlessEqual(self.sout.getvalue(), expected)
    verbose.indent = " "  # restore the default so other tests are unaffected
示例4: run
def run(args):
    """Command-line hyperalignment driver.

    Loads all input datasets, trains a ``Hyperalignment`` instance on them,
    and stores the requested outputs (common space, conditional attributes,
    per-dataset mappers, transformed datasets) under ``args.output_prefix``.
    """
    # NOTE(review): stray debug output? (Python 2 print statement)
    print args.data
    # NOTE(review): the [:,:100] slice caps every dataset at its first 100
    # features -- looks like a debugging leftover; confirm it is intended
    dss = [arg2ds(d)[:,:100] for d in args.data]
    verbose(1, "Loaded %i input datasets" % len(dss))
    if __debug__:
        for i, ds in enumerate(dss):
            debug('CMDLINE', "dataset %i: %s" % (i, str(ds)))
    # TODO at this point more check could be done, e.g. ref_ds > len(dss)
    # assemble parameters
    params = dict([(param, getattr(args, param)) for param in _supported_parameters])
    if __debug__:
        debug('CMDLINE', "configured parameters: '%s'" % params)
    # assemble CAs
    enabled_ca = [ca for ca in _supported_cas if getattr(args, ca)]
    if __debug__:
        debug('CMDLINE', "enabled conditional attributes: '%s'" % enabled_ca)
    hyper = Hyperalignment(enable_ca=enabled_ca,
                           alignment=ProcrusteanMapper(svd='dgesvd',
                                                       space='commonspace'),
                           **params)
    verbose(1, "Running hyperalignment")
    promappers = hyper(dss)
    verbose(2, "Alignment reference is dataset %i" % hyper.ca.chosen_ref_ds)
    verbose(1, "Writing output")
    # save on memory and remove the training data
    del dss
    if args.commonspace:
        if __debug__:
            debug('CMDLINE', "write commonspace as hdf5")
        h5save('%s%s.hdf5' % (args.output_prefix,
                              _output_specs['commonspace']['output_suffix']),
               hyper.commonspace,
               compression=args.hdf5_compression)
    for ca in _supported_cas:
        if __debug__:
            debug('CMDLINE', "check conditional attribute: '%s'" % ca)
        if getattr(args, ca):
            if __debug__:
                debug('CMDLINE', "store conditional attribute: '%s'" % ca)
            # conditional attribute values are stored as plain-text matrices
            np.savetxt('%s%s' % (args.output_prefix,
                                 _supported_cas[ca]['output_suffix']),
                       hyper.ca[ca].value.samples)
    if args.store_transformation:
        for i, pm in enumerate(promappers):
            if __debug__:
                debug('CMDLINE', "store mapper %i: %s" % (i, str(pm)))
            # one HDF5 file per projection mapper, zero-padded index suffix
            h5save('%s%s.hdf5' % (args.output_prefix, '_map%.3i' % i),
                   pm, compression=args.hdf5_compression)
    if args.transform:
        tdss, dss = _transform_dss(args.transform, promappers, args)
        # untransformed datasets are not needed -- free the memory again
        del dss
        verbose(1, "Store transformed datasets")
        for i, td in enumerate(tdss):
            if __debug__:
                debug('CMDLINE', "store transformed data %i: %s" % (i, str(td)))
            h5save('%s%s.hdf5' % (args.output_prefix, '_transformed%.3i' % i),
                   td, compression=args.hdf5_compression)
示例5: _transform_dss
def _transform_dss(srcs, mappers, args):
    """Load the datasets named in `srcs` and forward-map each through the
    corresponding entry of `mappers`.

    Returns a tuple ``(transformed_datasets, original_datasets)``.
    """
    if __debug__:
        debug('CMDLINE', "loading to-be-transformed data from %s" % srcs)
    datasets = [arg2ds(src) for src in srcs]
    verbose(1, "Loaded %i to-be-transformed datasets" % len(datasets))
    if __debug__:
        debug('CMDLINE', "transform datasets")
    transformed = [mappers[idx].forward(ds)
                   for idx, ds in enumerate(datasets)]
    return transformed, datasets
示例6: run
def run(args):
    """Run it

    Load result maps (NIfTI or HDF5), run a one-sample t-test against the
    chance level, convert the outcome into the requested statistic (t, p, or
    signed z), zero all out-of-mask elements, and store the map in the
    format implied by the output filename.

    Raises
    ------
    ValueError
        If an input/mask file type is unsupported, if ``args.stat`` is
        unknown, or if the mask shape cannot be reconciled with the data.
    """
    verbose(1, "Loading %d result files" % len(args.data))
    filetype_in = guess_backend(args.data[0])
    if filetype_in == 'nifti':
        dss = [fmri_dataset(f) for f in args.data]
    elif filetype_in == 'hdf5':
        dss = [h5load(f) for f in args.data]
    else:
        # fail early with a clear message instead of a NameError on 'dss'
        raise ValueError('unsupported input file type %r' % (filetype_in,))
    data = np.asarray([d.samples[args.isample] for d in dss])
    if args.mask:
        filetype_mask = guess_backend(args.mask)
        if filetype_mask == 'nifti':
            mask = nib.load(args.mask).get_data()
        elif filetype_mask == 'hdf5':
            mask = h5load(args.mask).samples
        else:
            # same guard for the mask input
            raise ValueError('unsupported mask file type %r' % (filetype_mask,))
        out_of_mask = mask == 0
    else:
        # just take where no voxel had a value
        out_of_mask = np.sum(data != 0, axis=0) == 0
    t, p = ttest_1samp(data, popmean=args.chance_level, axis=0,
                       alternative=args.alternative)
    if args.stat == 'z':
        if args.alternative == 'two-sided':
            s = stats.norm.isf(p / 2)
        else:
            s = stats.norm.isf(p)
        # take the sign of the original t
        s = np.abs(s) * np.sign(t)
    elif args.stat == 'p':
        s = p
    elif args.stat == 't':
        s = t
    else:
        raise ValueError('WTF you gave me? have no clue about %r' % (args.stat,))
    if s.shape != out_of_mask.shape:
        # volumetric masks may be 3D while stats are flat (or vice versa);
        # attempt a reshape before giving up
        try:
            out_of_mask = out_of_mask.reshape(s.shape)
        except ValueError:
            raise ValueError('Cannot use mask of shape {0} with '
                             'data of shape {1}'.format(out_of_mask.shape, s.shape))
    s[out_of_mask] = 0
    verbose(1, "Saving to %s" % args.output)
    filetype_out = guess_backend(args.output)
    if filetype_out == 'nifti':
        map2nifti(dss[0], data=s).to_filename(args.output)
    else:  # filetype_out is hdf5
        s = Dataset(np.atleast_2d(s), fa=dss[0].fa, a=dss[0].a)
        h5save(args.output, s)
    return s
示例7: test_cr
def test_cr(self):
    """Test if works fine with carriage return (cr) symbol"""
    # cr=True prefixes the message with '\r' and pads over the previous line
    verbose(2, self.msg, cr=True)
    verbose(2, "rewrite", cr=True)
    verbose(1, "rewrite 2", cr=True)
    # lf=False suppresses the trailing newline so output continues inline
    verbose(1, " add", cr=False, lf=False)
    verbose(1, " finish")
    target = '\r %s\r \rrewrite' % self.msg + \
             '\r \rrewrite 2 add finish\n'
    self.failUnlessEqual(self.sout.getvalue(), target)
示例8: run
def run(args):
    """Load and concatenate the input data, optionally aggregate it with a
    numpy function along an axis, and hand it to the requested info report.
    """
    ds = arg2ds(args.data)
    verbose(3, "Concatenation yielded %i samples with %i features" % ds.shape)
    if args.numpy_xfm is not None:
        # apply a numpy-based aggregation (function, axis) to the dataset
        from mvpa2.mappers.fx import FxMapper
        fx, axis = args.numpy_xfm
        ds = ds.get_mapped(FxMapper(axis, fx))
    info_fx[args.report](ds, args)
示例9: _set_active
def _set_active(self, active):
    """Set active logging set

    Accepts a sequence of debug target IDs and/or regular expressions;
    registered handlers matching any entry become active.  Special string
    items: '' is skipped; 'ALL' (case-insensitive) enables every registered
    handler; '?', 'list' or 'help' print the registry and exit the process.
    (Python 2 code: uses ``basestring`` and ``raise E, msg`` syntax.)
    """
    # just unique entries... we could have simply stored Set I guess,
    # but then smth like debug.active += ["BLAH"] would not work
    from mvpa2.base import verbose
    self.__active = []
    registered_keys = self.__registered.keys()
    for item in list(set(active)):
        if item == '':
            continue
        if isinstance(item, basestring):
            if item in ['?', 'list', 'help']:
                # informational request -- print registry and terminate
                self.print_registered(detailed=(item != '?'))
                raise SystemExit(0)
            if item.upper() == "ALL":
                verbose(2, "Enabling all registered debug handlers")
                self.__active = registered_keys
                break
            # try to match item as it is regexp
            regexp_str = "^%s$" % item
            try:
                regexp = re.compile(regexp_str)
            except:
                raise ValueError, \
                      "Unable to create regular expression out of %s" % item
            matching_keys = filter(regexp.match, registered_keys)
            toactivate = matching_keys
            if len(toactivate) == 0:
                ids = self.registered.keys()
                ids.sort()
                raise ValueError, \
                      "Unknown debug ID '%s' was asked to become active," \
                      " or regular expression '%s' did not get any match" \
                      " among known ids: %s" \
                      % (item, regexp_str, ids)
        else:
            # non-string items are taken as already-resolved IDs
            toactivate = [item]
        # Lets check if asked items are known
        for item_ in toactivate:
            if not (item_ in registered_keys):
                raise ValueError, \
                      "Unknown debug ID %s was asked to become active" \
                      % item_
        self.__active += toactivate
    self.__active = list(set(self.__active))  # select just unique ones
    # cache the longest ID length for aligned log output
    self.__maxstrlength = max([len(str(x)) for x in self.__active] + [0])
    if len(self.__active):
        verbose(2, "Enabling debug handlers: %s" % `self.__active`)
示例10: run
def run(args):
    """Plot motion-estimate QC panels (translation and rotation L2-norms)
    per segment and report any detected outlier samples.
    """
    import pylab as pl
    from mvpa2.base import verbose
    # segments x [subjects x timepoints x properties]
    segments = [np.array(seg) for seg in args.segment]
    # put in standard property order: first translation, then rotation
    if args.estimate_order == 'rottrans':
        segments = [seg[:, :, (3, 4, 5, 0, 1, 2)] for seg in segments]
    # convert rotations (now known to be the last three columns) in-place
    if args.rad2deg:
        for seg in segments:
            rot = seg[:, :, 3:]
            np.rad2deg(rot, rot)
    # figure setup
    fig = pl.figure(figsize=(12, 5))
    # translation panel
    ax = pl.subplot(211)
    outlier = motionqc_plot(
        [seg[..., :3] for seg in segments],
        args.outlier_minthresh,
        args.outlier_stdthresh,
        "translation\nestimate L2-norm")
    if outlier:
        verbose(
            0,
            "Detected per-segment translation outlier input samples {0} (zero-based)".format(
                outlier))
    # rotation panel
    ax = pl.subplot(212)
    outlier = motionqc_plot(
        [seg[..., 3:] for seg in segments],
        args.outlier_minthresh,
        args.outlier_stdthresh,
        "rotation\nestimate L2-norm")
    if outlier:
        verbose(
            0,
            "Detected per-segment rotation outlier input samples {0} (zero-based)".format(
                outlier))
    # show interactively unless a target file was given
    if args.savefig is None:
        pl.show()
    else:
        pl.savefig(args.savefig[0])
示例11: test_no_lf
def test_no_lf(self):
    """Test if it works fine with no newline (LF) symbol"""
    # with lf=False consecutive messages are joined on a single line
    verbose(2, self.msg, lf=False)
    verbose(2, " continue ", lf=False)
    verbose(2, "end")
    # a level-0 message gets no indentation
    verbose(0, "new %s" % self.msg)
    self.failUnlessEqual(self.sout.getvalue(),
                         "  %s continue end\nnew %s\n" % \
                         (self.msg, self.msg))
示例12: bxplot
def bxplot(stats, label):
    """Draw a time-series boxplot for the given per-run stats and report
    outlier time series.

    NOTE: relies on ``segment_sizes``, ``plt_props``, ``pl`` and the helper
    functions from the enclosing scope.
    """
    stats = concat_ts_boxplot_stats(stats)
    # report outlier time series (converted to 1-based indices)
    verbose(0, "List of outlier time series follows (if any)")
    outlier_ids = [np.where(np.sum(np.logical_not(o.mask), axis=0))
                   for o in stats[1]]
    for run_idx, run in enumerate(outlier_ids):
        sample_ids = run[0]
        if len(sample_ids):
            verbose(0, "%s r%.3i: %s"
                    % (label, run_idx + 1, [sid + 1 for sid in sample_ids]))
    summary = stats[0]
    timeseries_boxplot(summary['median'],
                       mean=summary['mean'], std=summary['std'],
                       n=summary['n'], min=summary['min'], max=summary['max'],
                       p25=summary['p25'], p75=summary['p75'],
                       outlierd=stats[1], segment_sizes=segment_sizes)
    # cosmetics: no x tick labels, tight x range, property-specific y label
    pl.title(label)
    xp, xl = pl.xticks()
    pl.xticks(xp, ['' for i in xl])
    pl.xlim((0, len(summary['n'])))
    pl.ylabel(plt_props[label])
示例13: ds2hdf5
def ds2hdf5(ds, fname, compression=None):
    """Save one or more datasets into an HDF5 file.

    Parameters
    ----------
    ds : Dataset or list(Dataset)
      One or more datasets to store
    fname : str
      Filename of the output file. If it doesn't end with '.hdf5', such an
      extension will be appended.
    compression : {'gzip','lzf','szip'} or 1-9
      compression type for HDF5 storage. Available values depend on the
      specific HDF5 installation.
    """
    # this one doesn't actually check what it stores
    from mvpa2.base.hdf5 import h5save
    if not fname.endswith('.hdf5'):
        fname += '.hdf5'
    verbose(1, "Save dataset to '%s'" % fname)
    h5save(fname, ds, mkdir=True, compression=compression)
示例14: run
def run(args):
    """Run a cross-validation analysis from the command line.

    Loads and stacks the input datasets, builds a cross-validation instance
    from the command-line arguments, runs it, prints a per-fold summary
    (Python 2 print statements) and stores the results -- and, when
    permutation testing was requested, the null probabilities -- as HDF5.
    """
    dss = hdf2ds(args.data)
    verbose(3, 'Loaded %i dataset(s)' % len(dss))
    ds = vstack(dss)
    verbose(3, 'Concatenation yielded %i samples with %i features' % ds.shape)
    # get CV instance
    cv = get_crossvalidation_instance(
        args.learner, args.partitioner, args.errorfx, args.sampling_repetitions,
        args.learner_space, args.balance_training, args.permutations,
        args.avg_datafold_results, args.prob_tail)
    res = cv(ds)
    # some meaningful output
    # XXX make condition on classification analysis only?
    print cv.ca.stats
    print 'Results\n-------'
    if args.permutations > 0:
        # null distribution probabilities from the permutation test
        nprob = cv.ca.null_prob.samples
    if res.shape[1] == 1:
        # simple result structure
        if args.permutations > 0:
            p=', p-value (%s tail)' % args.prob_tail
        else:
            p=''
        print 'Fold, Result%s' % p
        for i in xrange(len(res)):
            if args.permutations > 0:
                p = ', %f' % nprob[i, 0]
            else:
                p = ''
            print '%s, %f%s' % (res.sa.cvfolds[i], res.samples[i, 0], p)
    # and store
    ds2hdf5(res, args.output, compression=args.hdf5_compression)
    if args.permutations > 0:
        # store null probabilities next to the main output, suffixed name
        if args.output.endswith('.hdf5'):
            args.output = args.output[:-5]
        ds2hdf5(cv.ca.null_prob, '%s_nullprob' % args.output,
                compression=args.hdf5_compression)
    return res
示例15: run
def run(args):
    """Load datasets, select samples/features by index or attribute
    expression, strip unwanted attributes, and store the result as HDF5.
    """
    dss = hdf2ds(args.data)
    verbose(3, 'Loaded %i dataset(s)' % len(dss))
    ds = vstack(dss)
    verbose(3, 'Concatenation yielded %i samples with %i features' % ds.shape)
    # selection specs per axis; default takes everything
    sliceme = {'samples': slice(None), 'features': slice(None)}
    # index-based selection: either a single 'start:stop:step' spec or a
    # list of explicit indices
    for opt, col, which in ((args.samples_by_index, ds.sa, 'samples'),
                            (args.features_by_index, ds.fa, 'features')):
        if opt is None:
            continue
        if len(opt) == 1 and opt[0].count(':'):
            # colon-separated slice specification; empty fields become None
            parts = [int(piece) if len(piece) else None
                     for piece in opt[0].split(':')]
            sliceme[which] = slice(*parts)
        else:
            # actual indices
            sliceme[which] = [int(idx) for idx in opt]
    # attribute-expression-based selection
    for opt, col, which in ((args.samples_by_attr, ds.sa, 'samples'),
                            (args.features_by_attr, ds.fa, 'features')):
        if opt is not None:
            sliceme[which] = _eval_attr_expr(opt, col)
    # apply both selections in one indexing operation
    ds = ds[sliceme['samples'], sliceme['features']]
    verbose(1, 'Selected %i samples with %i features' % ds.shape)
    # strip attributes
    for attrarg, col, descr in ((args.strip_sa, ds.sa, 'sample '),
                                (args.strip_fa, ds.fa, 'feature '),
                                (args.strip_da, ds.a, '')):
        if attrarg is None:
            continue
        for attr in attrarg:
            try:
                del col[attr]
            except KeyError:
                warning("dataset has no %sattribute '%s' to remove"
                        % (descr, attr))
    # and store
    ds2hdf5(ds, args.output, compression=args.hdf5_compression)
    return ds