This page collects representative code examples of the Python method mvpa2.generators.partition.NFoldPartitioner.get_space. If you are unsure what NFoldPartitioner.get_space does or how it is used in practice, the hand-picked examples below may help. You can also explore further usage of its containing class, mvpa2.generators.partition.NFoldPartitioner.
Two code examples of NFoldPartitioner.get_space are shown below, sorted by popularity by default. You can vote for the examples you find useful; the votes help the site recommend better Python code examples.
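Before the examples, here is a minimal sketch (not taken from the examples below; the variable names are illustrative) of what get_space() returns and why it is typically handed to a ChainNode:

from mvpa2.base.node import ChainNode
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.generators.permutation import AttributePermutator

partitioner = NFoldPartitioner(attr='chunks')
# get_space() reports the sample attribute the partitioner writes its
# output into; for partitioners this defaults to 'partitions'.
print(partitioner.get_space())  # -> 'partitions'

# Forwarding that space name to a ChainNode makes the generator chain
# advertise the same attribute, so downstream consumers (CrossValidation,
# searchlights) know which attribute marks the train/test split.
permutator = AttributePermutator('targets', limit={'partitions': 1}, count=1)
null_generator = ChainNode([partitioner, permutator],
                           space=partitioner.get_space())

Both examples below follow exactly this pattern when building permutation-based null distributions.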
Example 1: test_gnbsearchlight_permutations
# Required import: from mvpa2.generators.partition import NFoldPartitioner [as alias]
# or: from mvpa2.generators.partition.NFoldPartitioner import get_space [as alias]
def test_gnbsearchlight_permutations():
    import numpy as np
    import mvpa2
    from mvpa2.base.node import ChainNode
    from mvpa2.clfs.gnb import GNB
    from mvpa2.generators.base import Repeater
    from mvpa2.generators.partition import NFoldPartitioner, OddEvenPartitioner
    #import mvpa2.generators.permutation
    #reload(mvpa2.generators.permutation)
    from mvpa2.generators.permutation import AttributePermutator
    from mvpa2.testing.datasets import datasets
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.gnbsearchlight import sphere_gnbsearchlight
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.mappers.fx import mean_sample
    from mvpa2.misc.errorfx import mean_mismatch_error
    from mvpa2.clfs.stats import MCNullDist
    from mvpa2.testing.tools import assert_raises, ok_, assert_array_less

    # mvpa2.debug.active = ['APERM', 'SLC'] #, 'REPM']
    # mvpa2.debug.metrics += ['pid']

    count = 10
    nproc = 1 + int(mvpa2.externals.exists('pprocess'))
    ds = datasets['3dsmall'].copy()
    ds.fa['voxel_indices'] = ds.fa.myspace

    slkwargs = dict(radius=3, space='voxel_indices', enable_ca=['roi_sizes'],
                    center_ids=[1, 10, 70, 100])

    mvpa2.seed(mvpa2._random_seed)
    clf = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')

    repeater = Repeater(count=count)
    # Shuffle targets only within the training partition (partitions == 1)
    permutator = AttributePermutator('targets', limit={'partitions': 1}, count=1)
    # Null measure: the same searchlight fed by partitioner + permutator;
    # the ChainNode reuses the partitioner's output space via get_space()
    null_sl = sphere_gnbsearchlight(clf,
                                    ChainNode([splt, permutator], space=splt.get_space()),
                                    postproc=mean_sample(), errorfx=mean_mismatch_error,
                                    **slkwargs)

    distr_est = MCNullDist(repeater, tail='left', measure=null_sl,
                           enable_ca=['dist_samples'])
    sl = sphere_gnbsearchlight(clf, splt,
                               reuse_neighbors=True,
                               null_dist=distr_est, postproc=mean_sample(),
                               errorfx=mean_mismatch_error,
                               **slkwargs)
    if __debug__:  # assert is done only without -O mode
        assert_raises(NotImplementedError, sl, ds)

    # "ad-hoc searchlights can't handle yet varying targets across partitions"
    if False:
        # after above limitation is removed -- enable
        sl_map = sl(ds)
        sl_null_prob = sl.ca.null_prob.samples.copy()

    mvpa2.seed(mvpa2._random_seed)
    ### 'normal' Searchlight
    clf = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')
    repeater = Repeater(count=count)
    permutator = AttributePermutator('targets', limit={'partitions': 1}, count=1)
    # rng=np.random.RandomState(0)) # to trigger failure since the same np.random state
    #                               # would be reused across all pprocesses
    null_cv = CrossValidation(clf,
                              ChainNode([splt, permutator], space=splt.get_space()),
                              postproc=mean_sample())
    null_sl_normal = sphere_searchlight(null_cv, nproc=nproc, **slkwargs)
    distr_est_normal = MCNullDist(repeater, tail='left', measure=null_sl_normal,
                                  enable_ca=['dist_samples'])

    cv = CrossValidation(clf, splt, errorfx=mean_mismatch_error,
                         enable_ca=['stats'], postproc=mean_sample())
    sl = sphere_searchlight(cv, nproc=nproc, null_dist=distr_est_normal, **slkwargs)

    sl_map_normal = sl(ds)
    sl_null_prob_normal = sl.ca.null_prob.samples.copy()

    # For every feature we should get some variance in the estimates.  In
    # case of failure they are all really close to each other (up to
    # numerical precision), so variance will be close to 0.
    assert_array_less(-np.var(distr_est_normal.ca.dist_samples.samples[0],
                              axis=1), -1e-5)
    for s in distr_est_normal.ca.dist_samples.samples[0]:
        ok_(len(np.unique(s)) > 1)
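As a follow-up to the test above, a brief sketch of how the permutation results would typically be consumed in an analysis. This is not part of the original test; it assumes the `sl` and `distr_est_normal` objects built in the example, after `sl(ds)` has been run, and the 0.05 threshold is purely illustrative.

import numpy as np

# Per-center p-values estimated against the Monte-Carlo null distribution
# (tail='left': an observed error lower than the permutation errors counts as significant).
p_values = sl.ca.null_prob.samples.squeeze()

# The raw permutation error estimates behind those p-values,
# collected across the `count` runs driven by Repeater + AttributePermutator.
null_errors = distr_est_normal.ca.dist_samples.samples

# Searchlight centers whose observed error beats chance at alpha = 0.05.
significant_centers = np.nonzero(p_values < 0.05)[0]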
Example 2: setup_classifier
# Required import: from mvpa2.generators.partition import NFoldPartitioner [as alias]
# or: from mvpa2.generators.partition.NFoldPartitioner import get_space [as alias]
def setup_classifier(**kwargs):
    '''
    Build a (feature-selected) classifier, the cross-validation partitioner
    and, optionally, a Monte-Carlo null-distribution estimator from the
    given keyword arguments.
    '''
    for arg in kwargs:
        if arg == 'clf_type':
            clf_type = kwargs[arg]
        if arg == 'fsel':
            f_sel = kwargs[arg]
        if arg == 'cv_type':
            cv_approach = kwargs[arg]
        if arg == 'cv_folds':
            # builtins int/float instead of np.int/np.float (removed in recent NumPy)
            if int(kwargs[arg]) == 0:
                cv_type = float(kwargs[arg])
            else:
                cv_type = int(kwargs[arg])
        if arg == 'permutations':
            permutations = int(kwargs[arg])
        if arg == 'cv_attribute':
            attribute = kwargs[arg]

    cv_n = cv_type

    ################# Classifier #######################
    if clf_type == 'SVM':
        clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    elif clf_type == 'GNB':
        clf = GNB()
    elif clf_type == 'LDA':
        clf = LDA()
    elif clf_type == 'QDA':
        clf = QDA()
    elif clf_type == 'SMLR':
        clf = SMLR()
    elif clf_type == 'RbfSVM':
        sk_clf = SVC(gamma=0.1, C=1)
        clf = SKLLearnerAdapter(sk_clf, enable_ca=['probabilities'])
    elif clf_type == 'GP':
        clf = GPR()
    else:
        clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])

    ############## Feature Selection #########################
    if f_sel == 'True':
        logger.info('Feature Selection selected.')
        fsel = SensitivityBasedFeatureSelection(OneWayAnova(),
                                                FractionTailSelector(0.05,
                                                                     mode='select',
                                                                     tail='upper'))
        fclf = FeatureSelectionClassifier(clf, fsel)
    elif f_sel == 'Fixed':
        logger.info('Fixed Feature Selection selected.')
        fsel = SensitivityBasedFeatureSelection(OneWayAnova(),
                                                FixedNElementTailSelector(100,
                                                                          mode='select',
                                                                          tail='upper'))
        fclf = FeatureSelectionClassifier(clf, fsel)
    elif f_sel == 'PCA':
        from mvpa2.mappers.skl_adaptor import SKLTransformer
        from sklearn.decomposition import PCA
        logger.info('PCA dimensionality reduction selected.')
        fsel = SKLTransformer(PCA(n_components=45))
        fclf = FeatureSelectionClassifier(clf, fsel)
    else:
        fclf = clf

    ######################### Permutations #############################
    if permutations != 0:
        if __debug__:
            debug.active += ["STATMC"]
        repeater = Repeater(count=permutations)
        # Shuffle targets within the training partition only
        permutator = AttributePermutator('targets', limit={'partitions': 1},
                                         count=1)
        partitioner = NFoldPartitioner(cvtype=cv_n, attr=attribute)
        # The ChainNode advertises the partitioner's output space
        # ('partitions'), obtained via get_space()
        null_cv = CrossValidation(
            clf,
            ChainNode([partitioner, permutator],
                      space=partitioner.get_space()),
            errorfx=mean_mismatch_error)
        distr_est = MCNullDist(repeater, tail='left', measure=null_cv,
                               enable_ca=['dist_samples'])
        #postproc = mean_sample()
    else:
        distr_est = None
        #postproc = None

    ########################################################
    if cv_approach == 'n_fold':
        if cv_type != 0:
            splitter_used = NFoldPartitioner(cvtype=cv_type, attr=attribute)
        else:
            splitter_used = NFoldPartitioner(cvtype=1, attr=attribute)
    else:
# ......... the rest of the code is omitted here .........