This article collects typical usage examples of the permutation_cluster_1samp_test function from mne.stats.cluster_level in Python, for readers wondering exactly what permutation_cluster_1samp_test does and how to use it.
A total of 12 code examples of permutation_cluster_1samp_test are shown below, sorted by popularity by default; these hand-picked snippets may serve as a useful reference.
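Before the examples, a minimal usage sketch may help orient readers. It is not taken from the examples below; the data shape, injected effect, threshold, and seed are illustrative assumptions, and the function is imported from the public mne.stats namespace.

import numpy as np
from mne.stats import permutation_cluster_1samp_test

rng = np.random.RandomState(42)
X = rng.randn(15, 20, 30)       # observations x times x spatial points
X[:, 5:10, 10:15] += 1.0        # inject an effect so a cluster can appear

# Returns the observed statistic map, the clusters, one p-value per cluster,
# and the permutation distribution of the maximum cluster statistic.
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
    X, threshold=2.0, n_permutations=1000, tail=0, seed=42)
significant = [c for c, p in zip(clusters, cluster_p_values) if p < 0.05]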
Example 1: test_permutation_step_down_p
def test_permutation_step_down_p():
    """Test cluster level permutations with step_down_p."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    X = rng.randn(9, 2, 10)
    # add some significant points
    X[:, 0:2, 0:2] += 2  # span two time points and two spatial points
    X[:, 1, 5:9] += 0.5  # span four spatial points with 4x smaller amplitude
    thresh = 2
    # make sure it works when we use ALL points in step-down
    t, clusters, p, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=1.0)
    # make sure using step-down will actually yield improvements sometimes
    t, clusters, p_old, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=0.0)
    assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster
    t, clusters, p_new, H0 = \
        permutation_cluster_1samp_test(X, threshold=thresh,
                                       step_down_p=0.05)
    assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued
    assert_true(np.all(p_old >= p_new))
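In Example 1 a single pass (step_down_p=0.0) detects only the strong spatial cluster; enabling step-down (step_down_p=0.05) removes the points belonging to already-significant clusters and re-runs the permutations, which rescues the weaker cluster. As the final assertion checks, step-down can only lower cluster p-values, never raise them.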
Example 2: test_cache_dir
def test_cache_dir():
    """Test use of cache dir."""
    tempdir = _TempDir()
    orig_dir = os.getenv('MNE_CACHE_DIR', None)
    orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
    rng = np.random.RandomState(0)
    X = rng.randn(9, 2, 10)
    try:
        os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
        os.environ['MNE_CACHE_DIR'] = tempdir
        # Fix error for #1507: in-place when memmapping
        with catch_logging() as log_file:
            permutation_cluster_1samp_test(
                X, buffer_size=None, n_jobs=2, n_permutations=1,
                seed=0, stat_fun=ttest_1samp_no_p, verbose=False)
            # ensure that non-independence yields warning
            stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
            assert_true('independently' not in log_file.getvalue())
            with warnings.catch_warnings(record=True):  # independently
                permutation_cluster_1samp_test(
                    X, buffer_size=10, n_jobs=2, n_permutations=1,
                    seed=0, stat_fun=stat_fun, verbose=False)
            assert_true('independently' in log_file.getvalue())
    finally:
        if orig_dir is not None:
            os.environ['MNE_CACHE_DIR'] = orig_dir
        else:
            del os.environ['MNE_CACHE_DIR']
        if orig_size is not None:
            os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
        else:
            del os.environ['MNE_MEMMAP_MIN_SIZE']
Example 3: test_cache_dir
def test_cache_dir():
    """Test use of cache dir."""
    tempdir = _TempDir()
    orig_dir = os.getenv("MNE_CACHE_DIR", None)
    orig_size = os.getenv("MNE_MEMMAP_MIN_SIZE", None)
    rng = np.random.RandomState(0)
    X = rng.randn(9, 2, 10)
    log_file = op.join(tempdir, "log.txt")
    try:
        os.environ["MNE_MEMMAP_MIN_SIZE"] = "1K"
        os.environ["MNE_CACHE_DIR"] = tempdir
        # Fix error for #1507: in-place when memmapping
        permutation_cluster_1samp_test(
            X, buffer_size=None, n_jobs=2, n_permutations=1, seed=0,
            stat_fun=ttest_1samp_no_p, verbose=False)
        # ensure that non-independence yields warning
        stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
        set_log_file(log_file)
        permutation_cluster_1samp_test(
            X, buffer_size=10, n_jobs=2, n_permutations=1, seed=0,
            stat_fun=stat_fun, verbose=False)
        with open(log_file, "r") as fid:
            assert_true("independently" in "".join(fid.readlines()))
    finally:
        if orig_dir is not None:
            os.environ["MNE_CACHE_DIR"] = orig_dir
        else:
            del os.environ["MNE_CACHE_DIR"]
        if orig_size is not None:
            os.environ["MNE_MEMMAP_MIN_SIZE"] = orig_size
        else:
            del os.environ["MNE_MEMMAP_MIN_SIZE"]
        set_log_file(None)
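Both cache-dir examples configure caching through raw environment variables inside a try/finally block. In analysis code the same settings can be stored once via mne.set_config; the sketch below assumes that approach and uses a hypothetical cache directory (the cache only matters when n_jobs > 1 and the data exceed the memmap size threshold).

import mne

# Persist the settings to the MNE config file (unlike os.environ, this
# survives across sessions); the directory path is a placeholder.
mne.set_config('MNE_CACHE_DIR', '/tmp/mne_cache')
mne.set_config('MNE_MEMMAP_MIN_SIZE', '10M')  # memmap arrays of 10 MB or more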
Example 4: test_tfce_thresholds
def test_tfce_thresholds():
    """Test TFCE thresholds."""
    rng = np.random.RandomState(0)
    data = rng.randn(7, 10, 1) - 0.5
    # if tail == -1, step must also be negative
    assert_raises(ValueError, permutation_cluster_1samp_test, data, tail=-1,
                  threshold=dict(start=0, step=0.1))
    # this works (smoke test)
    permutation_cluster_1samp_test(data, tail=-1,
                                   threshold=dict(start=0, step=-0.1))
    # thresholds must be monotonically increasing
    assert_raises(ValueError, permutation_cluster_1samp_test, data, tail=1,
                  threshold=dict(start=1, step=-0.5))
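Passing a dict threshold switches the test to threshold-free cluster enhancement (TFCE). A minimal valid call on simulated data might look like the sketch below; the start/step values are illustrative assumptions, and step must be negative when tail=-1, as the example above asserts.

import numpy as np
from mne.stats import permutation_cluster_1samp_test

rng = np.random.RandomState(0)
data = rng.randn(7, 10, 1) + 0.3

# TFCE integrates cluster extent over a ladder of thresholds starting at
# `start` and increasing by `step`; one enhanced score (and one p-value)
# is produced per data point.
T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
    data, tail=1, threshold=dict(start=0.2, step=0.2), seed=0)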
Example 5: test_thresholds
def test_thresholds():
    """Test automatic threshold calculations."""
    # within subjects
    rng = np.random.RandomState(0)
    X = rng.randn(10, 1, 1) + 0.08
    want_thresh = -stats.t.ppf(0.025, len(X) - 1)
    assert 0.03 < stats.ttest_1samp(X[:, 0, 0], 0)[1] < 0.05
    my_fun = partial(ttest_1samp_no_p)
    with catch_logging() as log:
        with pytest.warns(RuntimeWarning, match='threshold is only valid'):
            out = permutation_cluster_1samp_test(X, stat_fun=my_fun,
                                                 verbose=True)
    log = log.getvalue()
    assert str(want_thresh)[:6] in log
    assert len(out[1]) == 1  # 1 cluster
    assert 0.03 < out[2] < 0.05
    # between subjects
    Y = rng.randn(10, 1, 1)
    Z = rng.randn(10, 1, 1) - 0.7
    X = [X, Y, Z]
    want_thresh = stats.f.ppf(1. - 0.05, 2, sum(len(a) for a in X) - len(X))
    p = stats.f_oneway(*X)[1]
    assert 0.03 < p < 0.05
    my_fun = partial(f_oneway)  # just to make the check fail
    with catch_logging() as log:
        with pytest.warns(RuntimeWarning, match='threshold is only valid'):
            out = permutation_cluster_test(X, tail=1, stat_fun=my_fun,
                                           verbose=True)
    log = log.getvalue()
    assert str(want_thresh)[:6] in log
    assert len(out[1]) == 1  # 1 cluster
    assert 0.03 < out[2] < 0.05
    with pytest.warns(RuntimeWarning, match='Ignoring argument "tail"'):
        permutation_cluster_test(X, tail=0)
Example 6: test_cluster_permutation_t_test
def test_cluster_permutation_t_test():
    """Test cluster level permutations T-test."""
    my_condition1 = condition1[:, :, None]  # to test 2D also
    T_obs, clusters, cluster_p_values, hist = permutation_cluster_1samp_test(
        my_condition1, n_permutations=500, tail=0)
    assert_equal(np.sum(cluster_p_values < 0.05), 1)
    T_obs_pos, _, cluster_p_values_pos, _ = permutation_cluster_1samp_test(
        my_condition1, n_permutations=500, tail=1,
        threshold=1.67)
    T_obs_neg, _, cluster_p_values_neg, _ = permutation_cluster_1samp_test(
        -my_condition1, n_permutations=500, tail=-1,
        threshold=-1.67)
    assert_array_equal(T_obs_pos, -T_obs_neg)
    assert_array_equal(cluster_p_values_pos < 0.05,
                       cluster_p_values_neg < 0.05)
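The threshold=1.67 used above is a hard-coded approximation of a one-tailed critical t value. In analysis code the cutoff is usually derived from the t distribution for the actual number of observations; a short sketch, with n_subjects as a placeholder:

from scipy import stats

n_subjects = 40          # placeholder: first dimension of your data array
p_threshold = 0.05
# critical t for tail=1 (use the negative of this for tail=-1, and
# stats.t.ppf(1 - p_threshold / 2., df) for the two-tailed case)
t_threshold = stats.t.ppf(1. - p_threshold, df=n_subjects - 1)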
Example 7: test_cluster_permutation_t_test
def test_cluster_permutation_t_test():
    """Test cluster level permutations T-test."""
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()
    # use a very large sigma to make sure Ts are not independent
    stat_funs = [ttest_1samp_no_p,
                 partial(ttest_1samp_no_p, sigma=1e-1)]
    for stat_fun in stat_funs:
        for condition1 in (condition1_1d, condition1_2d):
            # these are so significant we can get away with fewer perms
            T_obs, clusters, cluster_p_values, hist =\
                permutation_cluster_1samp_test(condition1, n_permutations=100,
                                               tail=0, seed=1,
                                               buffer_size=None)
            assert_equal(np.sum(cluster_p_values < 0.05), 1)
            T_obs_pos, c_1, cluster_p_values_pos, _ =\
                permutation_cluster_1samp_test(condition1, n_permutations=100,
                                               tail=1, threshold=1.67, seed=1,
                                               stat_fun=stat_fun,
                                               buffer_size=None)
            T_obs_neg, _, cluster_p_values_neg, _ =\
                permutation_cluster_1samp_test(-condition1,
                                               n_permutations=100,
                                               tail=-1, threshold=-1.67,
                                               seed=1, stat_fun=stat_fun,
                                               buffer_size=None)
            assert_array_equal(T_obs_pos, -T_obs_neg)
            assert_array_equal(cluster_p_values_pos < 0.05,
                               cluster_p_values_neg < 0.05)
            # test with 2 jobs and buffer_size enabled
            buffer_size = condition1.shape[1] // 10
            T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \
                permutation_cluster_1samp_test(-condition1,
                                               n_permutations=100,
                                               tail=-1, threshold=-1.67,
                                               seed=1, n_jobs=2,
                                               stat_fun=stat_fun,
                                               buffer_size=buffer_size)
            assert_array_equal(T_obs_neg, T_obs_neg_buff)
            assert_array_equal(cluster_p_values_neg,
                               cluster_p_values_neg_buff)
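The closing assertions of Example 7 verify that buffer_size only controls how the data are chunked while the statistic is recomputed for each permutation (which saves memory, especially with n_jobs > 1); it must not change the T values or the cluster p-values. The 'independently' warning exercised in the cache-dir examples relates to combining buffering with a stat_fun, such as ttest_1samp_no_p with a non-zero sigma ("hat" variance regularization), that does not treat each spatial location independently.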
Example 8: test_cluster_permutation_t_test_with_connectivity
def test_cluster_permutation_t_test_with_connectivity():
    """Test cluster level permutations T-test with connectivity matrix."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    out = permutation_cluster_1samp_test(condition1, n_permutations=500)
    connectivity = grid_to_graph(1, condition1.shape[1])
    out_connectivity = permutation_cluster_1samp_test(
        condition1, n_permutations=500, connectivity=connectivity)
    assert_array_equal(out[0], out_connectivity[0])
    for a, b in zip(out_connectivity[1], out[1]):
        assert_true(np.sum(out[0][a]) == np.sum(out[0][b]))
        assert_true(np.all(a[b]))
Example 9: test_permutation_large_n_samples
def test_permutation_large_n_samples():
    """Test that non-replacement works with large N."""
    X = np.random.RandomState(0).randn(72, 1) + 1
    for n_samples in (11, 72):
        tails = (0, 1) if n_samples <= 20 else (0,)
        for tail in tails:
            H0 = permutation_cluster_1samp_test(
                X[:n_samples], threshold=1e-4, tail=tail)[-1]
            assert H0.shape == (1024,)
            assert len(np.unique(H0)) >= 1024 - (H0 == 0).sum()
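The H0.shape == (1024,) check reflects the default n_permutations=1024. For a one-sample test the permutations are sign flips of the observations, so even with n_samples=11 there are 2**11 = 2048 possible flips, and 1024 distinct ones can be drawn without replacement; the uniqueness assertion confirms that the null distribution is not built from repeated permutations.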
Example 10: test_permutation_connectivity_equiv
def test_permutation_connectivity_equiv():
    """Test cluster level permutations with and without connectivity."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    X = rng.randn(7, 2, 10)
    # add some significant points
    X[:, 0:2, 0:2] += 10  # span two time points and two spatial points
    X[:, 1, 5:9] += 10  # span four spatial points
    max_steps = [1, 1, 1, 2]
    # This will run the full algorithm in two ways, then the ST-algorithm in
    # two ways; all of these should give the same results
    conns = [None, grid_to_graph(2, 10),
             grid_to_graph(1, 10), grid_to_graph(1, 10)]
    stat_map = None
    thresholds = [2, dict(start=0.5, step=0.5)]
    sig_counts = [2, 8]
    sdps = [0, 0.05, 0.05]
    ots = ['mask', 'mask', 'indices']
    for thresh, count in zip(thresholds, sig_counts):
        cs = None
        ps = None
        for max_step, conn in zip(max_steps, conns):
            for stat_fun in [ttest_1samp_no_p,
                             partial(ttest_1samp_no_p, sigma=1e-3)]:
                for sdp, ot in zip(sdps, ots):
                    t, clusters, p, H0 = \
                        permutation_cluster_1samp_test(X,
                                                       threshold=thresh,
                                                       connectivity=conn,
                                                       n_jobs=2,
                                                       max_step=max_step,
                                                       stat_fun=stat_fun,
                                                       step_down_p=sdp,
                                                       out_type=ot)
                    # make sure our output datatype is correct
                    if ot == 'mask':
                        assert_true(isinstance(clusters[0], np.ndarray))
                        assert_true(clusters[0].dtype == bool)
                        assert_array_equal(clusters[0].shape, X.shape[1:])
                    else:  # ot == 'indices'
                        assert_true(isinstance(clusters[0], tuple))
                    # make sure all comparisons were done; for TFCE, no perm
                    # should come up empty
                    if count == 8:
                        assert_true(not np.any(H0 == 0))
                    inds = np.where(p < 0.05)[0]
                    assert_true(len(inds) == count)
                    this_cs = [clusters[ii] for ii in inds]
                    this_ps = p[inds]
                    this_stat_map = np.zeros((2, 10), dtype=bool)
                    for ci, c in enumerate(this_cs):
                        if isinstance(c, tuple):
                            this_c = np.zeros((2, 10), bool)
                            for x, y in zip(c[0], c[1]):
                                this_stat_map[x, y] = True
                                this_c[x, y] = True
                            this_cs[ci] = this_c
                            c = this_c
                        this_stat_map[c] = True
                    if cs is None:
                        ps = this_ps
                        cs = this_cs
                    if stat_map is None:
                        stat_map = this_stat_map
                    assert_array_equal(ps, this_ps)
                    assert_true(len(cs) == len(this_cs))
                    for c1, c2 in zip(cs, this_cs):
                        assert_array_equal(c1, c2)
                    assert_array_equal(stat_map, this_stat_map)
Example 11: test_permutation_connectivity_equiv
def test_permutation_connectivity_equiv():
    """Test cluster level permutations with and without connectivity."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    n_time = 2
    n_space = 4
    X = rng.randn(6, n_time, n_space)
    # add some significant points
    X[:, :, 0:2] += 10  # span two time points and two spatial points
    X[:, 1, 3] += 20  # span one time point
    max_steps = [1, 1, 1, 2, 1]
    # This will run the full algorithm in two ways, then the ST-algorithm in
    # two ways; all of these should give the same results
    conns = [None,
             grid_to_graph(n_time, n_space),
             grid_to_graph(1, n_space),
             grid_to_graph(1, n_space),
             None]
    stat_map = None
    thresholds = [2, 2, 2, 2, dict(start=0.01, step=1.0)]
    sig_counts = [2, 2, 2, 2, 5]
    stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
    cs = None
    ps = None
    for thresh, count, max_step, conn in zip(thresholds, sig_counts,
                                             max_steps, conns):
        t, clusters, p, H0 = \
            permutation_cluster_1samp_test(
                X, threshold=thresh, connectivity=conn, n_jobs=2,
                max_step=max_step, stat_fun=stat_fun)
        # make sure our output datatype is correct
        assert_true(isinstance(clusters[0], np.ndarray))
        assert_true(clusters[0].dtype == bool)
        assert_array_equal(clusters[0].shape, X.shape[1:])
        # make sure all comparisons were done; for TFCE, no perm
        # should come up empty
        inds = np.where(p < 0.05)[0]
        assert_equal(len(inds), count)
        if isinstance(thresh, dict):
            assert_equal(len(clusters), n_time * n_space)
            assert_true(np.all(H0 != 0))
            continue
        this_cs = [clusters[ii] for ii in inds]
        this_ps = p[inds]
        this_stat_map = np.zeros((n_time, n_space), dtype=bool)
        for ci, c in enumerate(this_cs):
            if isinstance(c, tuple):
                this_c = np.zeros((n_time, n_space), bool)
                for x, y in zip(c[0], c[1]):
                    this_stat_map[x, y] = True
                    this_c[x, y] = True
                this_cs[ci] = this_c
                c = this_c
            this_stat_map[c] = True
        if cs is None:
            ps = this_ps
            cs = this_cs
        if stat_map is None:
            stat_map = this_stat_map
        assert_array_equal(ps, this_ps)
        assert_true(len(cs) == len(this_cs))
        for c1, c2 in zip(cs, this_cs):
            assert_array_equal(c1, c2)
        assert_array_equal(stat_map, this_stat_map)
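Both versions of this test build the spatial adjacency with scikit-learn's grid_to_graph. A standalone sketch of the same pattern, assuming an MNE release contemporary with these examples (where the argument is still called connectivity; newer releases renamed it to adjacency):

import numpy as np
from sklearn.feature_extraction.image import grid_to_graph
from mne.stats import permutation_cluster_1samp_test

rng = np.random.RandomState(0)
n_subjects, n_times, n_space = 12, 5, 20
X = rng.randn(n_subjects, n_times, n_space)
X[:, 2:4, 8:12] += 1.0  # illustrative effect

# Lattice adjacency over the spatial dimension only; when the matrix has
# n_space nodes (rather than n_times * n_space), temporal adjacency is
# handled implicitly and limited by max_step.
connectivity = grid_to_graph(1, n_space)
T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
    X, threshold=2.0, connectivity=connectivity, max_step=1, seed=0)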
Example 12: test_cluster_permutation_t_test_with_connectivity
def test_cluster_permutation_t_test_with_connectivity():
    """Test cluster level permutations T-test with connectivity matrix."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        return
    # we don't care about p-values in any of these, so do fewer permutations
    out = permutation_cluster_1samp_test(condition1_1d, n_permutations=50)
    connectivity = grid_to_graph(1, condition1_1d.shape[1])
    out_connectivity = permutation_cluster_1samp_test(
        condition1_1d, n_permutations=50, connectivity=connectivity)
    assert_array_equal(out[0], out_connectivity[0])
    for a, b in zip(out_connectivity[1], out[1]):
        assert_true(np.sum(out[0][a]) == np.sum(out[0][b]))
        assert_true(np.all(a[b]))
    # test spatio-temporal with no time connectivity (repeat spatial pattern)
    connectivity_2 = sparse.coo_matrix(
        linalg.block_diag(connectivity.asfptype().todense(),
                          connectivity.asfptype().todense()))
    condition1_2 = np.concatenate((condition1_1d,
                                   condition1_1d), axis=1)
    out_connectivity_2 = permutation_cluster_1samp_test(
        condition1_2, n_permutations=50, connectivity=connectivity_2)
    # make sure we were operating on the same values
    split = len(out[0])
    assert_array_equal(out[0], out_connectivity_2[0][:split])
    assert_array_equal(out[0], out_connectivity_2[0][split:])
    # make sure we really got 2x the number of original clusters
    n_clust_orig = len(out[1])
    assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)
    # Make sure that we got the old ones back
    n_pts = condition1_1d.shape[1]
    data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
    data_2 = set([np.sum(out_connectivity_2[0][a[:n_pts]]) for a in
                  out_connectivity_2[1][:]])
    assert_true(len(data_1.intersection(data_2)) == len(data_1))
    # now use the other algorithm
    condition1_3 = np.reshape(condition1_2, (40, 2, 350))
    out_connectivity_3 = spatio_temporal_cluster_1samp_test(
        condition1_3, n_permutations=50,
        connectivity=connectivity, max_step=0,
        threshold=1.67, check_disjoint=True)
    # make sure we were operating on the same values
    split = len(out[0])
    assert_array_equal(out[0], out_connectivity_3[0][0])
    assert_array_equal(out[0], out_connectivity_3[0][1])
    # make sure we really got 2x the number of original clusters
    assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)
    # Make sure that we got the old ones back
    data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
    data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
                  out_connectivity_3[1]])
    assert_true(len(data_1.intersection(data_2)) == len(data_1))
    # test new versus old method
    out_connectivity_4 = spatio_temporal_cluster_1samp_test(
        condition1_3, n_permutations=50,
        connectivity=connectivity, max_step=2,
        threshold=1.67)
    out_connectivity_5 = spatio_temporal_cluster_1samp_test(
        condition1_3, n_permutations=50,
        connectivity=connectivity, max_step=1,
        threshold=1.67)
    # clusters could be in a different order
    sums_4 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_4[1]]
    sums_5 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_5[1]]
    sums_4 = np.sort(sums_4)
    sums_5 = np.sort(sums_5)
    assert_array_almost_equal(sums_4, sums_5)
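Example 12 also exercises spatio_temporal_cluster_1samp_test, the convenience wrapper for data shaped (observations, times, spatial points). A closing sketch of a typical call on simulated data, under the same connectivity-era assumption as above; with the default out_type='indices', each cluster comes back as a (time_indices, spatial_indices) tuple.

import numpy as np
from sklearn.feature_extraction.image import grid_to_graph
from mne.stats import spatio_temporal_cluster_1samp_test

rng = np.random.RandomState(0)
n_subjects, n_times, n_space = 10, 4, 25
X = rng.randn(n_subjects, n_times, n_space)
X[:, 1:3, 5:10] += 1.0  # illustrative effect

connectivity = grid_to_graph(1, n_space)  # spatial graph; time via max_step
T_obs, clusters, cluster_pv, H0 = spatio_temporal_cluster_1samp_test(
    X, connectivity=connectivity, max_step=1, threshold=2.0,
    n_permutations=512, seed=0)
good_cluster_inds = np.where(cluster_pv < 0.05)[0]
for ind in good_cluster_inds:
    time_inds, space_inds = clusters[ind]  # indices into the 2nd/3rd axes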