This article collects typical usage examples of Dataset.sa['chunks'] in Python, i.e. assigning the 'chunks' sample attribute of mvpa2.datasets.base.Dataset. If you have been wondering what Dataset.sa['chunks'] does, how to use it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the containing class, mvpa2.datasets.base.Dataset.
Three code examples of Dataset.sa['chunks'] are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
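Before the examples, a minimal self-contained sketch of what assigning Dataset.sa['chunks'] does may be useful: sa is the collection of per-sample attributes, so the assignment attaches one value per dataset row. The toy data below is invented for illustration and does not come from the examples that follow.

import numpy as np
from mvpa2.datasets.base import Dataset

# toy dataset: 4 samples x 3 features (values are arbitrary)
ds = Dataset(np.arange(12).reshape(4, 3))

# 'chunks' is a sample attribute: one entry per sample (row),
# conventionally identifying the acquisition run a sample came from
ds.sa['chunks'] = [0, 0, 1, 1]

print(ds.sa.chunks)   # -> [0 0 1 1]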
Example 1: create_mvpa_dataset
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import sa['chunks'] [as alias]
def create_mvpa_dataset(aXData1, aXData2, chunks, labels):
    feat_list = []
    for x1, x2, chunk in zip(aXData1, aXData2, chunks):
        feat_list.append([x1, x2])

    data = Dataset(samples=feat_list)
    data.sa['id'] = range(0, len(labels))
    data.sa['chunks'] = chunks
    data.sa['targets'] = labels

    return data
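A possible way to call this helper, assuming PyMVPA is installed; the two feature streams, chunk ids, and labels below are made-up toy values:

aXData1 = [0.1, 0.4, 0.2, 0.9]   # first feature per sample
aXData2 = [1.0, 0.8, 0.5, 0.3]   # second feature per sample
chunks = [0, 0, 1, 1]            # run/chunk id per sample
labels = ['a', 'a', 'b', 'b']    # target label per sample

ds = create_mvpa_dataset(aXData1, aXData2, chunks, labels)
print(ds.shape)        # (4, 2): 4 samples x 2 features
print(ds.sa.chunks)    # [0 0 1 1]
print(ds.sa.targets)   # ['a' 'a' 'b' 'b']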
Example 2: test_featuregroup_mapper
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import sa['chunks'] [as alias]
# imports needed to run this test (paths as in PyMVPA; the assert helpers
# are re-exported by PyMVPA's testing utilities)
import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.mappers.fx import mean_group_feature, mean_feature
from mvpa2.testing.tools import assert_equal, assert_array_equal, assert_raises

def test_featuregroup_mapper():
    ds = Dataset(np.arange(24).reshape(3, 8))
    ds.fa['roi'] = [0, 1] * 4
    # just to check
    ds.sa['chunks'] = np.arange(3)
    # correct results
    csamples = [[3, 4], [11, 12], [19, 20]]
    croi = [0, 1]
    cchunks = np.arange(3)
    m = mean_group_feature(['roi'])
    mds = m.forward(ds)
    assert_equal(mds.shape, (3, 2))
    assert_array_equal(mds.samples, csamples)
    assert_array_equal(mds.fa.roi, np.unique([0, 1] * 4))
    # SAs should simply remain the same
    assert_array_equal(mds.sa.chunks, np.arange(3))

    # now without grouping
    m = mean_feature()
    # forwarding just the samples should yield the same result
    assert_array_equal(m.forward(ds.samples),
                       m.forward(ds).samples)

    # And when operating on a dataset with >1D samples, then operate
    # only across "features", i.e. 1st dimension
    ds = Dataset(np.arange(24).reshape(3, 2, 2, 2))
    mapped = ds.get_mapped(m)
    assert_array_equal(m.forward(ds.samples),
                       mapped.samples)
    assert_array_equal(mapped.samples.shape, (3, 2, 2))
    assert_array_equal(mapped.samples, np.mean(ds.samples, axis=1))
    # and still could map back? ;) not ATM, so just to ensure consistency
    assert_raises(NotImplementedError,
                  mapped.a.mapper.reverse, mapped.samples)

    # but it should also work with standard 2d sample arrays
    ds = Dataset(np.arange(24).reshape(3, 8))
    mapped = ds.get_mapped(m)
    assert_array_equal(mapped.samples.shape, (3, 1))
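To make the expected values above concrete, here is a plain-NumPy re-derivation of what mean_group_feature(['roi']) computes for this dataset: features sharing a roi value are averaged into a single column. This illustrates the grouping logic only; it is not PyMVPA's implementation.

import numpy as np

samples = np.arange(24).reshape(3, 8)
roi = np.array([0, 1] * 4)

# average the columns belonging to each distinct roi value
expected = np.column_stack([samples[:, roi == r].mean(axis=1)
                            for r in np.unique(roi)])
print(expected)
# [[  3.   4.]
#  [ 11.  12.]
#  [ 19.  20.]]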
Example 3: simple_sim1
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import sa['chunks'] [as alias]
#......... part of the code omitted here .........
# fisher
dissims = np.arctanh(dissims)
# generate target clean "picture"
d = np.asanyarray(dissims[0])
signal_clean = np.zeros(shape + (len(vector_form(d)),))
# generate ground truth for clustering
cluster_truth = np.zeros(shape, dtype='int')
if rois_arrangement == 'circle':
    radius = min(shape[:2])/4.
    center = np.array((radius*2,) * len(shape)).astype(int)
    # arrange at quarter distance from center
    for i, dissim in enumerate(dissims):
        dissim = vector_form(dissim)
        # that is kinda boring -- the same dissimilarity to each
        # voxel???
        #
        # TODO: come up with a better arrangement/idea, e.g. to
        # generate an MVPA pattern which would satisfy the
        # dissimilarity (not exactly but at least close). That
        # would make more sense
        roi_center = center.copy()
        roi_center[0] += int(radius * np.cos(2*np.pi*i/ndissims))
        roi_center[1] += int(radius * np.sin(2*np.pi*i/ndissims))
        for coords in roi_neighborhood(roi_center):
            acoords = np.asanyarray(coords)
            if np.all(acoords >= [0]*len(coords)) and \
               np.all(acoords < signal_clean.shape[:len(coords)]):
                signal_clean.__setitem__(coords, dissim)
                cluster_truth.__setitem__(coords, i+1)
else:
    raise ValueError("I know only circle")
# generated randomly and will be mixed into subjects with different weights
# TODO: static across runs within subject?? if so -- would be no different
# from having RSAs?
common_noises = get_intrinsic_noises(
    signal_clean.shape,
    std=noise_common_std,
    sigma=noise_common_smooth,
    n=noise_common_n)
assert common_noises[0].ndim == 3, "There should be no time comp"
# Now lets generate per subject and per run data by adding some noise(s)
# all_signals = []
dss = []
for isubject in xrange(nsubjects):
    # Interesting noise, simulating some underlying process which has nothing
    # to do with original design/similarity but having spatial structure which
    # repeats through runs with random weights (consider it to be a principal component)

    # generated randomly for each subject separately, but they should have
    # common structure across runs
    subj_specific_noises = get_intrinsic_noises(signal_clean.shape,
                                                std=noise_subject_std,
                                                sigma=noise_subject_smooth,
                                                n=noise_subject_n)
    assert subj_specific_noises[0].ndim == 3, "There should be no time comp"

    # subject_signals = []
    dss_subject = []
    subj_common_noises = [noise * np.random.normal()
                          for noise in common_noises]

    subj_specific_mixins = generate_mixins(nruns)
    subj_common_mixins = generate_mixins(nruns)

    for run in range(nruns):
        signal_run = signal_clean.copy()
        for noise in subj_specific_noises:
            signal_run += noise * subj_specific_mixins[run]
        for noise in subj_common_noises:
            signal_run += noise * subj_common_mixins[run]
        # generic noise -- no common structure across subjects/runs
        signal_run += filter_each_2d(
            np.random.normal(size=signal_clean.shape)*noise_independent_std,
            noise_independent_smooth)

        # go back to correlations with inverse of fisher
        signal_run = np.tanh(signal_run)
        # rollaxis to bring similarities into leading dimension
        ds = Dataset(np.rollaxis(signal_run, 2, 0))
        ds.sa['chunks'] = [run]
        ds.sa['dissimilarity'] = np.arange(len(dissim))  # Lame one for now
        ds_flat = ds.get_mapped(FlattenMapper(shape=ds.shape[1:],
                                              space='pixel_indices'))
        dss_subject.append(ds_flat)
        #subject_signals.append(signal_run)

    #all_signals.append(subject_signals)
    ds = dsvstack(dss_subject)
    ds.a['mapper'] = dss_subject[0].a.mapper  # .a are not transferred by vstack
    dss.append(ds)
# Instrumental noise -- the most banal
assert(len(dss) == nsubjects)
assert(len(dss[0]) == nruns*len(dissim))
return np.tanh(signal_clean), cluster_truth, dss
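The simulation leans on the Fisher z-transform round trip: np.arctanh maps correlations into an unbounded space where Gaussian noise can be added, and np.tanh maps the result back into the (-1, 1) correlation range. A minimal standalone sketch of that round trip, with made-up numbers:

import numpy as np

corr = np.array([0.1, 0.5, 0.9])
z = np.arctanh(corr)                                      # Fisher z: to unbounded space
z_noisy = z + np.random.normal(scale=0.05, size=z.shape)  # add noise in z-space
corr_noisy = np.tanh(z_noisy)                             # back to (-1, 1)
print(corr_noisy)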