This article collects typical usage examples of the Python method mvpa2.datasets.base.Dataset.a['mapper']. If you have been wondering what exactly Dataset.a['mapper'] does, how to use it, or what it looks like in real code, the curated example below should help. You can also explore further usage examples of the containing class, mvpa2.datasets.base.Dataset.
One code example of the Dataset.a['mapper'] method is shown below.
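Before the full example, here is a minimal sketch of the pattern itself (the array shapes and attribute values are illustrative assumptions, not taken from the example below): in PyMVPA, ds.a is the dataset-attribute collection, and get_mapped() stores the mapper that links flattened samples back to their original space under ds.a['mapper'].
import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.mappers.flatten import FlattenMapper

# toy dataset: 5 samples of a 4x4 "image" (illustrative shape)
ds = Dataset(np.random.normal(size=(5, 4, 4)))
# get_mapped() applies the mapper and records it in the .a collection
ds_flat = ds.get_mapped(FlattenMapper(shape=ds.shape[1:],
                                      space='pixel_indices'))
assert 'mapper' in ds_flat.a
# the attribute can also be (re)assigned by hand, as Example 1 does below
ds_flat.a['mapper'] = ds_flat.a.mapper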
Example 1: simple_sim1
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import a['mapper'] [as alias]
#......... (part of the code omitted here) .........
# Fisher z-transform: map correlations to an unbounded scale so that
# Gaussian noise can be added (inverted with tanh further below)
dissims = np.arctanh(dissims)
# generate target clean "picture"
d = np.asanyarray(dissims[0])
signal_clean = np.zeros(shape + (len(vector_form(d)),))
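# (trailing axis: one entry per pairwise dissimilarity in vectorized form)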
# generate ground truth for clustering
cluster_truth = np.zeros(shape, dtype='int')
if rois_arrangement == 'circle':
radius = min(shape[:2])/4.
center = np.array((radius*2,) * len(shape)).astype(int)
# arrange at quarter distance from center
for i, dissim in enumerate(dissims):
dissim = vector_form(dissim)
# that is kinda boring -- the same dissimilarity to each
# voxel???
#
# TODO: come up with a better arrangement/idea, e.g. to
# generate an MVPA pattern which would satisfy the
# dissimilarity (not exactly but at least close). That
# would make more sense
roi_center = center.copy()
roi_center[0] += int(radius * np.cos(2*np.pi*i/ndissims))
roi_center[1] += int(radius * np.sin(2*np.pi*i/ndissims))
for coords in roi_neighborhood(roi_center):
acoords = np.asanyarray(coords)
if np.all(acoords >= [0]*len(coords)) and \
np.all(acoords < signal_clean.shape[:len(coords)]):
signal_clean[coords] = dissim
cluster_truth[coords] = i + 1
else:
raise ValueError("I know only circle")
# Common noises: generated randomly once and mixed into every subject with
# different weights.
# TODO: should these be static across runs within a subject? If so, that
#       would be no different from having RSAs?
common_noises = get_intrinsic_noises(
signal_clean.shape,
std=noise_common_std,
sigma=noise_common_smooth,
n=noise_common_n)
assert common_noises[0].ndim == 3, "There should be no time component"
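# Noise model: three additive components are layered on top of signal_clean --
# (1) common noises, shared across subjects but remixed per run,
# (2) subject-specific noises, fixed per subject and remixed per run,
# (3) independent Gaussian noise, drawn fresh for every run.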
# Now let's generate per-subject, per-run data by adding the noise components
# all_signals = []
dss = []
for isubject in range(nsubjects):
# "Interesting" noise: simulates an underlying process that has nothing to
# do with the original design/similarity, but has a spatial structure that
# repeats through runs with random weights (think of it as a principal
# component). Generated anew for each subject, so the structure is common
# across that subject's runs only.
subj_specific_noises = get_intrinsic_noises(signal_clean.shape,
std=noise_subject_std,
sigma=noise_subject_smooth,
n=noise_subject_n)
assert subj_specific_noises[0].ndim == 3, "There should be no time component"
# subject_signals = []
dss_subject = []
subj_common_noises = [noise * np.random.normal()
for noise in common_noises]
subj_specific_mixins = generate_mixins(nruns)
subj_common_mixins = generate_mixins(nruns)
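# Fresh random mixing weights per run: the spatial noise patterns recur
# across runs while their amplitudes vary.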
for run in range(nruns):
signal_run = signal_clean.copy()
for noise in subj_specific_noises:
signal_run += noise * subj_specific_mixins[run]
for noise in subj_common_noises:
signal_run += noise * subj_common_mixins[run]
# generic noise -- no common structure across subjects/runs
signal_run += filter_each_2d(
np.random.normal(size=signal_clean.shape)*noise_independent_std,
noise_independent_smooth)
# back to correlations via the inverse Fisher transform
signal_run = np.tanh(signal_run)
# rollaxis to bring similarities into leading dimension
ds = Dataset(np.rollaxis(signal_run, 2, 0))
ds.sa['chunks'] = [run]
ds.sa['dissimilarity'] = np.arange(len(dissim))  # placeholder labels; len(dissim) = number of pairwise dissimilarities
ds_flat = ds.get_mapped(FlattenMapper(shape=ds.shape[1:],
space='pixel_indices'))
dss_subject.append(ds_flat)
#subject_signals.append(signal_run)
#all_signals.append(subject_signals)
ds = dsvstack(dss_subject)
ds.a['mapper'] = dss_subject[0].a.mapper  # .a attributes are not transferred by vstack
dss.append(ds)
# Instrumental noise -- the most banal
assert len(dss) == nsubjects
assert len(dss[0]) == nruns * len(dissim)
return np.tanh(signal_clean), cluster_truth, dss
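As a follow-up to the dsvstack step above: PyMVPA's vstack (which the example aliases as dsvstack) does not carry the .a dataset attributes over to the stacked result, so the mapper has to be re-attached by hand before the combined dataset can be reverse-mapped. A minimal, self-contained sketch of that pattern (the shapes and the two toy runs are illustrative assumptions):
import numpy as np
from mvpa2.datasets.base import Dataset
from mvpa2.base.dataset import vstack as dsvstack
from mvpa2.mappers.flatten import FlattenMapper

# two toy per-run datasets, flattened the same way as in Example 1
runs = []
for run in range(2):
    ds = Dataset(np.random.normal(size=(3, 4, 4)))
    ds.sa['chunks'] = [run] * 3
    runs.append(ds.get_mapped(FlattenMapper(shape=ds.shape[1:],
                                            space='pixel_indices')))

ds_all = dsvstack(runs)
# dataset attributes (.a) are not transferred by vstack -- re-attach the mapper
ds_all.a['mapper'] = runs[0].a.mapper
# samples can now be mapped back into the original 4x4 space
img = ds_all.a.mapper.reverse1(ds_all.samples[0])
assert img.shape == (4, 4)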