本文整理汇总了Python中mvpa2.datasets.base.Dataset.fa['node_indices']方法的典型用法代码示例。如果您正苦于以下问题：Python Dataset.fa['node_indices']方法的具体用法？Python Dataset.fa['node_indices']怎么用？Python Dataset.fa['node_indices']使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mvpa2.datasets.base.Dataset的用法示例。
在下文中一共展示了Dataset.fa['node_indices']方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_test_dataset
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import fa['node_indices'] [as 别名]
def _get_test_dataset(include_nodes=True):
    """Return a Dataset matching the contents of _get_test_sample_node_data().

    Parameters
    ----------
    include_nodes : bool
        When True (default), attach the node indices as the
        'node_indices' feature attribute.

    Returns
    -------
    Dataset
        Dataset with the test samples, a 'NIFTI_INTENT_NONE' intent per
        sample, and optionally a 'node_indices' feature attribute.
    """
    samples, nodes, _ = _get_test_sample_node_data()
    ds = Dataset(np.asarray(samples))
    if include_nodes:
        ds.fa['node_indices'] = np.asarray(nodes)
    # one NIfTI intent code per sample row
    ds.sa['intents'] = ['NIFTI_INTENT_NONE'] * ds.nsamples
    return ds
示例2: test_niml_dset_stack
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import fa['node_indices'] [as 别名]
def test_niml_dset_stack(self):
    """Exercise niml.hstack: dense stacking, padded stacking, and errors."""
    shapes = [(10, 3), (10, 4), (10, 5)]
    node_idxs = [[0, 1, 2], [3, 2, 1, 0], None]

    dsets = []
    for shape, idx in zip(shapes, node_idxs):
        d = Dataset(np.random.normal(size=shape))
        if idx is not None:
            d.fa['node_indices'] = idx
        dsets.append(d)

    # Dense stack: features are concatenated, indices offset per dataset.
    stacked = niml.hstack(dsets)
    assert_equal(stacked.nfeatures, 12)
    assert_equal(stacked.nsamples, 10)
    expected = np.asarray([0, 1, 2, 6, 5, 4, 3, 7, 8, 9, 10, 11])
    assert_array_equal(stacked.fa['node_indices'], expected)

    # Padded stack: each dataset gets a 10-wide slot.
    stacked = niml.hstack(dsets, 10)
    stacked = niml.hstack(dsets, 10)  # twice to ensure not overwriting
    assert_equal(stacked.nfeatures, 30)
    expected = np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                           13, 12, 11, 10, 14, 15, 16, 17, 18, 19,
                           20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
    assert_array_equal(stacked.fa['node_indices'], expected)
    assert_true(np.all(stacked[:, 4].samples == 0))
    assert_array_equal(stacked[:, 10:14].samples, dsets[1].samples)

    # If not enough space it should raise an error
    stacker = (lambda pad: niml.hstack(dsets, pad))
    assert_raises(ValueError, stacker, 2)
    # If sparse then with no padding it should fail
    dsets[0].fa.node_indices[0] = 3
    assert_raises(ValueError, stacker, None)
    # Using an illegal node index should raise an error
    dsets[1].fa.node_indices[0] = 666
    assert_raises(ValueError, stacker, 10)
示例3: get_testdata
# 需要导入模块: from mvpa2.datasets.base import Dataset [as 别名]
# 或者: from mvpa2.datasets.base.Dataset import fa['node_indices'] [as 别名]
def get_testdata(self):
    """Generate synthetic per-subject train/test datasets on a toy surface.

    Builds a surface of four disjoint triangles, creates a signal with
    coarse-scale (per-triangle) and fine-scale (per-vertex) components,
    then simulates subjects by applying an independent random 3x3
    rotation within each triangle's three columns.

    Returns
    -------
    tuple
        (dss_train, dss_test, surface): z-scored train/test Dataset
        halves for each simulated subject, plus the Surface.
    """
    # rng deliberately unseeded; seed here if reproducibility is needed
    rng = np.random.RandomState()
    n_time = 200      # samples per dataset
    n_tri = 4         # number of separated triangles
    n_subj = 10       # number of simulated subjects
    n_vert = n_tri * 3

    # Four separated unit-ish triangles in the z=0 plane.
    verts = np.zeros((n_vert, 3))
    tris = []
    height = 1 / np.sqrt(3)
    for t in range(n_tri):
        base = t * 3
        verts[base] = [t * 2, 0, 0]
        verts[base + 1] = [t * 2 + 1, height, 0]
        verts[base + 2] = [t * 2 + 1, -height, 0]
        tris.append([base, base + 1, base + 2])
    surface = Surface(verts, np.array(tris))

    # Shared signal: coarse-scale per triangle plus fine-scale per vertex.
    signal = np.zeros((n_time, n_vert))
    for t in range(n_tri):
        signal[:, t * 3:(t + 1) * 3] += rng.normal(size=(n_time, 1))
    signal += rng.normal(size=(n_time, n_vert))

    dss_train, dss_test = [], []
    half = n_time // 2
    for _ in range(n_subj):
        # Rotate each triangle's columns by an independent random rotation.
        rotated = np.zeros_like(signal)
        for t in range(n_tri):
            cols = slice(t * 3, (t + 1) * 3)
            rotated[:, cols] = np.dot(signal[:, cols],
                                      get_random_rotation(3))
        ds = Dataset(rotated)
        ds.fa['node_indices'] = np.arange(n_vert)
        ds_train, ds_test = ds[:half, :], ds[half:, :]
        zscore(ds_train, chunks_attr=None)
        zscore(ds_test, chunks_attr=None)
        dss_train.append(ds_train)
        dss_test.append(ds_test)
    return dss_train, dss_test, surface