This page collects typical usage examples of the Python method mvpa2.datasets.base.Dataset.samples. Wondering what Dataset.samples does, or how to use it in your own code? The curated examples below may help. You can also read more about the containing class, mvpa2.datasets.base.Dataset.
The following 6 code examples show Dataset.samples in use, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _forward_dataset
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also needs: import numpy as np
def _forward_dataset(self, ds):
chunks_attr = self.__chunks_attr
mds = Dataset([])
mds.a = ds.a
    # mds.sa = ds.sa
    # mds.fa = ds.fa
if chunks_attr is None:
# global kmeans
mds.samples = self._kmeans(ds.samples).labels_
print max(mds.samples)
    else:
        # per-chunk kmeans: fill one label per sample, chunk by chunk
        mds.samples = np.zeros(ds.nsamples, dtype=int)
        for c in ds.sa[chunks_attr].unique:
            slicer = np.where(ds.sa[chunks_attr].value == c)[0]
            mds.samples[slicer] = self._kmeans(ds.samples[slicer]).labels_
return mds
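For context, here is a minimal, self-contained sketch of the global-kmeans branch above, with sklearn.cluster.KMeans standing in for the mapper's _kmeans helper (the helper itself is not shown in this example, so treating it as sklearn's KMeans is an assumption):

import numpy as np
from sklearn.cluster import KMeans
from mvpa2.datasets.base import Dataset

ds = Dataset(np.random.rand(20, 5))    # toy dataset: 20 samples x 5 features
mds = Dataset([])
mds.a = ds.a
# one cluster label per sample, stored directly in Dataset.samples
mds.samples = KMeans(n_clusters=3).fit(ds.samples).labels_
print max(mds.samples)                 # highest cluster label, e.g. 2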
Example 2: _forward_dataset
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also needs: import numpy as np (volneighbors is a project-local helper)
def _forward_dataset(self, ds):
out_ds = Dataset([])
out_ds.a = ds.a
    # indices of the non-zero samples and their voxel coordinates
    iv = np.nonzero(ds.samples)[0]
    coords = ds.sa.values()[0][iv]
    out_ds.fa = coords
dim = ds.a.voxel_dim
nbdim = self.__neighbor_shape.nbdim
nbsize = self.__neighbor_shape.nbsize
shape_type = self.__neighbor_shape.shape_type
volnb = volneighbors(coords, dim, nbdim, nbsize, shape_type)
    distmask = volnb.compute_offsets()
    if self.__outsparse is True:
        # keep the sparse distance mask as-is
        out_ds.samples = distmask
    elif self.__outsparse is False:
        # expand the sparse mask into a dense matrix
        distmask = distmask.todense()
        out_ds.samples = distmask
    else:
        raise RuntimeError('outsparse should be True or False.')
return out_ds
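The outsparse flag only switches the storage format of the result. Here is a hedged, standalone sketch of that toggle, with a small scipy.sparse matrix standing in for the output of compute_offsets() (an assumption, since volneighbors is not shown here):

import numpy as np
from scipy import sparse

distmask = sparse.coo_matrix(np.eye(3))   # stand-in for volnb.compute_offsets()
outsparse = False
if outsparse is True:
    samples = distmask                    # keep the sparse form
elif outsparse is False:
    samples = distmask.todense()          # expand to a dense matrix
else:
    raise RuntimeError('outsparse should be True or False.')
print samples.shape                       # (3, 3)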
Example 3: test_pcamapper
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also needs: import numpy as np, import mdp, PCAMapper from MVPA2's
# MDP adaptor, and the assert_* helpers (e.g. from numpy.testing)
def test_pcamapper():
    # data: a linear ramp sampled 40 times in 20d space (40x20; samples x features)
ndlin = Dataset(np.concatenate([np.arange(40)
for i in range(20)]).reshape(20,-1).T)
pm = PCAMapper()
    # training must fail on integer data (MDP only supports float dtypes)
    assert_raises(mdp.NodeException, pm.train, ndlin)
ndlin.samples = ndlin.samples.astype('float')
ndlin_noise = ndlin.copy()
ndlin_noise.samples += np.random.random(size=ndlin.samples.shape)
# we have no variance for more than one PCA component, hence just one
# actual non-zero eigenvalue
assert_raises(mdp.NodeException, pm.train, ndlin)
pm.train(ndlin_noise)
assert_equal(pm.proj.shape, (20, 20))
# now project data into PCA space
p = pm.forward(ndlin.samples)
assert_equal(p.shape, (40, 20))
# check that the mapped data can be fully recovered by 'reverse()'
assert_array_almost_equal(pm.reverse(p), ndlin)
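The two failing train calls are not redundant: the first presumably rejects the integer dtype, while the second fails because the noiseless ramp is rank one, leaving a single non-zero eigenvalue. A quick numpy check of that rank claim (a sketch, not part of the original test):

import numpy as np

X = np.concatenate([np.arange(40) for i in range(20)]).reshape(20, -1).T
X = X.astype('float')
# every feature is the same ramp, so the centered data matrix has rank one
print np.linalg.matrix_rank(X - X.mean(axis=0))   # -> 1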
Example 4: _forward_dataset
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also needs: from scipy.spatial.distance import squareform
def _forward_dataset(self, ds):
mds = Dataset([])
mds.a = ds.a
    # condensed pairwise distances, expanded into a square matrix
    vectordist = self._fdistance(ds.samples)
    mds.samples = squareform(vectordist, force='no', checks=True)
return mds
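A minimal usage sketch of the same pattern, with scipy's pdist standing in for the mapper's _fdistance helper (an assumption; the helper is not shown here):

import numpy as np
from scipy.spatial.distance import pdist, squareform
from mvpa2.datasets.base import Dataset

ds = Dataset(np.random.rand(10, 4))
mds = Dataset([])
mds.a = ds.a
vectordist = pdist(ds.samples, 'euclidean')   # condensed distance vector
mds.samples = squareform(vectordist, force='no', checks=True)
print mds.samples.shape                       # (10, 10) square distance matrix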
Example 5: main
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also assumes: time, numpy as np, matplotlib.pyplot as plt,
# matplotlib.cm as cm, scipy.spatial.distance as ds, sklearn.cluster.SpectralClustering,
# map2nifti from mvpa2, and the project-local helpers open_conn_mat, create_mask,
# mask_feature
def main():
'''
Spectral clustering...
'''
st = time.time()
tmpset = Dataset([])
# hfilename = "/nfs/j3/userhome/dangxiaobin/workingdir/cutROI/%s/fdt_matrix2_targets_sc.T.hdf5"%(id)
hfilename = 'fdt_matrix2.T.hdf5'
print hfilename
    # load the connectivity profiles of the seed mask voxels
    conn = open_conn_mat(hfilename)
    tmpset.a = conn.a
    print conn.shape, conn.a
    # remove some features
    mask = create_mask(conn.samples, 0.5, 1)
    # print mask, mask.shape
    conn_m = mask_feature(conn.samples, mask)
# print conn_m
    conn_map = conn_m.T      # voxels x features; avoids shadowing the builtin map
    print "conn_map:"
    print conn_map.shape, conn_map.max(), conn_map.min()
    voxel = np.array(conn.fa.values())
    print voxel[0]
    v = voxel[0]
    # pairwise Euclidean distances between voxel coordinates
    spacedist = ds.cdist(v, v, 'euclidean')
    print spacedist
"""
    similar_mat = create_similarity_mat(conn_map, conn.fa, 0.1, 2)
X = np.array(similar_mat)
print "similarity matrix: shape:",X.shape
print X
"""
    corr = np.corrcoef(conn_map)
    corr = np.abs(corr)
    # blend profile correlation (0.1) with spatial proximity (0.9)
    corr = 0.1*corr + 0.9/(spacedist + 1)
print "Elaspsed time: ", time.time() - st
print corr.shape,corr
    plt.imshow(corr, interpolation='nearest', cmap=cm.jet)
    cb = plt.colorbar()
    plt.xticks(())
    plt.yticks(())
    plt.show()
    cnum = 3
    near = 100
    # spectral clustering on the precomputed affinity matrix
    sc = SpectralClustering(n_clusters=cnum, eigen_solver='arpack',
                            n_init=100, affinity='precomputed',
                            n_neighbors=near)
    #sc.fit(conn_map)
    sc.fit_predict(corr)
'''
cnum = 3
near = 100
    sc = SpectralClustering(n_clusters=cnum, eigen_solver='arpack',
                            n_init=100, affinity='nearest_neighbors',
                            n_neighbors=near)
    sc.fit(conn_map)
# sc.fit_predict(X)
# param = sc.get_params(deep=True)
'''
    tmpset.samples = sc.labels_ + 1    # cluster labels, shifted to start at 1
# print sc.affinity_matrix_
#print list(sc.labels_)
print "Elaspsed time: ", time.time() - st
print "Number of voxels: ", sc.labels_.size
print "Number of clusters: ", np.unique(sc.labels_).size
result = map2nifti(tmpset)
result.to_filename("fg_parcel_S0006.nii.gz")
print ".....The end........"
Example 6: spectral_seg
# Required import: from mvpa2.datasets.base import Dataset [as alias]
# Or: from mvpa2.datasets.base.Dataset import samples [as alias]
# This snippet also assumes: time, numpy as np, scipy.spatial.distance as ds,
# h5load and map2nifti from mvpa2, sklearn.cluster.SpectralClustering, and the
# project-local helpers create_mask and get_neighbors
def spectral_seg(hfilename,outf):
'''
Spectral clustering...
'''
tmpset = Dataset([])
#pdb.set_trace()
print "hdf name:",hfilename
st = time.time()
    ### 1. load the connectivity profiles of the seed mask voxels
conn = h5load(hfilename)
tmpset.a = conn.a
print "connection matrix shape:"
print conn.shape
    ### 2. feature selection
    mask = create_mask(conn.samples, 5)
    conn_m = conn.samples[mask]
    conn_map = conn_m.T      # voxels x features; avoids shadowing the builtin map
    print "masked conn matrix:"
    print conn_map.shape, conn_map.max(), conn_map.min()
    ### 3. average each voxel's profile with its spatial neighbors
    temp = np.zeros(conn_map.shape)
    voxel = np.array(conn.fa.values())
    v = voxel[0]
    v = v.tolist()
    shape = [256, 256, 256]
    i = 0
    for coor in v:
        mean_f = conn_map[i]
        #print mean_f.shape
        #plt.plot(mean_f)
        #plt.show()
        neigh = get_neighbors(coor, 2, shape)
        #print "neigh:", neigh
        count = 1
        # running mean over the profiles of all in-mask neighbors
        for n in neigh:
            if n in v:
                mean_f = (mean_f*count + conn_map[v.index(n)])/(count + 1)
                count += 1
        temp[i] = mean_f
        i += 1
    #sys.exit(0)
    conn_map = temp
print "average connection matrix"
    ### 4. spatial distance between voxel coordinates
    spacedist = ds.cdist(v, v, 'euclidean')
#print spacedist
    ### 5. correlation matrix
    corr = np.corrcoef(conn_map)
    corr = np.abs(corr)
    ### 6. mix the similarity matrices: correlation (0.7) plus spatial proximity (0.3)
    corr = 0.7*corr + 0.3/(spacedist + 1)
#plt.imshow(corr,interpolation='nearest',cmap=cm.jet)
#cb = plt.colorbar()
#pl.xticks(())
#pl.yticks(())
#pl.show()
print "mix up the corr and spacial matrix"
#sys.exit(0)
    ### 7. spectral segmentation
    print "do segmentation"
    cnum = 3
    near = 100
    # spectral clustering on the precomputed affinity matrix
    sc = SpectralClustering(n_clusters=cnum, eigen_solver='arpack',
                            n_init=100, affinity='precomputed',
                            n_neighbors=near)
    sc.fit_predict(corr)
    tmpset.samples = sc.labels_ + 1    # cluster labels, shifted to start at 1
print "Number of voxels: ", sc.labels_.size
print "Number of clusters: ", np.unique(sc.labels_).size
print "Elapsed time: ", time.time() - st
    ### 8. save the segmentation result
    print "save the result to xxx_parcel.nii.gz"
result = map2nifti(tmpset)
result.to_filename(outf)
print ".....Segment end........"
return True
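A hypothetical invocation (the input name reuses the file from Example 5; the output name is a placeholder, not a path from the original source):

spectral_seg('fdt_matrix2.T.hdf5', 'fg_parcel.nii.gz')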