

Python dataset.vstack Function Code Examples

This article collects typical usage examples of the Python function mvpa2.base.dataset.vstack. If you have been wondering what vstack does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


The following shows 15 code examples of the vstack function, sorted by popularity by default.
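Before turning to the real-world examples, here is a minimal, self-contained sketch of the basic contract (an illustrative construction, assuming mvpa2 is installed; the sample data are made up): vstack concatenates datasets along the sample axis, so the inputs must agree in their number of features and in their attribute keys.

import numpy as np
from mvpa2.datasets import Dataset
from mvpa2.base.dataset import vstack

# two small datasets with the same number of features (4)
ds_a = Dataset(np.zeros((3, 4)), sa={'targets': [1, 1, 1]})
ds_b = Dataset(np.ones((2, 4)), sa={'targets': [2, 2]})

merged = vstack((ds_a, ds_b))
print(merged.shape)       # (5, 4) -- samples are concatenated
print(merged.sa.targets)  # [1 1 1 2 2] -- sample attributes follow along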

Example 1: test_mergeds

def test_mergeds():
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.fa['one'] = np.ones(5)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1, chunks=1)
    data1.fa['one'] = np.zeros(5)
    data2 = Dataset.from_wizard(np.ones((3, 5)), targets=2, chunks=1)
    data3 = Dataset.from_wizard(np.ones((4, 5)), targets=2)
    data4 = Dataset.from_wizard(np.ones((2, 5)), targets=3, chunks=2)
    data4.fa['test'] = np.arange(5)

    merged = vstack((data1.copy(), data2))

    ok_(merged.nfeatures == 5)
    l12 = [1] * 5 + [2] * 3
    l1 = [1] * 8
    ok_((merged.targets == l12).all())
    ok_((merged.chunks == l1).all())

    data_append = vstack((data1.copy(), data2))

    ok_(data_append.nfeatures == 5)
    ok_((data_append.targets == l12).all())
    ok_((data_append.chunks == l1).all())

    #
    # vstacking
    #
    if __debug__:
        # we need the same samples attributes in both datasets
        assert_raises(ValueError, vstack, (data2, data3))

        # tested only in __debug__
        assert_raises(ValueError, vstack, (data0, data1, data2, data3))
    datasets = (data1, data2, data4)
    merged = vstack(datasets)
    assert_equal(merged.shape,
                 (np.sum([len(ds) for ds in datasets]), data1.nfeatures))
    assert_true('test' in merged.fa)
    assert_array_equal(merged.sa.targets, [1] * 5 + [2] * 3 + [3] * 2)

    #
    # hstacking
    #
    assert_raises(ValueError, hstack, datasets)
    datasets = (data0, data1)
    merged = hstack(datasets)
    assert_equal(merged.shape,
                 (len(data1), np.sum([ds.nfeatures for ds in datasets])))
    assert_true('chunks' in merged.sa)
    assert_array_equal(merged.fa.one, [1] * 5 + [0] * 5)
Developer: Arthurkorn, Project: PyMVPA, Lines: 50, Source: test_datasetng.py

Example 2: _forward_dataset

    def _forward_dataset(self, ds):
        sliced_ds = [ds[sample_ids, feature_ids]
                            for sample_ids, feature_ids in
                                    zip(*(self._slice_sample_ids,
                                    self._slice_feature_ids))]

        return vstack(sliced_ds, True)
Developer: Arthurkorn, Project: PyMVPA, Lines: 7, Source: base.py

Example 3: _call

    def _call(self, dataset):
        """Computes featurewise f-scores using compound comparisons."""

        targets_sa = dataset.sa[self.get_space()]
        orig_labels = targets_sa.value
        labels = orig_labels.copy()

        # Lets create a very shallow copy of a dataset with just
        # samples and targets_attr
        dataset_mod = Dataset(dataset.samples,
                              sa={self.get_space() : labels})
        results = []
        for ul in targets_sa.unique:
            labels[orig_labels == ul] = 1
            labels[orig_labels != ul] = 2
            f_ds = OneWayAnova._call(self, dataset_mod)
            if 'fprob' in f_ds.fa:
                # rename the fprob attribute to something label specific
                # to survive final aggregation stage
                f_ds.fa['fprob_' + str(ul)] = f_ds.fa.fprob
                del f_ds.fa['fprob']
            results.append(f_ds)

        results = vstack(results)
        results.sa[self.get_space()] = targets_sa.unique
        return results
Developer: PepGardiola, Project: PyMVPA, Lines: 26, Source: anova.py

Example 4: test_usecase_concordancesl

    def test_usecase_concordancesl(self):
        import numpy as np
        from mvpa2.base.dataset import vstack
        from mvpa2.mappers.fx import mean_sample

        # Take our sample 3d dataset
        ds1 = datasets['3dsmall'].copy(deep=True)
        ds1.fa['voxel_indices'] = ds1.fa.myspace
        ds1.sa['subject'] = [1]  # not strictly necessary -- but kept for clarity
        ds1 = mean_sample()(ds1) # so we get just a single representative sample

        def corr12(ds):
            corr = np.corrcoef(ds.samples)
            assert(corr.shape == (2, 2)) # for paranoid ones
            return corr[0, 1]

        for nsc, thr, thr_mean in (
            (0, 1.0, 1.0),
            (0.1, 0.3, 0.8)):   # just a bit of noise
            ds2 = ds1.copy(deep=True)    # make a copy for the 2nd subject
            ds2.sa['subject'] = [2]
            ds2.samples += nsc * np.random.normal(size=ds1.shape)

            # make sure that both have the same voxel indices
            assert(np.all(ds1.fa.voxel_indices == ds2.fa.voxel_indices))
            ds_both = vstack((ds1, ds2))# join 2 images into a single dataset
                                        # with .sa.subject distinguishing both

            sl = sphere_searchlight(corr12, radius=2)
            slmap = sl(ds_both)
            ok_(np.all(slmap.samples >= thr))
            ok_(np.mean(slmap.samples) >= thr_mean)
Developer: kirty, Project: PyMVPA, Lines: 32, Source: test_searchlight.py

Example 5: _call

    def _call(self, dataset):
        sensitivities = []
        for ind, analyzer in enumerate(self.__analyzers):
            if __debug__:
                debug("SA", "Computing sensitivity for SA#%d:%s" %
                      (ind, analyzer))
            sensitivity = analyzer(dataset)
            sensitivities.append(sensitivity)

        if __debug__:
            debug("SA",
                  "Returning %d sensitivities from %s" %
                  (len(sensitivities), self.__class__.__name__))

        sa_attr = self._sa_attr
        if isinstance(sensitivities[0], AttrDataset):
            smerged = []
            for i, s in enumerate(sensitivities):
                s.sa[sa_attr] = np.repeat(i, len(s))
                smerged.append(s)
            sensitivities = vstack(smerged)
        else:
            sensitivities = \
                Dataset(sensitivities,
                        sa={sa_attr: np.arange(len(sensitivities))})

        self.ca.sensitivities = sensitivities

        return sensitivities
Developer: thomastweets, Project: PyMVPA, Lines: 29, Source: base.py

Example 6: loadsubdata

def loadsubdata(p, s, m=None, c=None):
    from mvpa2.base import dataset
    fds = {}
    for sub in s.keys():
        print 'loading ' + sub
        rds = [loadrundata(p, sub, r, m, c) for r in s[sub]]
        fds[sub] = dataset.vstack(rds, a=0)
    return fds
Developer: njchiang, Project: LanguageMVPA, Lines: 8, Source: lmvpautils.py
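Note the a=0 argument above: an integer tells vstack to take the dataset attributes (.a) of the result wholesale from the input at that index, the behavior Example 15 below also verifies. A quick sketch (an illustrative construction, assuming mvpa2 is installed; the attribute name is made up):

import numpy as np
from mvpa2.datasets import Dataset
from mvpa2.base.dataset import vstack

d0 = Dataset(np.ones((2, 3)))
d0.a['origin'] = 'run1'
d1 = Dataset(np.ones((2, 3)))
d1.a['origin'] = 'run2'

# a=0: the stacked result inherits .a from the first input
merged = vstack((d0, d1), a=0)
print(merged.a.origin)  # 'run1'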

Example 7: _forward_data

    def _forward_data(self, data):
        sliced_data = [np.vstack(data[sample_id, feature_ids]
                         for sample_id in sample_ids)
                                for sample_ids, feature_ids in
                                    zip(*(self._slice_sample_ids,
                                    self._slice_feature_ids))]

        return vstack(sliced_data)
Developer: Arthurkorn, Project: PyMVPA, Lines: 8, Source: base.py

Example 8: test_ex_from_masked

def test_ex_from_masked():
    ds = Dataset.from_wizard(samples=np.atleast_2d(np.arange(5)).view(myarray),
                             targets=1, chunks=1)
    # simple sequence has to be a single pattern
    assert_equal(ds.nsamples, 1)
    # array subclass survives
    ok_(isinstance(ds.samples, myarray))

    # check correct pattern layout (1x5)
    assert_array_equal(ds.samples, [[0, 1, 2, 3, 4]])

    # check for single label and origin
    assert_array_equal(ds.targets, [1])
    assert_array_equal(ds.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(ValueError, vstack,
                  (ds, Dataset.from_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    ds = vstack((ds, Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                         targets=2, chunks=2)))
    assert_equal(ds.nsamples, 3)
    assert_array_equal(ds.targets, [1, 2, 2])
    assert_array_equal(ds.chunks, [1, 2, 2])

    # test unique class labels
    ds = vstack((ds, Dataset.from_wizard(np.random.standard_normal((2, 5)),
                                         targets=3, chunks=5)))
    assert_array_equal(ds.sa['targets'].unique, [1, 2, 3])

    # test wrong attributes length
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)), targets=[1, 2, 3],
                  chunks=2)
    assert_raises(ValueError, Dataset.from_wizard,
                  np.random.standard_normal((4, 2, 3, 4)), targets=[1, 2, 3, 4],
                  chunks=[2, 2, 2])

    # now test one that is using from_masked
    ds = datasets['3dlarge']
    for a in ds.sa:
        assert_equal(len(ds.sa[a].value), len(ds))
    for a in ds.fa:
        assert_equal(len(ds.fa[a].value), ds.nfeatures)
Developer: Arthurkorn, Project: PyMVPA, Lines: 45, Source: test_datasetng.py

Example 9: arg2ds

def arg2ds(sources):
    """Convert a sequence of dataset sources into a dataset.

    This function would be used to convert a single --input
    multidata specification into a dataset. For multiple --input
    arguments, execute this function in a loop.
    """
    from mvpa2.base.dataset import vstack
    return vstack(hdf2ds(sources))
Developer: neurosbh, Project: PyMVPA, Lines: 9, Source: helpers.py
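A hypothetical invocation for context (the file names are made up; hdf2ds is the companion loader defined alongside arg2ds in mvpa2.cmdline.helpers):

from mvpa2.cmdline.helpers import arg2ds

# stack every dataset stored in the given HDF5 sources into one dataset
ds = arg2ds(['run1.hdf5', 'run2.hdf5'])
print(ds.nsamples)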

Example 10: test_ifs

    def test_ifs(self, svm):

        # measure for feature selection criterion and performance assessment
        # use the SAME clf!
        errorfx = mean_mismatch_error
        fmeasure = CrossValidation(svm, NFoldPartitioner(), postproc=mean_sample())
        pmeasure = ProxyMeasure(svm, postproc=BinaryFxNode(errorfx, 'targets'))

        ifs = IFS(fmeasure,
                  pmeasure,
                  Splitter('purpose', attr_values=['train', 'test']),
                  fselector=
                    # go for lower tail selection as data_measure will return
                    # errors -> low is good
                    FixedNElementTailSelector(1, tail='lower', mode='select'),
                  )
        wdata = self.get_data()
        wdata.sa['purpose'] = np.repeat('train', len(wdata))
        tdata = self.get_data()
        tdata.sa['purpose'] = np.repeat('test', len(tdata))
        ds = vstack((wdata, tdata))
        orig_nfeatures = ds.nfeatures

        ifs.train(ds)
        resds = ifs(ds)

        # fail if orig datasets are changed
        self.assertTrue(ds.nfeatures == orig_nfeatures)

        # check that the features set with the least error is selected
        self.assertTrue(len(ifs.ca.errors))
        e = np.array(ifs.ca.errors)
        self.assertTrue(resds.nfeatures == e.argmin() + 1)


        # repeat with dataset where selection order is known
        wsignal = datasets['dumb2'].copy()
        wsignal.sa['purpose'] = np.repeat('train', len(wsignal))
        tsignal = datasets['dumb2'].copy()
        tsignal.sa['purpose'] = np.repeat('test', len(tsignal))
        signal = vstack((wsignal, tsignal))
        ifs.train(signal)
        resds = ifs(signal)
        self.assertTrue((resds.samples[:,0] == signal.samples[:,0]).all())
Developer: Anhmike, Project: PyMVPA, Lines: 44, Source: test_ifs.py

Example 11: perm_hist

def perm_hist(subj):
	conf = AnalysisConfiguration()
	data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
	sub_dir = _opj(data_dir,conf.study_name,'sub{:0>3d}'.format(subj))
	directory = _opj(data_dir,'LP/sub{:0>3d}/results/'.format(subj))
	print conf.dir_name()
	for pair in conf.conditions_to_compare:
			#print _opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))
			files = sorted(glob(_opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))))
			plt.figure()
			plt.subplot(211)
			plt.title('sub{:0>3d}-{}{}'.format(subj,pair[0],pair[1]))
			print pair, " ", len(files)
			all_maps = []
			for f in files[:-1]:
				f_h = file(f,'r')
				m = pickle.load(f_h)
				all_maps.append(m)
				if 'perm' in f:
					color = 'black'
					line_width = 1
				else:
					color = 'crimson'
					line_width = 2
				plt.hist(np.transpose(m),bins=20,histtype='step',color=[color], lw = line_width)
			perms = vstack(all_maps)
			real_f = files[-1]
			f_h = file(real_f,'r')
			real_map = pickle.load(f_h)
			color = 'crimson'
			line_width = 2
			plt.hist(np.transpose(real_map),bins=20,histtype='step',color=[color], lw = line_width)
			percentiles = np.zeros((1,len(real_map.samples[0])))
			for i,vox in enumerate(real_map.samples[0]):
			    percentiles[0,i]=percentileofscore(perms[:,i].samples.flat,vox)
			plt.subplot(212)
			print len(percentiles[0])
			plt.hist(percentiles[0],bins=20,histtype='step')
			real_map.samples=percentiles
			nii = real_f.replace("_sl_map.p", "-acc.nii.gz")
			nii_file = nib.load(nii)
			perc_results = map2nifti(real_map, imghdr=nii_file.header)
			perc_nii_filename =real_f.replace("_sl_map.p", "-percentiles_sub{:0>3d}.nii.gz".format(subj))
			perc_results.to_filename(perc_nii_filename)
			thr_prc_filename = perc_nii_filename.replace(".nii.gz","_p0.01.nii.gz")
			thr = fsl.maths.Threshold(in_file=perc_nii_filename, thresh=100,
						  out_file=thr_prc_filename)
			thr.run()
			mni_thr_filename = thr_prc_filename.replace(".nii.gz","_mni.nii.gz")
			apply_warp(sub_dir,thr_prc_filename, mni_thr_filename)

			
	plt.show()
	#plt.savefig('/tmp/sub{:0>3d}_{}{}'.format(subj,pair[0],pair[1]))
	raw_input()
Developer: ronimaimon, Project: mvpa_analysis, Lines: 55, Source: perm_hist.py

Example 12: test_mergeds2

def test_mergeds2():
    """Test composition of new datasets by addition of existing ones
    """
    data = dataset_wizard([range(5)], targets=1, chunks=1)

    assert_array_equal(data.UT, [1])

    # simple sequence has to be a single pattern
    assert_equal(data.nsamples, 1)
    # check correct pattern layout (1x5)
    assert_array_equal(data.samples, [[0, 1, 2, 3, 4]])

    # check for single labels and origin
    assert_array_equal(data.targets, [1])
    assert_array_equal(data.chunks, [1])

    # now try adding pattern with wrong shape
    assert_raises(ValueError,
                  vstack,
                  (data, dataset_wizard(np.ones((2, 3)), targets=1, chunks=1)))

    # now add two real patterns
    dss = datasets['uni2large'].samples
    data = vstack((data, dataset_wizard(dss[:2, :5], targets=2, chunks=2)))
    assert_equal(data.nfeatures, 5)
    assert_array_equal(data.targets, [1, 2, 2])
    assert_array_equal(data.chunks, [1, 2, 2])

    # test automatic origins
    data = vstack((data, (dataset_wizard(dss[3:5, :5], targets=3, chunks=[0, 1]))))
    assert_array_equal(data.chunks, [1, 2, 2, 0, 1])

    # test unique class labels
    assert_array_equal(data.UT, [1, 2, 3])

    # test wrong label length
    assert_raises(ValueError, dataset_wizard, dss[:4, :5], targets=[ 1, 2, 3 ],
                                         chunks=2)

    # test wrong origin length
    assert_raises(ValueError, dataset_wizard, dss[:4, :5],
                  targets=[ 1, 2, 3, 4 ], chunks=[ 2, 2, 2 ])
Developer: Arthurkorn, Project: PyMVPA, Lines: 42, Source: test_datasetng.py

Example 13: load_subjectwise_ds

def load_subjectwise_ds(path, 
                       subjects, 
                       conf_file, 
                       task, 
                       extra_sa=None,  
                       **kwargs):
    """
    extra_sa: dict or None; sample attributes added to the final dataset. Each
    value should have the same length as the subjects list.
    
    subjects: either a list of subjects or a csv file
    
    """
    
    conf = read_configuration(os.path.join(path, conf_file), task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    
    if isinstance(subjects, str):        
        subjects, extra_sa = load_subject_file(subjects)
        
    
    logger.info('Merging subjects from '+data_path)
    
    for i, subj in enumerate(subjects):
        
        ds = load_dataset(data_path, subj, task, **conf)
        
        ds = detrend_dataset(ds, task, **conf)
        ds = normalize_dataset(ds, **conf)
        
        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
        
        
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged, ['group'], conf
Developer: robbisg, Project: mvpa_itab_wu, Lines: 53, Source: base.py

Example 14: load_subject_ds

def load_subject_ds(conf_file, 
                    task, 
                    extra_sa=None,
                    prepro=StandardPreprocessingPipeline(), 
                    **kwargs):
    
    """
    This is identical to load_subjectwise_ds, but a preprocessing
    pipeline can be specified to manage the data.
    
    """
    
    # TODO: conf file should include the full path
    conf = read_configuration(conf_file, task)
           
    conf.update(kwargs)
    logger.debug(conf)
    
    data_path = conf['data_path']
    
    # Subject file should be included in configuration
    subject_file = conf['subjects']
    subjects, extra_sa = load_subject_file(subject_file)
        
    logger.info('Merging %s subjects from %s' % (str(len(subjects)), data_path))
    
    for i, subj in enumerate(subjects):
        
        ds = load_dataset(data_path, subj, task, **conf)
        
        if ds == None:
            continue
        
        ds = prepro.transform(ds)
        
        # add extra samples
        if extra_sa != None:
            for k, v in extra_sa.iteritems():
                if len(v) == len(subjects):
                    ds.sa[k] = [v[i] for _ in range(ds.samples.shape[0])]
        
        
        # First subject
        if i == 0:
            ds_merged = ds.copy()
        else:
            ds_merged = vstack((ds_merged, ds))
            ds_merged.a.update(ds.a)
            
        
        del ds

    return ds_merged
Developer: robbisg, Project: mvpa_itab_wu, Lines: 53, Source: base.py

Example 15: test_stack_add_dataset_attributes

def test_stack_add_dataset_attributes():
    data0 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data0.a['one'] = np.ones(2)
    data0.a['two'] = 2
    data0.a['three'] = 'three'
    data0.a['common'] = range(10)
    data0.a['array'] = np.arange(10)
    data1 = Dataset.from_wizard(np.ones((5, 5)), targets=1)
    data1.a['one'] = np.ones(3)
    data1.a['two'] = 3
    data1.a['four'] = 'four'
    data1.a['common'] = range(10)
    data1.a['array'] = np.arange(10)


    vstacker = lambda x: vstack((data0, data1), a=x)
    hstacker = lambda x: hstack((data0, data1), a=x)

    add_params = (1, None, 'unique', 'uniques', 'all', 'drop_nonunique')

    for stacker in (vstacker, hstacker):
        for add_param in add_params:
            if add_param == 'unique':
                assert_raises(DatasetError, stacker, add_param)
                continue

            r = stacker(add_param)

            if add_param == 1:
                assert_array_equal(data1.a.one, r.a.one)
                assert_equal(r.a.two, 3)
                assert_equal(r.a.four, 'four')
                assert_true('three' not in r.a.keys())
                assert_true('array' in r.a.keys())
            elif add_param == 'uniques':
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.four, ('four',))
            elif add_param == 'all':
                assert_equal(set(r.a.keys()),
                             set(['one', 'two', 'three',
                                  'four', 'common', 'array']))
                assert_equal(r.a.two, (2, 3))
                assert_equal(r.a.three, ('three', None))
            elif add_param == 'drop_nonunique':
                assert_equal(set(r.a.keys()),
                             set(['common', 'three', 'four', 'array']))
                assert_equal(r.a.three, 'three')
                assert_equal(r.a.four, 'four')
                assert_equal(r.a.common, range(10))
                assert_array_equal(r.a.array, np.arange(10))
Developer: andreirusu, Project: PyMVPA, Lines: 53, Source: test_datasetng.py
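Example 15 above exercises the a keyword of vstack and hstack, which controls how the dataset attributes (.a) of the inputs are merged into the stacked result. A quick sketch of the 'drop_nonunique' mode (an illustrative construction, assuming mvpa2 is installed; the attribute name is made up):

import numpy as np
from mvpa2.datasets import Dataset
from mvpa2.base.dataset import vstack

d0 = Dataset(np.ones((2, 3)))
d0.a['scale'] = 1.0
d1 = Dataset(np.zeros((2, 3)))
d1.a['scale'] = 2.0

# 'drop_nonunique' keeps only attributes whose values agree across inputs,
# so the conflicting 'scale' is dropped from the result
merged = vstack((d0, d1), a='drop_nonunique')
print('scale' in merged.a.keys())  # False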


Note: the mvpa2.base.dataset.vstack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Before redistributing or reusing the code, please consult the license of the corresponding project; do not republish without permission.