

Python zscore.zscore Function Code Examples

This article collects typical usage examples of the Python function mvpa2.mappers.zscore.zscore. If you are unsure how to use zscore in Python, or are looking for concrete examples of it in use, the curated code examples below may help.


The following shows 15 code examples of the zscore function, sorted by popularity by default.
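
Before looking at the individual examples, here is a minimal sketch of the typical call pattern. This sketch is not taken from any of the projects below; it assumes PyMVPA 2.x is installed and uses a made-up toy dataset purely for illustration:

import numpy as np
from mvpa2.datasets import Dataset
from mvpa2.mappers.zscore import zscore

# toy dataset: 8 samples x 4 features with arbitrary values
ds = Dataset(np.arange(32.0).reshape((8, -1)), sa=dict(targets=range(8)))

# z-score the samples in place; chunks_attr=None treats the whole dataset
# as a single chunk (the form most examples below use)
zscore(ds, chunks_attr=None)

# with multi-run data, normalization is usually done per run ('chunks'),
# optionally estimating mean/std from 'rest' samples only, e.g.:
# zscore(ds, chunks_attr='chunks', param_est=('targets', ['rest']), dtype='float32')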

Example 1: prepare_subject_for_hyperalignment

def prepare_subject_for_hyperalignment(subject_label, bold_fname, mask_fname, out_dir):
    print('Loading data %s with mask %s' % (bold_fname, mask_fname))
    ds = fmri_dataset(samples=bold_fname, mask=mask_fname)
    zscore(ds, chunks_attr=None)
    out_fname = os.path.join(out_dir, 'sub-%s_data.hdf5' % subject_label)
    print('Saving to %s' % out_fname)
    h5save(out_fname, ds)
Author: BIDS-Apps, Project: hyperalignment, Lines of code: 7, Source file: run.py

Example 2: test_mapper_vs_zscore

def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20,-1).T,
                targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
Author: Anhmike, Project: PyMVPA, Lines of code: 35, Source file: test_zscoremapper.py

Example 3: _get_seed_means

    def _get_seed_means(self, measure, queryengine, dataset, seed_indices):
        # Computing seed data as mean timeseries in each SL
        seed_data = Searchlight(measure, queryengine=queryengine,
                                nproc=self.params.nproc, roi_ids=seed_indices)
        seed_data = seed_data(dataset)
        zscore(seed_data, chunks_attr=None)
        return seed_data
Author: PyMVPA, Project: PyMVPA, Lines of code: 7, Source file: connectivity_hyperalignment.py

Example 4: test_zscore_withoutchunks

def test_zscore_withoutchunks():
    # just a smoke test to see if all issues of
    # https://github.com/PyMVPA/PyMVPA/issues/26
    # are fixed
    from mvpa2.datasets import Dataset
    ds = Dataset(np.arange(32).reshape((8,-1)), sa=dict(targets=range(8)))
    zscore(ds, chunks_attr=None)
    assert(np.any(ds.samples != np.arange(32).reshape((8,-1))))
    ds_summary = ds.summary()
    assert(ds_summary is not None)
Author: Anhmike, Project: PyMVPA, Lines of code: 10, Source file: test_zscoremapper.py

Example 5: compute_connectivity_profile_similarity

    def compute_connectivity_profile_similarity(self, dss):
        # from scipy.spatial.distance import pdist, squareform
        # conns = [1 - squareform(pdist(ds.samples.T, 'correlation')) for ds in dss]
        conns = [np.corrcoef(ds.samples.T) for ds in dss]
        conn_sum = np.sum(conns, axis=0)
        sim = np.zeros((len(dss), dss[0].shape[1]))
        for i, conn in enumerate(conns):
            conn_diff = conn_sum - conn
            zscore(conn_diff, chunks_attr=None)
            zscore(conn, chunks_attr=None)
            sim[i] = np.mean(conn_diff * conn, axis=0)
        return sim
Author: PyMVPA, Project: PyMVPA, Lines of code: 12, Source file: test_connectivity_hyperalignment.py

Example 6: create_betas_per_trial_with_pymvpa_roni

def create_betas_per_trial_with_pymvpa_roni(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well, not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print(mask_fname)
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue

        # all_events = dhandle.get_bold_run_model(model, subj, run_id)
        all_events = get_bold_run_model(dhandle, 2, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], event["id"])
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa["id"] = [x[x.find("-") + 1 :] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)

        # If a trial was dropped (the subject pressed a button), then the counterpart trial from
        # the other condition should also be dropped
        for pair in conf.conditions_to_compare:
            cond_bool = np.array([c in pair for c in glm_dataset.sa["condition"]])
            sub_dataset = glm_dataset[cond_bool]
            c = Counter(sub_dataset.sa.id)
            for value in c:
                if c[value] < 2:
                    id_bool = np.array([value in cond_id for cond_id in glm_dataset.sa["id"]])
                    glm_dataset = glm_dataset[np.bitwise_not(np.logical_and(id_bool, cond_bool))]

        run_datasets.append(glm_dataset)

    return vstack(run_datasets, 0)
Author: ronimaimon, Project: mvpa_analysis, Lines of code: 50, Source file: ds_creation.py

Example 7: _level1

    def _level1(self, datasets, commonspace, ref_ds, mappers, residuals):
        params = self.params            # for quicker access ;)
        data_mapped = [ds.samples for ds in datasets]
        counts = 1  # number of datasets used so far for generating commonspace
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 1: ds #%i" % i)
            if i == ref_ds:
                continue
            # assign common space to ``space`` of the mapper, because this is
            # where it will be looking for it
            ds_new.sa[m.get_space()] = commonspace
            # find transformation of this dataset into the current common space
            m.train(ds_new)
            # remove common space attribute again to save on memory when the
            # common space is updated for the next iteration
            del ds_new.sa[m.get_space()]
            # project this dataset into the current common space
            ds_ = m.forward(ds_new.samples)
            if params.zscore_common:
                zscore(ds_, chunks_attr=None)
            # replace original dataset with mapped one -- only the reference
            # dataset will remain unchanged
            data_mapped[i] = ds_

            # compute first-level residuals with respect to the initial common space
            if residuals is not None:
                residuals[0, i] = np.linalg.norm(ds_ - commonspace)

            # Update the common space. This is an incremental update after
            # processing each 1st-level dataset. Maybe there should be a flag
            # to make a batch update after processing all 1st-level datasets
            # to an identical 1st-level common space
            # TODO: make this just a function so we don't waste space
            if params.level1_equal_weight:
                commonspace = params.combiner1(ds_, commonspace,
                                               weights=(float(counts), 1.0))
            else:
                commonspace = params.combiner1(ds_, commonspace)
            counts += 1
            if params.zscore_common:
                zscore(commonspace, chunks_attr=None)
        return data_mapped
Author: PyMVPA, Project: PyMVPA, Lines of code: 43, Source file: hyperalignment.py

Example 8: test_hyper_input_dataset_check

    def test_hyper_input_dataset_check(self):
        # If supplied with only one dataset during training,
        # make sure it doesn't run multiple levels and crap out
        ha = Hyperalignment()
        ds_all = [datasets['uni4small'] for i in range(3)]
        # Make sure it raises TypeError if a list is not passed
        self.assertRaises(TypeError, ha, ds_all[0])
        self.assertRaises(TypeError, ha.train, ds_all[0])
        # And it doesn't crap out with a single dataset for training
        ha.train([ds_all[0]])
        zscore(ds_all[0], chunks_attr=None)
        assert_array_equal(ha.commonspace, ds_all[0].samples)
        # make sure it accepts tuple of ndarray
        ha = Hyperalignment()
        m = ha(tuple(ds_all))
        ha = Hyperalignment()
        dss_arr = np.empty(len(ds_all), dtype=object)
        for i in range(len(ds_all)):
            dss_arr[i] = ds_all[i]
        m = ha(dss_arr)
Author: swaroopgj, Project: PyMVPA, Lines of code: 20, Source file: test_hyperalignment.py

Example 9: detrend

def detrend(ds):
	#print ds.summary()
	ds.samples = ds.samples.astype('float')
	pl.figure()
	pl.subplot(221)
	plot_samples_distance(ds, sortbyattr='chunks')
	#plot_samples_distance(ds)
	pl.title('Sample distances (sorted by chunks)')
	poly_detrend(ds, polyord=2, chunks_attr='chunks')
	pl.subplot(222)
	plot_samples_distance(ds, sortbyattr='chunks')
	pl.show()
	zscore(ds, chunks_attr='chunks', dtype='float32')
	pl.subplot(223)
	plot_samples_distance(ds, sortbyattr='chunks')
	pl.subplot(224)
#	plot_samples_distance(ds, sortbyattr='targets')
	pl.title('Sample distances (sorted by condition)')
	pl.show()
	#poly_detrend(ds, polyord=1, chunks_attr='chunks')
	#zscore(ds, chunks_attr='chunks', dtype='float32')
	return ds
Author: ronimaimon, Project: mvpa_analysis, Lines of code: 22, Source file: visualize_data.py

Example 10: get_testdata

    def get_testdata(self):
        # rs = np.random.RandomState(0)
        rs = np.random.RandomState()
        nt = 200
        n_triangles = 4
        ns = 10
        nv = n_triangles * 3
        vertices = np.zeros((nv, 3))  # 4 separated triangles
        faces = []
        for i in range(n_triangles):
            vertices[i*3] = [i*2, 0, 0]
            vertices[i*3+1] = [i*2+1, 1/np.sqrt(3), 0]
            vertices[i*3+2] = [i*2+1, -1/np.sqrt(3), 0]
            faces.append([i*3, i*3+1, i*3+2])
        faces = np.array(faces)
        surface = Surface(vertices, faces)

        ds_orig = np.zeros((nt, nv))
        # add coarse-scale information
        for i in range(n_triangles):
            ds_orig[:, i*3:(i+1)*3] += rs.normal(size=(nt, 1))
        # add fine-scale information
        ds_orig += rs.normal(size=(nt, nv))
        dss_train, dss_test = [], []
        for i in range(ns):
            ds = np.zeros_like(ds_orig)
            for j in range(n_triangles):
                ds[:, j*3:(j+1)*3] = np.dot(ds_orig[:, j*3:(j+1)*3],
                                            get_random_rotation(3))
                                            # special_ortho_group.rvs(3, random_state=rs))
            ds = Dataset(ds)
            ds.fa['node_indices'] = np.arange(nv)
            ds_train, ds_test = ds[:nt//2, :], ds[nt//2:, :]
            zscore(ds_train, chunks_attr=None)
            zscore(ds_test, chunks_attr=None)
            dss_train.append(ds_train)
            dss_test.append(ds_test)
        return dss_train, dss_test, surface
Author: PyMVPA, Project: PyMVPA, Lines of code: 38, Source file: test_connectivity_hyperalignment.py

Example 11: test_linear_svm_weights_per_class

    def test_linear_svm_weights_per_class(self, svm):
        # assuming many defaults it is as simple as
        kwargs = dict(enable_ca=["sensitivities"])
        sana_split = svm.get_sensitivity_analyzer(
            split_weights=True, **kwargs)
        sana_full = svm.get_sensitivity_analyzer(
            force_train=False, **kwargs)

        # and let's look at all sensitivities
        ds2 = datasets['uni4large'].copy()
        zscore(ds2, param_est=('targets', ['L2', 'L3']))
        ds2 = ds2[np.logical_or(ds2.sa.targets == 'L0', ds2.sa.targets == 'L1')]

        senssplit = sana_split(ds2)
        sensfull = sana_full(ds2)

        self.assertEqual(senssplit.shape, (2, ds2.nfeatures))
        self.assertEqual(sensfull.shape, (1, ds2.nfeatures))

        # just to verify that we split properly and if we reconstruct
        # manually we obtain the same
        dmap = (-1 * senssplit.samples[1] + senssplit.samples[0]) \
               - sensfull.samples
        self.assertTrue((np.abs(dmap) <= 1e-10).all())
        #print "____"
        #print senssplit
        #print SMLR().get_sensitivity_analyzer(combiner=None)(ds2)

        # for now we can do split weights for binary tasks only, so
        # let's check if we raise a concern
        # we temporarily shut down warnings, since it is going to complain
        # otherwise, but we do it on purpose here
        handlers = warning.handlers
        warning.handlers = []
        self.assertRaises(NotImplementedError,
                              sana_split, datasets['uni3medium'])
        # reenable the warnings
        warning.handlers = handlers
Author: andreirusu, Project: PyMVPA, Lines of code: 38, Source file: test_datameasure.py

Example 12: test_connectivity_hyperalignment

    def test_connectivity_hyperalignment(self):
        skip_if_no_external('scipy')
        skip_if_no_external('hdf5')  # needed for default results backend hdf5

        dss_train, dss_test, surface = self.get_testdata()
        qe = SurfaceQueryEngine(surface, 10, fa_node_key='node_indices')
        cha = ConnectivityHyperalignment(
            mask_ids=[0, 3, 6, 9],
            seed_indices=[0, 3, 6, 9],
            seed_queryengines=qe,
            queryengine=qe)
        mappers = cha(dss_train)
        aligned_train = [mapper.forward(ds) for ds, mapper in zip(dss_train, mappers)]
        aligned_test = [mapper.forward(ds) for ds, mapper in zip(dss_test, mappers)]
        for ds in aligned_train + aligned_test:
            zscore(ds, chunks_attr=None)
        sim_train_before = self.compute_connectivity_profile_similarity(dss_train)
        sim_train_after = self.compute_connectivity_profile_similarity(aligned_train)
        sim_test_before = self.compute_connectivity_profile_similarity(dss_test)
        sim_test_after = self.compute_connectivity_profile_similarity(aligned_test)
        # ISC should be higher after CHA for both training and testing data
        self.assertTrue(sim_train_after.mean() > sim_train_before.mean())
        self.assertTrue(sim_test_after.mean() > sim_test_before.mean())
Author: PyMVPA, Project: PyMVPA, Lines of code: 23, Source file: test_connectivity_hyperalignment.py

Example 13: create_betas_per_trial_with_pymvpa

def create_betas_per_trial_with_pymvpa(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well, not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print(mask_fname)
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue
        all_events = dhandle.get_bold_run_model(model, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], i)
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        print(run_id)

        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)
        run_datasets.append(glm_dataset)
    return vstack(run_datasets, 0)
Author: ronimaimon, Project: mvpa_analysis, Lines of code: 36, Source file: ds_creation.py

Example 14: _level2

    def _level2(self, datasets, lvl1_data, mappers, residuals):
        params = self.params            # for quicker access ;)
        data_mapped = lvl1_data
        # aggregate all processed 1st-level datasets into a new 2nd-level
        # common space
        commonspace = params.combiner2(data_mapped)

        # XXX Why is this commented out? Who knows what combiner2 is doing and
        # whether it changes the distribution of the data
        #if params.zscore_common:
        #zscore(commonspace, chunks_attr=None)

        ndatasets = len(datasets)
        for loop in xrange(params.level2_niter):
            # 2nd-level alignment starts from the original/unprojected datasets
            # again
            for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
                if __debug__:
                    debug('HPAL_', "Level 2 (%i-th iteration): ds #%i" % (loop, i))

                # Optimization speed up heuristic
                # Slightly modify the common space towards other feature
                # spaces and reduce influence of this feature space for the
                # to-be-computed projection
                temp_commonspace = (commonspace * ndatasets - data_mapped[i]) \
                                    / (ndatasets - 1)

                if params.zscore_common:
                    zscore(temp_commonspace, chunks_attr=None)
                # assign current common space
                ds_new.sa[m.get_space()] = temp_commonspace
                # retrain the mapper for this dataset
                m.train(ds_new)
                # remove common space attribute again to save on memory when the
                # common space is updated for the next iteration
                del ds_new.sa[m.get_space()]
                # obtain the 2nd-level projection
                ds_ = m.forward(ds_new.samples)
                if params.zscore_common:
                    zscore(ds_, chunks_attr=None)
                # store for 2nd-level combiner
                data_mapped[i] = ds_
                # compute residuals
                if residuals is not None:
                    residuals[1+loop, i] = np.linalg.norm(ds_ - commonspace)

            commonspace = params.combiner2(data_mapped)

        # and again
        if params.zscore_common:
            zscore(commonspace, chunks_attr=None)

        # return the final common space
        return commonspace
Author: adamatus, Project: PyMVPA, Lines of code: 54, Source file: hyperalignment.py

Example 15: test_hypal_michael_caused_problem

    def test_hypal_michael_caused_problem(self):
        from mvpa2.misc import data_generators
        from mvpa2.mappers.zscore import zscore
        # Fake data
        ds = data_generators.normal_feature_dataset(nfeatures=20)
        ds_all = [data_generators.random_affine_transformation(ds) for i in range(3)]
        _ = [zscore(sd, chunks_attr=None) for sd in ds_all]
        # Making random data per subject for testing with bias added to first subject
        ds_test = [np.random.rand(1, ds.nfeatures) for i in range(len(ds_all))]
        ds_test[0] += np.arange(1, ds.nfeatures + 1) * 100
        assert(np.corrcoef(ds_test[2], ds_test[1])[0, 1] < 0.99)  # that would have been ridiculous if it was

        # Test with varying alpha to make sure we no longer have that issue
        for alpha in (0, 0.01, 0.5, 0.99, 1.0):
            hyper09 = Hyperalignment(alpha=alpha)
            mappers = hyper09([sd for sd in ds_all])
            ds_test_a = [m.forward(sd) for m, sd in zip(mappers, ds_test)]
            ds_test_a = [mappers[0].reverse(sd) for sd in ds_test_a]
            corr = np.corrcoef(ds_test_a[2], ds_test_a[1])[0, 1]
            assert(corr < 0.99)
Author: hanke, Project: PyMVPA, Lines of code: 20, Source file: test_hyperalignment.py


Note: The mvpa2.mappers.zscore.zscore examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.