

Python Dataset.load Method Code Examples

This article collects representative usage examples of the Python method neurosynth.base.dataset.Dataset.load. If you are unsure what Dataset.load does, how to call it, or what it looks like in real code, the curated examples below should help. For broader context, see the other usage examples for the neurosynth.base.dataset.Dataset class.


The following 9 code examples of Dataset.load are shown, ordered by popularity.
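Before the examples, here is a minimal save-and-load round trip, a sketch assuming the database.txt and features.txt files distributed with Neurosynth sit in the working directory (the file names are placeholders):

from neurosynth.base.dataset import Dataset

# Build a Dataset from the study database, attach feature data, and pickle it.
dataset = Dataset('database.txt')
dataset.add_features('features.txt')
dataset.save('dataset.pkl', keep_mappables=True)

# Dataset.load unpickles a previously saved Dataset.
dataset = Dataset.load('dataset.pkl')
print(dataset.get_feature_names()[:10])

All of the calls above also appear in the examples that follow.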

Example 1: test_dataset_save_and_load

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
def test_dataset_save_and_load(self):
    # Smoke test of saving and loading.
    t = tempfile.mktemp()
    self.dataset.save(t, keep_mappables=True)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertIsNotNone(dataset.mappables)
    self.assertEqual(len(dataset.mappables), 5)
    # Save again without keep_mappables: the mappables are dropped on reload.
    dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertEqual(len(dataset.mappables), 0)
    os.unlink(t)
Author: chrisfilo | Project: Neurosynth | Lines: 17 | Source: test_base.py
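A side note on this test: tempfile.mktemp() is deprecated because the returned name can be claimed by another process before the file is created. A safer variant of the same round trip, sketched with mkstemp():

import os
import tempfile

# mkstemp() atomically creates the file and returns an open descriptor plus its path.
fd, path = tempfile.mkstemp(suffix='.pkl')
os.close(fd)
self.dataset.save(path, keep_mappables=True)
loaded = Dataset.load(path)
os.unlink(path)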

Example 2: create_voxel_x_feature_matrix

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
from numpy import int16, zeros

def create_voxel_x_feature_matrix(path_to_dataset, path_to_image_files):
    dataset = Dataset.load(path_to_dataset)
    feature_list = dataset.get_feature_names()
    # One row per voxel in the dataset mask, one column per feature.
    vox_feat_matrix = zeros((dataset.volume.num_vox_in_mask, len(feature_list)), dtype=int16)
    for i, feature in enumerate(feature_list):
        image_path = path_to_image_files + feature + '_pFgA_z.nii.gz'
        vox_feat_matrix[:, i] = dataset.volume.mask(image_path)
    return vox_feat_matrix
Author: acley | Project: neuro-data-matrix-factorization | Lines: 10 | Source: voxel-x-feature-matrix.py
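A hypothetical invocation of the function above, assuming a pickled dataset and a directory of per-feature _pFgA_z.nii.gz meta-analysis images (both paths are placeholders):

# Rows are voxels in the dataset mask; columns follow get_feature_names() order.
matrix = create_voxel_x_feature_matrix('dataset.pkl', '/data/feature_images/')
print(matrix.shape)  # (num_vox_in_mask, number_of_features)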

Example 3: test_dataset_save_and_load

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
def test_dataset_save_and_load(self):
    # Smoke test of saving and loading.
    t = tempfile.mktemp()
    self.dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertEqual(len(dataset.image_table.ids), 5)
    os.unlink(t)
Author: MQMQ0229 | Project: neurosynth | Lines: 11 | Source: test_base.py

Example 4: __init__

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
    def __init__(self, db, dataset=None, studies=None, features=None,
                 reset_db=False, reset_dataset=False, download_data=True):
        """
        Initialize instance from a pickled Neurosynth Dataset instance or a
        pair of study and analysis .txt files.

        Args:
            db: the SQLAlchemy database connection to use.
            dataset: an optional filename of a pickled neurosynth Dataset
                instance.
                Note that the Dataset must contain the list of Mappables (i.e.,
                    save() must have been called with keep_mappables set to
                    True).
            studies: name of file containing activation data. If passed, a new
                Dataset instance will be constructed.
            features: name of file containing feature data.
            reset_db: if True, will drop and re-create all database tables
                before adding new content. If False (default), will add content
                incrementally.
            reset_dataset: if True, will regenerate the pickled Neurosynth
                dataset.
            download_data: if True, ignores any existing files and downloads
                the latest Neurosynth data files from GitHub.
        """

        if (studies is not None and not os.path.exists(studies)) \
                or settings.RESET_ASSETS:
            print "WARNING: RESETTING ALL NEUROSYNTH ASSETS!"
            self.reset_assets(download_data)

        # Load or create Neurosynth Dataset instance
        if dataset is None or reset_dataset or (isinstance(dataset, str) and not os.path.exists(dataset)):

            print("\tInitializing a new Dataset...")
            if (studies is None) or (features is None):
                raise ValueError(
                    "To generate a new Dataset instance, both studies and "
                    "analyses must be provided.")
            dataset = Dataset(studies)
            dataset.add_features(features)
            dataset.save(settings.PICKLE_DATABASE, keep_mappables=True)
        else:
            print "\tLoading existing Dataset..."
            dataset = Dataset.load(dataset)
            if features is not None:
                dataset.add_features(features)

        self.dataset = dataset
        self.db = db

        if reset_db:
            print "WARNING: RESETTING DATABASE!!!"
            self.reset_database()
Author: UCL-CS35 | Project: incdb-poc | Lines: 55 | Source: database_builder.py
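The docstring above hinges on one detail: a pickled Dataset is only usable here if save() was called with keep_mappables=True. A hypothetical invocation (the class name DatabaseBuilder and the db session are stand-ins, since the snippet does not show them):

# Reuse an existing pickle; its Mappables must have been preserved at save time.
builder = DatabaseBuilder(db, dataset='data/neurosynth_dataset.pkl')

# Or rebuild from the raw study/feature text files and re-pickle.
builder = DatabaseBuilder(db, studies='data/database.txt',
                          features='data/features.txt', reset_dataset=True)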

Example 5: __init__

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
    def __init__(
        self,
        metric="emd",
        image_type="pAgF",
        name=None,
        multi=True,
        image_transform="block_reduce",
        downsample=8,
        auto_save=True,
    ):
        self.image_type = image_type
        self.multi = multi
        self.downsample = downsample
        self.auto_save = auto_save

        if callable(metric):
            self.metric = metric
        elif metric == "emd":
            self.metric = euclidean_emd
        else:
            raise ValueError("{metric} is not a valid metric".format(**locals()))

        if callable(image_transform):
            self.image_transform = image_transform
        elif image_transform == "block_reduce":
            from functools import partial

            self.image_transform = partial(block_reduce, factor=downsample)
        else:
            raise ValueError(
                "{image_transform} is not a valid transform function".format(**locals()))
        self.name = name if name else time.strftime("analysis_from_%m-%d_%H-%M-%S")

        try:
            self.data = Dataset.load("data/dataset.pkl")
        except FileNotFoundError:
            self.data = _getdata()
Author: fredcallaway | Project: brain_matrix | Lines: 42 | Source: brain_matrix.py
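Both metric and image_transform accept either a recognized string key or an arbitrary callable, so a custom distance can be plugged in without modifying the class. A sketch, assuming the enclosing class is named BrainMatrix (the snippet does not show its name):

import numpy as np

def manhattan(a, b):
    # Any callable taking two images and returning a scalar distance is accepted.
    return np.abs(np.asarray(a) - np.asarray(b)).sum()

bm = BrainMatrix(metric=manhattan, image_type='pAgF', downsample=8)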

Example 6: shuffle_data

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
###
# This script shuffles the classification labels and reruns the classification many
# times, generating data for a confidence interval around the null hypothesis.

from sklearn.linear_model import RidgeClassifier
from base.classifiers import OnevsallClassifier
from neurosynth.base.dataset import Dataset
from sklearn.metrics import roc_auc_score
import pickle
from random import shuffle

def shuffle_data(classifier):
    # Break the label-image pairing by shuffling labels within each region.
    for region in classifier.c_data:
        shuffle(region[1])


d_abs_topics_filt = Dataset.load('../data/datasets/abs_topics_filt_july.pkl')

results = []

clf = OnevsallClassifier(d_abs_topics_filt, '../masks/Ward/50.nii.gz', cv='4-Fold',
                         thresh=10, thresh_low=0, memsave=True,
                         classifier=RidgeClassifier())
clf.load_data(None, None)
clf.initalize_containers(None, None, None)


for i in range(0, 500):
    shuffle_data(clf)
    clf.classify(scoring=roc_auc_score, processes=8, class_weight=None)
    results = list(clf.class_score) + results
    print(i, end=' ')  # progress indicator
Author: margulies | Project: NS_Classify | Lines: 32 | Source: resample_ova.py
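The script's stated goal is a confidence interval around the null hypothesis. A minimal sketch of turning the pooled results into one, assuming the scores accumulate as a flat list of AUC values:

import numpy as np

null_scores = np.array(results)
# Empirical 95% interval of the label-shuffled (null) AUC distribution.
lower, upper = np.percentile(null_scores, [2.5, 97.5])
print('null 95% CI: [%.3f, %.3f]' % (lower, upper))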

Example 7:

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
from neurosynth.analysis.cluster import magic
from neurosynth.base.dataset import Dataset

# mydir = "/home/delavega/projects/permutation_clustering/"

mydir = "../data/"

dataset = Dataset.load(mydir + '/datasets/abs_60topics_filt_jul.pkl')

roi_mask = mydir + '../masks/new_medial_fc_30.nii.gz'
global_mask = mydir + "../masks/MNI152_T1_2mm_brain.nii.gz"

magic(dataset, 10, method='coactivation',
      features=['topic57', 'topic32', 'topic39', 'topic44'],
      output_dir='../results/ef_cluster/all_cluster/',
      min_studies_per_voxel=100, filename='okay')
Author: csddzh | Project: NS_Classify | Lines: 15 | Source: ef_cluster.py

Example 8: dlmread

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
# 1) Apply the Talairach-to-ICBM transform to the image header - Matlab
t = dlmread('/Users/lukechang/Dropbox/Github/toolbox/Python/ImageProcessing/tal2icbm_fsl.mat');
dat.volInfo.mat = inv(t)*dat.volInfo.mat;
dat.fullpath = '/Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/Friend.nii';
write(dat)

# 2) Reorient using FSL - Unix
fslreorient2std Friend Friend_Or

# 3) Coregister to 2mm MNI space - Unix
/usr/local/fsl/bin/flirt -in /Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/Friend_Or.nii.gz -ref /usr/local/fsl/data/standard/MNI152_T1_2mm_brain -out /Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/Friend_Or_Mni.nii.gz -omat /Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/Friend_Or_Mni.mat -bins 256 -cost corratio -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12  -interp trilinear

# 4) Decode - Python
from neurosynth.base.dataset import Dataset
from neurosynth.base import imageutils
from neurosynth.analysis import decode
import numpy as np

DATASET_FILE = '/Users/lukechang/Dropbox/Github/neurosynth/topics.pkl'
PREFIX = '/Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/'
INFILE = 'Friend_Or_Mni.nii.gz'
dataset = Dataset.load(DATASET_FILE)
decoder = decode.Decoder(dataset)  # Takes a while to load; only build the decoder once.
img = imageutils.load_imgs(PREFIX + INFILE, decoder.mask)
result = decoder.decode(img)
np.savetxt(PREFIX + 'Friend_Decoded.txt', result)

# 5) Threshold at .001 - unix
fslmaths Friend_Or_Mni -thr 3 Friend_Or_Mni_001

# 6) Decode thresholded map - python
DATASET_FILE = '/Users/lukechang/Dropbox/Github/neurosynth/topics.pkl'
PREFIX = '/Users/lukechang/Research/Trust_Friend/Analyses/NeurosynthDecode/'
INFILE = 'Friend_Or_Mni_001.nii.gz'
dataset = Dataset.load(DATASET_FILE)
decoder = decode.Decoder(dataset)  # Reuse the decoder from step 4 instead of rebuilding, if still in memory.
img = imageutils.load_imgs(PREFIX + INFILE, decoder.mask)
Author: GordonMatthewson | Project: CosanlabToolbox | Lines: 33 | Source: tal2mni.py
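Step 5 above shells out to fslmaths for thresholding; the same operation can stay in Python. A sketch using nibabel (an assumption: nibabel is not used by the original script):

import nibabel as nib

img = nib.load(PREFIX + 'Friend_Or_Mni.nii.gz')
data = img.get_fdata()
data[data < 3] = 0  # z > 3 roughly matches the p < .001 threshold in step 5
nib.save(nib.Nifti1Image(data, img.affine), PREFIX + 'Friend_Or_Mni_001.nii.gz')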

Example 9: __init__

# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import load [as alias]
# -*- coding: utf-8 -*-

# Here I use Yeo to test Neurosynth's classify functions
from neurosynth.base.dataset import Dataset
from neurosynth.analysis import classify
import os
import itertools
import re
import numpy as np
import pdb
import sys
from nipype.interfaces import fsl
from sklearn.ensemble import GradientBoostingClassifier


dataset = Dataset.load('../data/pickled.pkl')

masklist = ['7Networks_Liberal_1.nii.gz', '7Networks_Liberal_2.nii.gz',
            '7Networks_Liberal_3.nii.gz', '7Networks_Liberal_4.nii.gz',
            '7Networks_Liberal_5.nii.gz', '7Networks_Liberal_6.nii.gz',
            '7Networks_Liberal_7.nii.gz']

rootdir = '../masks/Yeo_JNeurophysiol11_MNI152/standardized/'


class maskClassifier:

    def __init__(self, classifier=GradientBoostingClassifier(),
                 param_grid={'max_features': np.arange(2, 140, 44),
                             'n_estimators': np.arange(5, 141, 50),
                             'learning_rate': np.arange(0.05, 1, 0.1)},
                 thresh=0.08):
        # The original snippet ends mid-definition; store the arguments so it parses.
        self.classifier = classifier
        self.param_grid = param_grid
        self.thresh = thresh


diffs = {}
Author: margulies | Project: NS_Classify | Lines: 33 | Source: Yeo_Test.py
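The param_grid above is shaped for scikit-learn's grid search, which the truncated __init__ presumably feeds. A sketch of how such a grid is typically consumed; X and y are placeholders for mask-derived features and labels:

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np

param_grid = {'max_features': np.arange(2, 140, 44),
              'n_estimators': np.arange(5, 141, 50),
              'learning_rate': np.arange(0.05, 1, 0.1)}

search = GridSearchCV(GradientBoostingClassifier(), param_grid, cv=4)
# search.fit(X, y)   # X: voxel features per study, y: mask labels (placeholders)
# print(search.best_params_)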


Note: The neurosynth.base.dataset.Dataset.load examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects and their copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.