

Python dataset.Dataset Class Code Examples

This article collects typical usage examples of the Python class neurosynth.base.dataset.Dataset. If you are wondering what the Dataset class is for, or how to use it in practice, the curated examples below should help.


The following presents 15 code examples of the Dataset class, drawn from open-source projects and ordered by popularity.
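Most of the examples follow the same basic pattern. As an orientation, here is a minimal sketch of that workflow (the file names and the 'emotion' term are placeholders; this is a composite sketch, not code from any one project below):

from neurosynth.base.dataset import Dataset
from neurosynth.analysis import meta

# Build a Dataset from the activation database, then attach term features.
dataset = Dataset('database.txt')
dataset.add_features('features.txt')

# Pickle the Dataset once so later runs can reload it cheaply.
dataset.save('dataset.pkl')
dataset = Dataset.load('dataset.pkl')

# Select the studies tagged with a term and run a meta-analysis over them.
ids = dataset.get_ids_by_features('emotion')
ma = meta.MetaAnalysis(dataset, ids)
z_map = ma.images['pFgA_z']  # z-scores for P(term|activation)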

Example 1: neurosynthInit

def neurosynthInit(dbsize):
    print("Initializing Neurosynth database...")
    dataset = Dataset('data/' + dbsize + 'terms/database.txt')
    dataset.add_features('data/' + dbsize + 'terms/features.txt')

    #print("Loading standard space brain...")
    #img = nb.load("data/MNI152_T1_2mm_brain.nii.gz")
    #standard = img.get_data()
    return dataset
Author: vsoch, Project: neuro2gene, Lines: 9, Source: neuro2gene.py

Example 2: test_dataset_save_and_load

def test_dataset_save_and_load(self):
    # Smoke test of saving and loading.
    t = tempfile.mktemp()
    self.dataset.save(t, keep_mappables=True)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertIsNotNone(dataset.mappables)
    self.assertEqual(len(dataset.mappables), 5)
    # Now save again with the mappables dropped (the default).
    dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertEqual(len(dataset.mappables), 0)
    os.unlink(t)
Author: chrisfilo, Project: Neurosynth, Lines: 15, Source: test_base.py

Example 3: _getdata

def _getdata():
    """Downloads data from neurosynth and returns it as a Dataset.

    Also pickles the dataset for future use."""
    LOG.warning("Downloading and processing Neurosynth database")

    os.makedirs("data", exist_ok=True)
    from neurosynth.base.dataset import download

    download(path="data", unpack=True)

    data = Dataset("data/database.txt")
    data.add_features("data/features.txt")
    data.save("data/dataset.pkl")
    return data
Author: fredcallaway, Project: brain_matrix, Lines: 15, Source: brain_matrix.py

Example 4: create_voxel_x_feature_matrix

def create_voxel_x_feature_matrix(path_to_dataset, path_to_image_files):
    dataset = Dataset.load(path_to_dataset)
    feature_list = dataset.get_feature_names()
    vox_feat_matrix = zeros((dataset.volume.num_vox_in_mask, len(feature_list)), dtype=int16)
    for i, feature in enumerate(feature_list):
        image_path = path_to_image_files + feature + '_pFgA_z.nii.gz'
        vox_feat_matrix[:, i] = dataset.volume.mask(image_path)
    return vox_feat_matrix
Author: acley, Project: neuro-data-matrix-factorization, Lines: 8, Source: voxel-x-feature-matrix.py

Example 5: generate_maps

def generate_maps(terms, output_dir):

    f, d = download_data()
    features = pandas.read_csv(f, sep="\t")
    database = pandas.read_csv(d, sep="\t")

    output_dir = "%s/maps" % (output_dir)

    print("Deriving pickled maps to extract relationships from...")
    dataset = Dataset(d)
    dataset.add_features(f)
    for t in range(len(terms)):
        term = terms[t]
        print("Generating P(term|activation) for term %s, %s of %s" % (term, t + 1, len(terms)))
        ids = dataset.get_ids_by_features(term)
        maps = meta.MetaAnalysis(dataset, ids)
        term_name = term.replace(" ", "_")
        pickle.dump(maps.images["pFgA_z"], open("%s/%s_pFgA_z.pkl" % (output_dir, term_name), "wb"))
Author: word-fish, Project: wordfish-plugins, Lines: 18, Source: functions.py

Example 6: test_dataset_save_and_load

def test_dataset_save_and_load(self):
    # Smoke test of saving and loading.
    t = tempfile.mktemp()
    self.dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertEqual(len(dataset.image_table.ids), 5)
    os.unlink(t)
Author: MQMQ0229, Project: neurosynth, Lines: 9, Source: test_base.py

Example 7: __init__

    def __init__(self, db, dataset=None, studies=None, features=None,
                 reset_db=False, reset_dataset=False, download_data=True):
        """
        Initialize instance from a pickled Neurosynth Dataset instance or a
        pair of study and analysis .txt files.

        Args:
            db: the SQLAlchemy database connection to use.
            dataset: an optional filename of a pickled neurosynth Dataset
                instance. Note that the Dataset must contain the list of
                Mappables (i.e., save() must have been called with
                keep_mappables set to True).
            studies: name of file containing activation data. If passed, a new
                Dataset instance will be constructed.
            features: name of file containing feature data.
            reset_db: if True, will drop and re-create all database tables
                before adding new content. If False (default), will add content
                incrementally.
            reset_dataset: if True, will regenerate the pickled Neurosynth
                dataset.
            download_data: if True, ignores any existing files and downloads
                the latest Neurosynth data files from GitHub.
        """

        if (studies is not None and not os.path.exists(studies)) \
                or settings.RESET_ASSETS:
            print("WARNING: RESETTING ALL NEUROSYNTH ASSETS!")
            self.reset_assets(download_data)

        # Load or create a Neurosynth Dataset instance.
        if dataset is None or reset_dataset or \
                (isinstance(dataset, str) and not os.path.exists(dataset)):
            print("\tInitializing a new Dataset...")
            if (studies is None) or (features is None):
                raise ValueError(
                    "To generate a new Dataset instance, both studies and "
                    "analyses must be provided.")
            dataset = Dataset(studies)
            dataset.add_features(features)
            dataset.save(settings.PICKLE_DATABASE, keep_mappables=True)
        else:
            print("\tLoading existing Dataset...")
            dataset = Dataset.load(dataset)
            if features is not None:
                dataset.add_features(features)

        self.dataset = dataset
        self.db = db

        if reset_db:
            print("WARNING: RESETTING DATABASE!!!")
            self.reset_database()
Author: UCL-CS35, Project: incdb-poc, Lines: 53, Source: database_builder.py
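
For context, a hypothetical instantiation of this constructor (the enclosing class name is not shown in the snippet; DatabaseBuilder, the engine object, and the file paths are all assumptions):

# Hypothetical usage; class name, connection object and paths are assumptions.
builder = DatabaseBuilder(db=engine,
                          studies='data/database.txt',
                          features='data/features.txt')
# ...or reload a pickle previously saved with keep_mappables=True:
builder = DatabaseBuilder(db=engine, dataset='data/dataset.pkl')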

Example 8: extract_relations

def extract_relations(terms, maps_dir, output_dir):

    if isinstance(terms, str):
        terms = [terms]

    f, d = download_data()
    features = pandas.read_csv(f, sep="\t")
    database = pandas.read_csv(d, sep="\t")
    allterms = features.columns.tolist()
    allterms.pop(0)  # pmid

    dataset = Dataset(d)
    dataset.add_features(f)
    image_matrix = pandas.DataFrame(columns=range(228453))
    for t in range(len(allterms)):
        term = allterms[t]
        term_name = term.replace(" ", "_")
        pickled_map = "%s/%s_pFgA_z.pkl" % (maps_dir, term_name)
        if not os.path.exists(pickled_map):
            print("Generating P(term|activation) for term %s" % (term))
            ids = dataset.get_ids_by_features(term)
            maps = meta.MetaAnalysis(dataset, ids)
            pickle.dump(maps.images["pFgA_z"], open(pickled_map, "wb"))
        map_data = pickle.load(open(pickled_map, "rb"))
        image_matrix.loc[term] = map_data

    sims = pandas.DataFrame(columns=image_matrix.index)
    tuples = []
    for t1 in range(len(terms)):
        term1 = terms[t1]
        print("Extracting NeuroSynth relationships for term %s..." % (term1))
        for t2 in range(len(terms)):
            term2 = terms[t2]
            if t1 < t2:
                score = pearsonr(image_matrix.loc[term1], image_matrix.loc[term2])[0]
                tuples.append((term1, term2, score))

    save_relations(output_dir=output_dir, relations=tuples)
Author: word-fish, Project: wordfish-plugins, Lines: 38, Source: functions.py

Example 9: TestAnalysis

class TestAnalysis(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_meta_analysis(self):
    """ Test full meta-analysis stream. """
    pass

  def test_decoder(self):
    pass

  def test_coactivation(self):
    """ Test seed-based coactivation. """ 
    pass

  def test_roi_averaging(self):
    pass

  def test_get_random_voxels(self):
    pass
Author: wanirepo, Project: Neurosynth, Lines: 23, Source: test_analysis.py

Example 10: import_neurosynth_git

    def import_neurosynth_git(self):
        # Add the appropriate neurosynth git folder to the python path.
        sys.path.append(self.npath)
        from neurosynth.base.dataset import Dataset
        from neurosynth.analysis import meta

        # Try to load a pickle if it exists. Create a new dataset instance
        # if it doesn't.
        try:
            self.dataset = cPickle.load(
                open(self.npath + os.sep + 'data/dataset.pkl', 'rb'))
        except IOError:
            # Create a Dataset instance from a database file.
            self.dataset = Dataset(self.npath + os.sep + 'data/database.txt')

        # Load features from file.
        self.dataset.add_features(self.npath + os.sep + 'data/features.txt')

        # Get names of features.
        self.feature_list = self.dataset.get_feature_names()
Author: law826, Project: Neurosynth_SNA, Lines: 20, Source: Neurosynth_SNA.py

Example 11: __init__

    def __init__(
        self,
        metric="emd",
        image_type="pAgF",
        name=None,
        multi=True,
        image_transform="block_reduce",
        downsample=8,
        auto_save=True,
    ):
        self.image_type = image_type
        self.multi = multi
        self.downsample = downsample
        self.auto_save = auto_save

        if callable(metric):
            self.metric = metric
        elif metric == "emd":
            self.metric = euclidean_emd
        else:
            raise ValueError("{metric} is not a valid metric".format(**locals()))

        if callable(image_transform):
            self.image_transform = image_transform
        elif image_transform == "block_reduce":
            from functools import partial

            self.image_transform = partial(block_reduce, factor=downsample)
            # def block_reduce_transform(image):
            # """The default transformation."""
            # return block_reduce(image, downsample, blur)
            # self.image_transform = block_reduce_transform
        else:
            raise ValueError(("{image_transform} is not a valid" "transform function").format(**locals()))
        self.name = name if name else time.strftime("analysis_from_%m-%d_%H-%M-%S")

        try:
            self.data = Dataset.load("data/dataset.pkl")
        except FileNotFoundError:
            self.data = _getdata()
Author: fredcallaway, Project: brain_matrix, Lines: 40, Source: brain_matrix.py

Example 12: Masker

# Imports covering all names used in this excerpt (the original omitted several).
import os

import nibabel as nib
from scipy import stats
from sklearn import metrics
from sklearn.cluster import KMeans, DBSCAN, MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler

from neurosynth.base.dataset import Dataset, FeatureTable
from neurosynth.base.mask import Masker
from neurosynth.base import transformations

base_path = '/home/pauli/Development/neurobabel/'
test_data_path = base_path + 'ACE/'
masker_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_one_sm_v2.nii.gz'
atlas_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_atlas_brain_sm_v2.nii.gz'
mask = nib.load(masker_filename)
masker = Masker(mask)
r = 1.0
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'

# load data set
dataset = Dataset(os.path.join(test_data_path, 'db_bregma_cog_atlas_export.txt'), masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(os.path.join(test_data_path, "db_bregma_cog_atlas_features.txt")) # add features
fn = dataset.get_feature_names()
features = dataset.get_feature_data()

n_xyz, n_articles = dataset.image_table.data.shape
# do topic modeling (LSA)
n_components = 20
svd = TruncatedSVD(n_components=n_components)
X = svd.fit_transform(features)
X_orig = X.copy()

X = StandardScaler().fit_transform(X_orig)

# db = DBSCAN(eps=10.0, min_samples=10).fit(X)
Author: wmpauli, Project: neurosynth, Lines: 31, Source: cluster_analysis.py
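
The excerpt stops just as clustering begins (the DBSCAN call is commented out). A minimal sketch of how that step might continue, consistent with the imports above — the cluster count is an assumption, and this is not the author's actual code:

# Sketch of a possible continuation (n_clusters=8 is an assumption):
# cluster the standardized LSA components and score cluster cohesion.
km = KMeans(n_clusters=8, n_init=10, random_state=0).fit(X)
labels = km.labels_
print("silhouette score:", metrics.silhouette_score(X, labels))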

Example 13: get_test_dataset

def get_test_dataset():
    test_data_path = get_test_data_path()
    dataset = Dataset(test_data_path + 'test_dataset.txt')
    dataset.add_features(test_data_path + 'test_features.txt')
    return dataset
Author: jdnc, Project: ml-project, Lines: 5, Source: utils.py

Example 14: setUp

def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
Author: poldrack, Project: Neurosynth, Lines: 4, Source: test_base.py

Example 15: TestBase

class TestBase(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_dataset_initializes(self):
    """ Test whether dataset initializes properly. """
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.image_table)
    self.assertEqual(len(self.dataset.mappables), 5)
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.r)

  def test_image_table_loads(self):
    """ Test ImageTable initialization. """
    self.assertIsNotNone(self.dataset.image_table)
    it = self.dataset.image_table
    self.assertEqual(len(it.ids), 5)
    self.assertIsNotNone(it.volume)
    self.assertIsNotNone(it.r)
    self.assertEqual(it.data.shape, (228453, 5))
    # Add tests for values in table

  def test_feature_table_loads(self):
    """ Test FeatureTable initialization. """
    tt = self.dataset.feature_table
    self.assertIsNotNone(tt)
    self.assertEqual(len(self.dataset.list_features()), 5)
    self.assertEqual(tt.data.shape, (5,5))
    self.assertEqual(tt.feature_names[3], 'f4')
    self.assertEqual(tt.data[0,0], 0.0003)

  def test_feature_search(self):
    """ Test feature-based Mappable search. Tests both the FeatureTable method 
    and the Dataset wrapper. """
    tt = self.dataset.feature_table
    features = tt.search_features(['f*'])
    self.assertEqual(len(features), 4)
    d = self.dataset
    ids = d.get_ids_by_features(['f*'], threshold=0.001)
    self.assertEqual(len(ids), 4)
    img_data = d.get_ids_by_features(['f1', 'f3', 'g1'], 0.001, func='max', get_image_data=True)
    self.assertEqual(img_data.shape, (228453, 5))

  def test_selection_by_mask(self):
    """ Test mask-based Mappable selection.
    Only one peak in the test dataset (in study5) should be within the sgACC. """
    ids = self.dataset.get_ids_by_mask('data/sgacc_mask.nii.gz')
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])

  def test_selection_by_peaks(self):
    """ Test peak-based Mappable selection. """
    ids = self.dataset.get_ids_by_peaks(np.array([[3, 30, -9]]))
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])
  
  # def test_invalid_coordinates_ignored(self):
  #   """ Test dataset contains 3 valid coordinates and one outside mask. But this won't work
Author: poldrack, Project: Neurosynth, Lines: 61, Source: test_base.py


Note: The neurosynth.base.dataset.Dataset class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, who retain copyright; consult each project's License before distributing or using the code. Do not reproduce this article without permission.