

Python Dataset.add_features Method Code Examples

This article collects typical usage examples of the Python method neurosynth.base.dataset.Dataset.add_features. If you are wondering what Dataset.add_features does, how to call it, or how it is used in practice, the curated examples below should help. You can also browse the other usage examples under neurosynth.base.dataset.Dataset to learn more about the class this method belongs to.


The following presents 15 code examples of Dataset.add_features, sorted by popularity by default.
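Before the examples, here is a minimal sketch of the pattern they all share: construct a Dataset from an activation database file, then attach per-study term weights with add_features. The file names are illustrative assumptions (the standard Neurosynth distribution ships database.txt and features.txt; Example 14 shows this exact workflow):

# Minimal usage sketch; file paths are assumptions for illustration
from neurosynth.base.dataset import Dataset

dataset = Dataset('database.txt')        # per-study activation coordinates
dataset.add_features('features.txt')     # tab-delimited study-by-term weight matrix
print(dataset.get_feature_names()[:5])   # the loaded features are now queryable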

Example 1: neurosynthInit

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def neurosynthInit(dbsize):
    print "Initializing Neurosynth database..."
    dataset = Dataset('data/' + dbsize + 'terms/database.txt')
    dataset.add_features('data/' + dbsize + 'terms/features.txt')    

    #print "Loading standard space brain..."
    #img = nb.load("data/MNI152_T1_2mm_brain.nii.gz")
    #standard = img.get_data()
    return dataset
Developer: vsoch, Project: neuro2gene, Lines: 11, Source: neuro2gene.py

Example 2: __init__

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
    def __init__(self, db, dataset=None, studies=None, features=None,
                 reset_db=False, reset_dataset=False, download_data=True):
        """
        Initialize instance from a pickled Neurosynth Dataset instance or a
        pair of study and analysis .txt files.

        Args:
            db: the SQLAlchemy database connection to use.
            dataset: an optional filename of a pickled neurosynth Dataset
                instance.
                Note that the Dataset must contain the list of Mappables (i.e.,
                    save() must have been called with keep_mappables set to
                    True).
            studies: name of file containing activation data. If passed, a new
                Dataset instance will be constructed.
            features: name of file containing feature data.
            reset_db: if True, will drop and re-create all database tables
                before adding new content. If False (default), will add content
                incrementally.
            reset_dataset: if True, will regenerate the pickled Neurosynth
                dataset.
            download_data: if True, ignores any existing files and downloads
                the latest Neurosynth data files from GitHub.
        """

        if (studies is not None and not os.path.exists(studies)) \
                or settings.RESET_ASSETS:
            print "WARNING: RESETTING ALL NEUROSYNTH ASSETS!"
            self.reset_assets(download_data)

        # Load or create Neurosynth Dataset instance
        if dataset is None or reset_dataset or (isinstance(dataset, str) and not os.path.exists(dataset)):  # str replaces Python 2's basestring

            print "\tInitializing a new Dataset..."
            if (studies is None) or (features is None):
                raise ValueError(
                    "To generate a new Dataset instance, both studies and "
                    "analyses must be provided.")
            dataset = Dataset(studies)
            dataset.add_features(features)
            dataset.save(settings.PICKLE_DATABASE, keep_mappables=True)
        else:
            print "\tLoading existing Dataset..."
            dataset = Dataset.load(dataset)
            if features is not None:
                dataset.add_features(features)

        self.dataset = dataset
        self.db = db

        if reset_db:
            print "WARNING: RESETTING DATABASE!!!"
            self.reset_database()
Developer: UCL-CS35, Project: incdb-poc, Lines: 55, Source: database_builder.py

Example 3: _getdata

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def _getdata():
    """Downloads data from neurosynth and returns it as a Dataset.

    Also pickles the dataset for future use."""
    LOG.warning("Downloading and processing Neurosynth database")

    os.makedirs("data", exist_ok=True)
    from neurosynth.base.dataset import download

    download(path="data", unpack=True)

    data = Dataset("data/database.txt")
    data.add_features("data/features.txt")
    data.save("data/dataset.pkl")
    return data
Developer: fredcallaway, Project: brain_matrix, Lines: 17, Source: brain_matrix.py

Example 4: generate_maps

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def generate_maps(terms,output_dir):

    f,d = download_data()
    features = pandas.read_csv(f,sep="\t")  
    database = pandas.read_csv(d,sep="\t")  

    output_dir = "%s/maps" %(output_dir)

    print "Deriving pickled maps to extract relationships from..."
    dataset = Dataset(d)
    dataset.add_features(f)
    for t in range(len(terms)):
        term = terms[t]
        print "Generating P(term|activation) for term %s, %s of %s" %(term,t,len(terms))
        ids = dataset.get_ids_by_features(term)
        maps = meta.MetaAnalysis(dataset,ids)
        term_name = term.replace(" ","_")
        pickle.dump(maps.images["pFgA_z"],open("%s/%s_pFgA_z.pkl" %(output_dir,term_name),"wb"))
Developer: word-fish, Project: wordfish-plugins, Lines: 20, Source: functions.py

Example 5: extract_relations

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def extract_relations(terms,maps_dir,output_dir):

    if isinstance(terms,str):
        terms = [terms]

    f,d = download_data()
    features = pandas.read_csv(f,sep="\t")  
    database = pandas.read_csv(d,sep="\t")  
    allterms = features.columns.tolist()
    allterms.pop(0)  #pmid

    dataset = Dataset(d)
    dataset.add_features(f)
    image_matrix = pandas.DataFrame(columns=range(228453))
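    # 228453 columns = one per voxel in the Neurosynth 2 mm brain mask (cf. the (228453, 5) image shapes in Example 8)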
    for t in range(len(allterms)):
        term = allterms[t]
        term_name = term.replace(" ","_")
        pickled_map = "%s/%s_pFgA_z.pkl" %(maps_dir,term_name)
        if not os.path.exists(pickled_map):
            print "Generating P(term|activation) for term %s" %(term)
            ids = dataset.get_ids_by_features(term)
            maps = meta.MetaAnalysis(dataset,ids)
            pickle.dump(maps.images["pFgA_z"],open(pickled_map,"wb"))
        map_data = pickle.load(open(pickled_map,"rb"))
        image_matrix.loc[term] = map_data

    sims = pandas.DataFrame(columns=image_matrix.index)
    tuples = []
    for t1 in range(len(terms)):
        term1 = terms[t1]
        print "Extracting NeuroSynth relationships for term %s..." %(term1)
        for t2 in range(len(terms)):
            term2 = terms[t2]
            if t1<t2:
                score = pearsonr(image_matrix.loc[term1],image_matrix.loc[term2])[0]
                tuples.append((term1,term2,score))

    save_relations(output_dir=output_dir,relations=tuples)
Developer: word-fish, Project: wordfish-plugins, Lines: 40, Source: functions.py

Example 6: TestAnalysis

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
class TestAnalysis(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_meta_analysis(self):
    """ Test full meta-analysis stream. """
    pass

  def test_decoder(self):
    pass

  def test_coactivation(self):
    """ Test seed-based coactivation. """ 
    pass

  def test_roi_averaging(self):
    pass

  def test_get_random_voxels(self):
    pass
Developer: wanirepo, Project: Neurosynth, Lines: 25, Source: test_analysis.py

Example 7: get_test_dataset

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def get_test_dataset():
    test_data_path = get_test_data_path()
    dataset = Dataset(test_data_path + 'test_dataset.txt')
    dataset.add_features(test_data_path + 'test_features.txt')
    return dataset
Developer: jdnc, Project: ml-project, Lines: 7, Source: utils.py

Example 8: TestBase

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
class TestBase(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_dataset_initializes(self):
    """ Test whether dataset initializes properly. """
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.image_table)
    self.assertEqual(len(self.dataset.mappables), 5)
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.r)

  def test_image_table_loads(self):
    """ Test ImageTable initialization. """
    self.assertIsNotNone(self.dataset.image_table)
    it = self.dataset.image_table
    self.assertEqual(len(it.ids), 5)
    self.assertIsNotNone(it.volume)
    self.assertIsNotNone(it.r)
    self.assertEqual(it.data.shape, (228453, 5))
    # Add tests for values in table

  def test_feature_table_loads(self):
    """ Test FeatureTable initialization. """
    tt = self.dataset.feature_table
    self.assertIsNotNone(tt)
    self.assertEqual(len(self.dataset.list_features()), 5)
    self.assertEqual(tt.data.shape, (5,5))
    self.assertEqual(tt.feature_names[3], 'f4')
    self.assertEqual(tt.data[0,0], 0.0003)

  def test_feature_search(self):
    """ Test feature-based Mappable search. Tests both the FeatureTable method 
    and the Dataset wrapper. """
    tt = self.dataset.feature_table
    features = tt.search_features(['f*'])
    self.assertEqual(len(features), 4)
    d = self.dataset
    ids = d.get_ids_by_features(['f*'], threshold=0.001)
    self.assertEqual(len(ids), 4)
    img_data = d.get_ids_by_features(['f1', 'f3', 'g1'], 0.001, func='max', get_image_data=True)
    self.assertEqual(img_data.shape, (228453, 5))

  def test_selection_by_mask(self):
    """ Test mask-based Mappable selection.
    Only one peak in the test dataset (in study5) should be within the sgACC. """
    ids = self.dataset.get_ids_by_mask('data/sgacc_mask.nii.gz')
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])

  def test_selection_by_peaks(self):
    """ Test peak-based Mappable selection. """
    ids = self.dataset.get_ids_by_peaks(np.array([[3, 30, -9]]))
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])
  
  # def test_invalid_coordinates_ignored(self):
    """ Test dataset contains 3 valid coordinates and one outside mask. But this won't work 
Developer: poldrack, Project: Neurosynth, Lines: 63, Source: test_base.py

Example 9: Masker

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
base_path = '/home/pauli/Development/neurobabel/'
test_data_path = base_path + 'ACE/'
masker_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_one_sm_v2.nii.gz'
atlas_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_atlas_brain_sm_v2.nii.gz'
mask = nb.load(masker_filename)
masker = Masker(mask)
r = 1.0
# transform = {'BREGMA': transformations.bregma_to_whs()}
#transform = {'BREGMA': transformations.identity()}
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'

# load data set
dataset = Dataset(os.path.join(test_data_path, 'db_bregma_export.txt'), masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(os.path.join(test_data_path, "db_bregma_features.txt")) # add features
fn = dataset.get_feature_names()

def get_whs_labels(filename=os.path.join(base_path, "atlases/whs_sd/WHS_SD_rat_atlas_v2.label")):
    ''' Load the names of all labelled areas in the atlas (e.g. brainstem) and return a dict mapping label name to index. '''
    in_file = open(filename, 'r')
    lines = in_file.readlines()
    labels = {}
    for line in lines:
        start = line.find("\"") + 1
        if start > 0:
            stop = line.find("\"", start)
            label = line[start:stop]
            idx = line.split()[0]
            labels[label] = int(idx)
    in_file.close()
    return labels  # assumed: return the parsed labels so the function matches its docstring
Developer: wmpauli, Project: neurosynth, Lines: 33, Source: create_bregma_dataset.py

Example 10: __init__

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
class NeuroSynth:

    """Initialize Neurosynth Database"""

    def __init__(self, dbsize):
        print "Initializing Neurosynth database..."
        self.db = Dataset("data/" + str(dbsize) + "terms/database.txt")
        self.db.add_features("data/" + str(dbsize) + "terms/features.txt")
        self.ids = self.getIDs()
        self.decoder = None
        # self.masker = mask.Mask("data/X.nii.gz")

    """Do contrast analysis between two sets of """

    def neurosynthContrast(self, papers1, papers2, fdr, outdir=None, outprefix=None, image_list=None):

        # Do a meta analysis to contrast the two
        ma = meta.MetaAnalysis(self.db, papers1, papers2, q=float(fdr))
        if outdir:
            print "Saving results to %s" % (outdir)
            ma.save_results(outdir, prefix=outprefix, prefix_sep="_", image_list=image_list)
        return ma.images

    """Conduct meta analysis with particular set of ids"""

    def neurosynthMeta(self, papers, fdr, outdir=None, outprefix=None, image_list=None):
        # Get valid ids from user list
        valid_ids = self.get_valid_ids(papers)

        if len(valid_ids) > 0:
            # Do meta analysis
            ma = meta.MetaAnalysis(self.db, valid_ids, q=float(fdr))
            if outdir:
                print "Saving results to output directory %s" % (outdir)
                ma.save_results(outdir, prefix=outprefix, prefix_sep="_", image_list=image_list)
            return ma.images
        else:
            print "No studies found in database for ids in question!"

    """Return list of valid ids from user input"""

    def get_valid_ids(self, papers):
        # Input is DOI with list of papers
        valid_ids = [x for x in papers if int(x.strip(" ")) in self.ids]
        print "Found %s valid ids." % (str(len(valid_ids)))
        return valid_ids

    """Decode an image, return 100 results"""

    def decode(self, images, outfile, mrs=None, round=4):
        if not self.decoder:
            self.decoder = decode.Decoder(self.db)

        # If mrs is not specified, do decoding against neurosynth database
        if not mrs:
            result = self.decoder.decode(images, save=outfile)

        # If mrs is specified, do decoding against custom set of images
        else:
            # This is akin to traditional neurosynth method - pearson's r correlation
            imgs_to_compare = imageutils.load_imgs(mrs, self.masker)
            imgs_to_decode = imageutils.load_imgs(images, self.masker)
            x, y = imgs_to_compare.astype(float), imgs_to_decode.astype(float)
            x, y = x - x.mean(0), y - y.mean(0)
            x, y = x / np.sqrt((x ** 2).sum(0)), y / np.sqrt((y ** 2).sum(0))
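            # after mean-centering and unit-norm scaling, x.T.dot(y) is exactly the matrix of Pearson correlations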
            result = np.around(x.T.dot(y).T, round)
            features = [os.path.basename(m) for m in mrs]
            rownames = [os.path.basename(m) for m in images]
            df = pd.DataFrame(result, columns=features)
            df.index = rownames
            df.to_csv(outfile, sep="\t")
        return result

    """Return features in neurosynth database"""

    def getFeatures(self, dataset):
        return dataset.get_feature_names()

    """Extract pubmed IDs or dois from Neurosynth Database"""

    def getIDs(self):
        # Get all IDs in neuroSynth
        return self.db.image_table.ids

    """Extract author names for a given pmid or doi"""

    def getAuthor(self, db, id):
        article = self.db.get_mappables(id)
        meta = article[0].__dict__
        tmp = meta["data"]["authors"]
        tmp = tmp.split(",")
        authors = [x.strip("^ ") for x in tmp]
        return authors

    """Extract all author names in database"""

    def getAuthors(self, db):
        articles = db.mappables
        uniqueAuthors = []
        for a in articles:
#......... remainder of this file omitted .........
Developer: vsoch, Project: brainbehavior, Lines: 103, Source: neurosyn.py

Example 11: get_test_dataset

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def get_test_dataset(prefix='test'):
    test_data_path = get_test_data_path()
    dataset = Dataset(test_data_path + '%s_dataset.txt' % prefix)
    dataset.add_features(test_data_path + '%s_features.txt' % prefix)
    return dataset
Developer: MQMQ0229, Project: neurosynth, Lines: 7, Source: utils.py

Example 12: create_dataset

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def create_dataset(database_location, feature_location):
    dataset = Dataset(database_location)
    dataset.add_features(feature_location)
    dataset.save('neurosynth-dataset.pkl')
    return dataset
Developer: acley, Project: neuro-data-matrix-factorization, Lines: 7, Source: neurosynthGlue.py

Example 13: create_dataset

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
def create_dataset(database_location, feature_location):
    dataset = Dataset(database_location)
    dataset.add_features(feature_location)
    dataset.save('dataset-old.pkl')
    print('created dataset')
    return dataset
Developer: acley, Project: neuro-data-matrix-factorization, Lines: 8, Source: voxel-x-feature-matrix.py

Example 14: Dataset

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
from neurosynth.base.dataset import Dataset
from neurosynth.analysis import meta
import os
dataset = Dataset('database.txt')
dataset.add_features('features.txt')
print(dataset.get_feature_names())
ids = dataset.get_ids_by_features('emo*', threshold=0.001)
print(len(ids))
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results('emotion')
Developer: MQMQ0229, Project: neurosynth, Lines: 12, Source: example.py

Example 15: print

# Required import: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import add_features [as alias]
resource_dir = path.join(path.pardir, 'resources')

# make sure we have the data
dataset_dir = path.join(path.expanduser('~'), 'Documents', 'neurosynth-data')
database_path = path.join(dataset_dir, 'database_bregma.txt')
neurosynth_data_url = 'https://github.com/wmpauli/neurosynth-data'
if not path.exists(database_path):
    print("Please download dataset from %s and store it in %s" % (neurosynth_data_url, dataset_dir))

# load dataset, both image table and feature table
r = 1.0 # 1mm smoothing kernel
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'
masker_filename = path.join(resource_dir, 'WHS_SD_rat_brainmask_sm_v2.nii.gz')
dataset = Dataset(path.join(dataset_dir, 'database_bregma.txt'), masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(path.join(dataset_dir, "features_bregma.txt")) # add features
fn = dataset.get_feature_names()

# get the ids of studies where this feature occurs
ids = dataset.get_ids_by_features(('%s*' % feature), threshold=0.1)
ma = meta.MetaAnalysis(dataset, ids)
results_path = path.join('results', 'meta', feature)
if not path.exists(results_path):
    makedirs(results_path)

print("saving results to: %s" % results_path)
ma.save_results(results_path)

# note: figure 2 of the manuscript was generated by plotting the z-score statistical maps for forward inference (pAgF_z.nii.gz) and reverse inference (pFgA_z.nii.gz)
Developer: wmpauli, Project: neurosynth, Lines: 32, Source: basic_ma.py


Note: The neurosynth.base.dataset.Dataset.add_features method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not repost without permission.