This article collects typical usage examples of the Python method neurosynth.base.dataset.Dataset.get_feature_names. If you have been wondering what Dataset.get_feature_names does, how to call it, or where to find working examples, the hand-picked code samples below should help. You can also read further about the containing class, neurosynth.base.dataset.Dataset.
Three code examples of Dataset.get_feature_names are shown below, ordered by popularity by default.
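Before the numbered examples, a minimal sketch of the method itself. The file names database.txt and features.txt are placeholders for a local copy of the Neurosynth data, not files shipped with the library:

from neurosynth.base.dataset import Dataset

# Build a Dataset from the activation database and attach the feature table.
dataset = Dataset('database.txt')
dataset.add_features('features.txt')

# get_feature_names() returns the names of all loaded features (terms).
names = dataset.get_feature_names()
print(len(names), names[:5])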
Example 1: Dataset
# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import get_feature_names [as alias]
from neurosynth.base.dataset import Dataset
from neurosynth.analysis import meta

# Load the activation database and attach the term-based feature table.
dataset = Dataset('database.txt')
dataset.add_features('features.txt')
print(dataset.get_feature_names())

# Select studies whose loading on any feature matching 'emo*' exceeds the threshold.
ids = dataset.get_ids_by_features('emo*', threshold=0.001)
print(len(ids))

# Run a meta-analysis over those studies and save the resulting images.
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results('emotion')
Example 2: __init__
# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import get_feature_names [as alias]
import os
import re
import sys
import pickle  # the original snippet used Python 2's cPickle


class NeurosynthMerge:
    def __init__(self, thesaurus, npath, outdir, test_mode=False):
        """
        Generates a new set of images from the Neurosynth repository by
        combining terms according to a thesaurus.

        Args:
        - thesaurus: a list of tuples of the form ('term that will be the
          name of the file', 'the other term', 'expression combining the
          terms'). The last element is an alphanumeric expression whose
          terms are joined by & (and), &~ (and-not), or | (or).
        - npath: directory where the neurosynth git repository lives on
          your machine (https://github.com/neurosynth/neurosynth)
        - outdir: directory where the generated images will be saved
        - test_mode: when True, run an abridged version for test purposes
          (as exercised by test.Neurosynth.py)
        """
        self.thesaurus = thesaurus
        self.npath = npath
        self.outdir = outdir
        self.import_neurosynth_git()

        # Imported here because import_neurosynth_git() has just added the
        # repository to sys.path.
        from neurosynth.analysis import meta

        # Remove the first two terms of each tuple from the feature list
        # and insert the third (the combined expression).
        for triplet in thesaurus:
            self.feature_list = [feature for feature in self.feature_list
                                 if feature not in triplet]
            self.feature_list.append(triplet[-1])

        # In test mode, keep only the combined expressions (abridged list).
        if test_mode:
            self.feature_list = [triplet[-1] for triplet in thesaurus]

        # Run meta-analyses on the new feature set and save the results
        # to outdir.
        for feature in self.feature_list:
            self.ids = self.dataset.get_ids_by_expression(feature,
                                                          threshold=0.001)
            ma = meta.MetaAnalysis(self.dataset, self.ids)
            # Sanitize the feature name so it is a legal file name.
            feat_fname = re.split(r'\W+', feature)[0]
            # Save the results (several different types of files).
            ma.save_results(self.outdir + os.sep + feat_fname)

    def import_neurosynth_git(self):
        # Add the neurosynth git folder to the Python path.
        sys.path.append(self.npath)
        from neurosynth.base.dataset import Dataset

        # Load a pickled Dataset if one exists; build a new one otherwise.
        try:
            with open(self.npath + os.sep + 'data/dataset.pkl', 'rb') as f:
                self.dataset = pickle.load(f)
        except IOError:
            # Create a Dataset instance from the database file.
            self.dataset = Dataset(self.npath + os.sep + 'data/database.txt')
            # Load features from file.
            self.dataset.add_features(self.npath + os.sep + 'data/features.txt')
        # Get the names of all features.
        self.feature_list = self.dataset.get_feature_names()
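A hypothetical invocation of the class above; the terms and paths are illustrative, not taken from the original snippet. The third element of each tuple uses the expression syntax from the docstring: & (and), &~ (and-not), | (or):

# Hypothetical thesaurus: merge 'emotion' and 'affect' into a single map.
thesaurus = [
    ('emotion', 'affect', 'emotion|affect'),
    # An and-not expression would look like: ('pain', 'placebo', 'pain&~placebo')
]
nsm = NeurosynthMerge(thesaurus,
                      npath='/path/to/neurosynth',  # local clone of the repo
                      outdir='/path/to/output')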
Example 3: print
# Required module: from neurosynth.base.dataset import Dataset [as alias]
# Or: from neurosynth.base.dataset.Dataset import get_feature_names [as alias]
# Imports reconstructed for this snippet. Note that transformations.bregma_to_whs
# and the Dataset arguments below come from the wmpauli fork of neurosynth used
# with the rat (bregma) dataset, not from the upstream library.
from os import path, makedirs

from neurosynth.base.dataset import Dataset, FeatureTable
from neurosynth.base import transformations
from neurosynth.analysis import meta

resource_dir = path.join(path.pardir, 'resources')

# Make sure we have the data.
dataset_dir = path.join(path.expanduser('~'), 'Documents', 'neurosynth-data')
database_path = path.join(dataset_dir, 'database_bregma.txt')
neurosynth_data_url = 'https://github.com/wmpauli/neurosynth-data'
if not path.exists(database_path):
    print("Please download dataset from %s and store it in %s" % (neurosynth_data_url, dataset_dir))

# Load the dataset: both the image table and the feature table.
r = 1.0  # 1 mm smoothing kernel
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'
masker_filename = path.join(resource_dir, 'WHS_SD_rat_brainmask_sm_v2.nii.gz')
dataset = Dataset(path.join(dataset_dir, 'database_bregma.txt'),
                  masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(path.join(dataset_dir, 'features_bregma.txt'))  # add features
fn = dataset.get_feature_names()

# Get the ids of studies where this feature occurs. The variable `feature`
# is not defined in the original snippet; pick a term of interest, e.g.:
feature = fn[0]  # placeholder choice, not in the original
ids = dataset.get_ids_by_features('%s*' % feature, threshold=0.1)
ma = meta.MetaAnalysis(dataset, ids)

results_path = path.join('results', 'meta', feature)
if not path.exists(results_path):
    makedirs(results_path)
print("saving results to: %s" % results_path)
ma.save_results(results_path)
# Note: figure 2 of the manuscript was produced by plotting the z-score
# statistical maps for forward inference (pAgF_z.nii.gz) and reverse
# inference (pFgA_z.nii.gz).
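As a follow-up to the note above, a minimal sketch of loading those two maps for plotting, assuming save_results() wrote the standard pAgF_z.nii.gz and pFgA_z.nii.gz files into results_path and that nibabel is installed:

import nibabel as nib

# Forward-inference (pAgF) and reverse-inference (pFgA) z-score maps.
pagf_z = nib.load(path.join(results_path, 'pAgF_z.nii.gz'))
pfga_z = nib.load(path.join(results_path, 'pFgA_z.nii.gz'))
print(pagf_z.shape, pfga_z.shape)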