This article collects and summarizes typical usage examples of the `os.path._opj` function in Python. If you have been wondering how the Python `_opj` function is actually used, how to call it, or where to find examples of it in practice, the curated code samples below may help.
The text below presents a total of 15 code examples of the `_opj` function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps surface better Python code samples.
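Note: despite the title, `_opj` is not a public member of `os.path`; in the projects quoted below it is a module-level alias for `os.path.join`. The following is a minimal sketch of that alias, inferred from the usage pattern rather than copied from the original modules' import statements:

from os.path import join as _opj

# _opj simply joins path components:
print(_opj("/data", "sub001", "anatomy"))  # -> /data/sub001/anatomy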
Example 1: get_dti
def get_dti(self, subj, instance=1):
"""Returns DTI image and b-values plus b-vectors
Parameters
----------
subj : int or str
Subject identifier (without 'sub' prefix).
instance : int
ID of the DTI dataset.
Returns
-------
NiBabel Nifti1Image, array, array
The first returned array is the vector of b-values for each volume
in the image file. The second array contains the 3D b-vectors for each
volume in the image file.
"""
import nibabel as nb
path = _opj(self._basedir, _sub2id(subj), "dti")
return (
nb.load(_opj(path, "dti%.3i.nii.gz" % instance)),
np.loadtxt(_opj(path, "dti%.3i.bvals" % instance)),
np.loadtxt(_opj(path, "dti%.3i.bvecs" % instance)).T,
)
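A self-contained sketch of the file layout that get_dti() assembles with `_opj` (the base directory and subject id are made up; the sub-directory and file-name patterns follow the code above):

from os.path import join as _opj

basedir, subj_id, instance = "/data/study", "sub001", 1
path = _opj(basedir, subj_id, "dti")
print(_opj(path, "dti%.3i.nii.gz" % instance))  # /data/study/sub001/dti/dti001.nii.gz
print(_opj(path, "dti%.3i.bvals" % instance))   # /data/study/sub001/dti/dti001.bvals
print(_opj(path, "dti%.3i.bvecs" % instance))   # /data/study/sub001/dti/dti001.bvecs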
Example 2: run_searchlight
def run_searchlight(op, subjectdir, conf, output_dir,TR=2):
mask_name = conf.mask_name
conditions = conf.conditions_to_compare
flavor = conf.flavor
study_path = op.study_dir()
subcode = subjectdir.subcode()
for condition in conditions:
did_run = True
output = _opj(output_dir, '*{}*'.format(conf.get_cond_prefix(condition)))
if conf.num_of_permutations > 0:
output = "{}_perm{}".format(output,conf.num_of_permutations)
if len(glob(output)) == 0:
did_run = False
if did_run:
print "already ran all sl for {}".format(output_dir)
return
fds = conf.get_ds(study_path, subcode, conf, mask_name, flavor, TR)
print fds.summary()
warp = glob(_opj(study_path,'sub{:0>3d}'.format(subcode), '**', conf.mvpa_tasks[0], 'reg', 'example_func2standard_warp.nii.gz'))[0]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for pair in conditions:
permute = AttributePermutator('condition', limit='chunks')
print conf.num_of_permutations+1
for j in xrange(conf.num_of_permutations+1):
prefix = conf.get_cond_prefix(pair)
cond_ds = fds[np.array([c in pair for c in fds.sa['condition']])]
if j > 0:
cond_ds = permute(cond_ds)
prefix = "{}_perm{}".format(prefix,j)
print prefix
output_basename = os.path.join(output_dir, prefix)
if(len(glob(output_basename+"*")) > 0):
print "sl already ran {}".format(j)
continue
kwa = {'voxel_indices': conf.get_neighbourhood_strategy(cond_ds)}
qe = IndexQueryEngine(**kwa)
# init the searchlight with the queryengine
sl = Searchlight(conf.get_sl_measure(), queryengine=qe, roi_ids=None,
enable_ca=['roi_sizes', 'roi_feature_ids'])
print "starting sl {}".format(datetime.now())
sl_map = sl(cond_ds)
print "finished sl {}".format(datetime.now())
pickle.dump(sl_map, open("{}_sl_map.p".format(output_basename), "wb"))
acc_results = map2nifti(sl_map,
imghdr=fds.a.imghdr)
acc_nii_filename = '{}-acc.nii.gz'.format(output_basename)
acc_results.to_filename(acc_nii_filename)
#do_searchlight(cond_ds,k,os.path.join(output_dir, prefix))
out_filename = acc_nii_filename.replace('.nii.gz', '_mni.nii.gz')
apply_warp(acc_nii_filename, warp, out_filename)
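For readers unfamiliar with the PyMVPA objects used above, here is a minimal sketch of the searchlight setup; the classifier, partitioner and sphere radius are assumptions, since the original code takes its measure and neighbourhood from the `conf` object:

import numpy as np
from mvpa2.suite import (Searchlight, IndexQueryEngine, Sphere,
                         CrossValidation, LinearCSVMC, NFoldPartitioner)

# cross-validated classification accuracy as the per-sphere measure
measure = CrossValidation(LinearCSVMC(), NFoldPartitioner(),
                          errorfx=lambda p, t: np.mean(p == t))
# spheres of 3-voxel radius around every feature
qe = IndexQueryEngine(voxel_indices=Sphere(3))
sl = Searchlight(measure, queryengine=qe, roi_ids=None,
                 enable_ca=['roi_sizes', 'roi_feature_ids'])
# sl_map = sl(cond_ds)  # cond_ds: an fmri_dataset with 'targets' and 'chunks'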
Example 3: perm_hist
def perm_hist(subj):
conf = AnalysisConfiguration()
data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
sub_dir = _opj(data_dir,conf.study_name,'sub{:0>3d}'.format(subj))
directory = _opj(data_dir,'LP/sub{:0>3d}/results/'.format(subj))
print conf.dir_name()
for pair in conf.conditions_to_compare:
#print _opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))
files = sorted(glob(_opj(directory,conf.dir_name(),'{}*{}{}*.p'.format(conf.mask_name,pair[0],pair[1]))))
plt.figure()
plt.subplot(211)
plt.title('sub{:0>3d}-{}{}'.format(subj,pair[0],pair[1]))
print pair, " ", len(files)
all_maps = []
for f in files[:-1]:
f_h = file(f,'r')
m = pickle.load(f_h)
all_maps.append(m)
if 'perm' in f:
color = 'black'
line_width = 1
else:
color = 'crimson'
line_width = 2
plt.hist(np.transpose(m),bins=20,histtype='step',color=[color], lw = line_width)
perms = vstack(all_maps)
real_f = files[-1]
f_h = file(real_f,'r')
real_map = pickle.load(f_h)
color = 'crimson'
line_width = 2
plt.hist(np.transpose(real_map),bins=20,histtype='step',color=[color], lw = line_width)
percentiles = np.zeros((1,len(real_map.samples[0])))
for i,vox in enumerate(real_map.samples[0]):
percentiles[0,i]=percentileofscore(perms[:,i].samples.flat,vox)
plt.subplot(212)
print len(percentiles[0])
plt.hist(percentiles[0],bins=20,histtype='step')
real_map.samples=percentiles
nii = real_f.replace("_sl_map.p", "-acc.nii.gz")
nii_file = nib.load(nii)
perc_results = map2nifti(real_map, imghdr=nii_file.header)
perc_nii_filename =real_f.replace("_sl_map.p", "-percentiles_sub{:0>3d}.nii.gz".format(subj))
perc_results.to_filename(perc_nii_filename)
thr_prc_filename = perc_nii_filename.replace(".nii.gz","_p0.01.nii.gz")
thr = fsl.maths.Threshold(in_file=perc_nii_filename, thresh=100,
out_file=thr_prc_filename)
thr.run()
mni_thr_filename = thr_prc_filename.replace(".nii.gz","_mni.nii.gz")
apply_warp(sub_dir,thr_prc_filename, mni_thr_filename)
plt.show()
#plt.savefig('/tmp/sub{:0>3d}_{}{}'.format(subj,pair[0],pair[1]))
raw_input()
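A tiny, self-contained illustration of the percentile step above: where the real (unpermuted) accuracy of one voxel falls within its permutation distribution (the accuracy values are made up):

import numpy as np
from scipy.stats import percentileofscore

perm_accs = np.array([0.48, 0.52, 0.50, 0.55, 0.47])  # permutation accuracies
real_acc = 0.54
print(percentileofscore(perm_accs, real_acc))          # 80.0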
Example 4: process_files
def process_files(prefix, output_dir, thr,all_file, avg_file):
from scipy import ndimage
data = avg_file.get_data()
cluster_map, n_clusters = ndimage.label(data > thr)
output_file = _opj(output_dir, "{}_thr_{}.nii.gz".format(prefix, thr))
nib.save(nib.Nifti1Image(cluster_map, None, avg_file.header), output_file)
data = all_file.get_data()
thr_data = data > thr
res = np.sum(thr_data, 3)
output_file = _opj(output_dir, "{}_sum_thr_{}.nii.gz".format(prefix, thr))
nib.save(nib.Nifti1Image(res, None, avg_file.header), output_file)
return output_file
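A tiny, self-contained illustration of the thresholding and cluster labelling used in process_files() (the data and threshold are made up):

import numpy as np
from scipy import ndimage

data = np.array([[0.2, 0.9, 0.8],
                 [0.1, 0.0, 0.7],
                 [0.95, 0.1, 0.0]])
cluster_map, n_clusters = ndimage.label(data > 0.5)
print(n_clusters)    # 2 connected supra-threshold clusters
print(cluster_map)   # integer label image, 0 = below threshold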
Example 5: main
def main():
conf = AnalysisConfiguration()
data_dir = os.environ.get('DATA_DIR') or '/home/user/data'
op = OpenFMRIData(data_dir, conf.study_name)
analyzer = OpenFMRIAnalyzer(op, conf)
all_subject_dirs = op.all_subjects_dirs_with_raw()
for subject in all_subject_dirs:
analyzer.extract_brain(subject)
for subject in all_subject_dirs:
analyzer.anatomical_registration(subject)
for subject in all_subject_dirs:
#for task in conf.mvpa_tasks:
#subject.remove_volumes_from_model(1, "", task, conf.num_of_volumes_to_delete)
analyzer.motion_correction(subject)
analyzer.functional_registration(subject)
if conf.func_seg:
analyzer.functional_segmentation(subject)
else:
analyzer.segmentation(subject)
analyzer.generate_functional_gm_masks(subject)
#analyzer.warp_standard_mask(subject)
for subject in all_subject_dirs:
# DO SL
out_dir = _opj(subject.path(),'results',conf.dir_name())
if not os.path.exists(out_dir):
os.makedirs(out_dir)
run_searchlight(op, subject, conf, out_dir)
# run_searchlight(op.study_dir(), subject.subcode(), mask_name, k, [['G1','G4']], out_dir,flavor)
#Group Level
output_dir = _opj(op.study_dir(), 'results', "{}".format(conf.dir_name()))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
files = glob(_opj(op.study_dir(), "**", 'results', conf.dir_name(), '*acc_mni.nii.gz'))
print files
generate_group_level_map( files, output_dir)
Example 6: calc_summary_niis
def calc_summary_niis(in_files, output_dir, prefix):
all_file = _opj(output_dir, '{}_all.nii.gz'.format(prefix))
avg_file = _opj(output_dir, '{}_avg.nii.gz'.format(prefix))
merge = fsl.Merge(in_files=in_files,
dimension='t',
merged_file=all_file)
merge.run()
mean = fsl.maths.MeanImage(in_file=all_file, dimension='T', out_file=avg_file)
mean.run()
all_nii = nib.load(all_file)
avg_nii = nib.load(avg_file)
all_data = all_nii.get_data()
med_data = np.median(all_data,3)
output_file = _opj(output_dir, "{}_median.nii.gz".format(prefix))
nib.save(nib.Nifti1Image(med_data, None, avg_nii.header), output_file)
return all_nii, avg_nii
Example 7: get_run_fmri
def get_run_fmri(self, subj, task, run, flavor="dico"):
"""Returns a NiBabel image instance for fMRI of a particular run
Parameters
----------
subj : int or str
Subject identifier (without 'sub' prefix).
task : int
Task ID (see task_key.txt)
run : int
Run ID.
flavor : ('', 'dico', 'dico7Tad2grpbold7Tad', 'dico7Tad2grpbold7Tad_nl')
fMRI data flavor to access (see dataset description)
Returns
-------
NiBabel Nifti1Image
"""
import nibabel as nb
if flavor == "":
fname = "bold.nii.gz"
elif flavor == "dico":
fname = "bold_dico.nii.gz"
else:
fname = "bold_%s.nii.gz" % flavor
fname = _opj(self._basedir, _sub2id(subj), "BOLD", _taskrun(task, run), fname)
return nb.load(fname)
Example 8: get_model_conditions
def get_model_conditions(self, model):
"""Returns a description of all conditions for a given model
Parameters
----------
model : int
Model identifier.
Returns
-------
list(dict)
A list of model conditions is returned, where each item is a
dictionary with keys ``id`` (numerical condition ID), ``task``
(numerical task ID for the task containing this condition), and
``name`` (the literal condition name). This information is
returned in a list (instead of a dictionary), because the openfmri
specification of model conditions contains no unique condition
identifier. Conditions are only uniquely described by the combination
of task and condition ID.
"""
def_fname = _opj(self._basedir, 'models', _model2id(model),
'condition_key.txt')
def_data = np.recfromtxt(def_fname)
conds = []
# load model meta data
for dd in def_data:
cond = {}
cond['task'] = _id2int(dd[0])
cond['id'] = _id2int(dd[1])
cond['name'] = dd[2]
conds.append(cond)
return conds
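A self-contained sketch of the condition_key.txt format that get_model_conditions() parses; the file contents are made up, and the prefix-stripping below is a crude stand-in for the `_id2int` helper used above:

condition_key = """\
task001 cond001 house
task001 cond002 face
task002 cond001 scrambled"""

conds = []
for line in condition_key.splitlines():
    task, cond, name = line.split()
    # stand-in for _id2int: strip the literal prefix, keep the number
    conds.append({'task': int(task[4:]), 'id': int(cond[4:]), 'name': name})
print(conds[0])  # {'task': 1, 'id': 1, 'name': 'house'}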
Example 9: get_bold_run_image
def get_bold_run_image(self, subj, task, run, flavor=None):
"""Returns a NiBabel image instance for the BOLD data of a
particular subject/task/run combination.
Parameters
----------
subj : int
Subject identifier.
task : int
Task ID (see task_key.txt)
run : int
Run ID.
flavor : None or str
BOLD data flavor to access (see dataset description)
Returns
-------
NiBabel Nifti1Image
"""
import nibabel as nb
if flavor is None:
flavor = ''
else:
flavor = '_' + flavor
fname = 'bold%s.nii.gz' % flavor
fname = _opj(self._basedir, _sub2id(subj),
'BOLD', _taskrun(task, run),
fname)
return nb.load(fname)
Example 10: get_run_physio_data
def get_run_physio_data(self, subj, task, run, sensors=None):
"""Returns the physiological recording for a particular run
Parameters
----------
subj : int or str
Subject identifier (without 'sub' prefix).
task : int
Task ID (see task_key.txt)
run : int
Run ID.
sensors : None or tuple({'trigger', 'respiratory', 'cardiac', 'oxygen'})
Selection and order of values to return.
Returns
-------
array
Array of floats -- one row per sample (100 Hz). If ``sensors`` is None,
four columns are returned (trigger track, respiratory trace, cardiac trace,
oxygen saturation). If ``sensors`` is specified, the order of the columns
matches the order of the ``sensors`` sequence.
"""
fname = _opj(self._basedir, _sub2id(subj), "physio", _taskrun(task, run), "physio.txt.gz")
sensor_map = {"trigger": 0, "respiratory": 1, "cardiac": 2, "oxygen": 3}
if not sensors is None:
sensors = [sensor_map[s] for s in sensors]
data = np.loadtxt(fname, usecols=sensors)
return data
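A self-contained illustration of the sensor/column selection done above; np.loadtxt returns the columns in the order given by ``usecols``, which is what makes the output follow the ``sensors`` sequence (the sample values are made up):

import io
import numpy as np

physio_txt = io.StringIO("0 0.1 0.7 97.0\n1 0.2 0.8 96.5\n")
sensor_map = {"trigger": 0, "respiratory": 1, "cardiac": 2, "oxygen": 3}
sensors = ("cardiac", "respiratory")
cols = [sensor_map[s] for s in sensors]
data = np.loadtxt(physio_txt, usecols=cols)
print(data)  # [[0.7 0.1]
             #  [0.8 0.2]]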
Example 11: get_task_descriptions
def get_task_descriptions(self):
"""Returns a dictionary with the tasks defined in the dataset
Dictionary keys are integer task IDs, values are task description
strings.
"""
fname = _opj(self._basedir, 'task_key.txt')
return _get_description_dict(fname, xfm_key=_id2int)
Example 12: _subdirs2ids
def _subdirs2ids(path, prefix, **kwargs):
ids = []
if not os.path.exists(path):
return ids
for item in os.listdir(path):
if item.startswith(prefix) and os.path.isdir(_opj(path, item)):
ids.append(_id2int(item, **kwargs))
return sorted(ids)
Example 13: get_angio
def get_angio(self, subj, instance=1):
"""Returns angiography scan.
Parameters
----------
subj : int or str
Subject identifier (without 'sub' prefix).
instance : int
ID of the angio dataset.
Returns
-------
NiBabel Nifti1Image
"""
import nibabel as nb
path = _opj(self._basedir, _sub2id(subj), "angio")
return nb.load(_opj(path, "angio%.3i.nii.gz" % instance))
Example 14: get_t2
def get_t2(self, subj, instance=1):
"""Returns T2-weighted scan.
Parameters
----------
subj : int or str
Subject identifier (without 'sub' prefix).
instance : int
ID of the T2 dataset.
Returns
-------
NiBabel Nifti1Image
"""
import nibabel as nb
path = _opj(self._basedir, _sub2id(subj), "anatomy", "other")
return nb.load(_opj(path, "t2w%.3i.nii.gz" % instance))
Example 15: _subdirs2ids
def _subdirs2ids(path, prefix, **kwargs):
# num_ids to separate sorting of numeric and literal ids
ids, num_ids = [], []
if not os.path.exists(path):
return ids
for item in os.listdir(path):
if item.startswith(prefix) and os.path.isdir(_opj(path, item)):
id_ = _id2int(item, **kwargs)
(num_ids if isinstance(id_, (np.integer, int)) else ids).append(id_)
return sorted(num_ids) + sorted(ids)
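A tiny, self-contained illustration of the sorting rule implemented above: numeric ids come first in numeric order, literal ids follow alphabetically (the ids are made up):

import numpy as np

raw_ids = [10, "pilot", 2, "test"]
num_ids = [i for i in raw_ids if isinstance(i, (np.integer, int))]
lit_ids = [i for i in raw_ids if not isinstance(i, (np.integer, int))]
print(sorted(num_ids) + sorted(lit_ids))  # [2, 10, 'pilot', 'test']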