This article collects typical usage examples of the Python method nipype.Node.iterables. If you are wondering how exactly to use Node.iterables, how it is called, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, nipype.Node.
The following shows 10 code examples of the Node.iterables method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
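Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: setting iterables on a Node makes nipype expand that node at run time into one copy per value and re-run everything downstream for each copy. The node names, the toy function, and the base_dir below are made up for illustration only.

from nipype import Node, Workflow, Function, IdentityInterface

# A source node whose only job is to hand out parameter values
subject_source = Node(IdentityInterface(fields=['subject_id']), name='subject_source')
subject_source.iterables = ('subject_id', ['sub-01', 'sub-02', 'sub-03'])

def report(subject_id):
    return 'processing %s' % subject_id

reporter = Node(Function(input_names=['subject_id'],
                         output_names=['message'],
                         function=report),
                name='reporter')

wf = Workflow(name='iterables_demo', base_dir='/tmp/iterables_demo')  # hypothetical base_dir
wf.connect(subject_source, 'subject_id', reporter, 'subject_id')
# wf.run()  # would execute 'reporter' three times, once per subject_id value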
Example 1: create_surface_projection_workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
def create_surface_projection_workflow(name="surfproj", exp_info=None):
    """Project the group mask and thresholded zstat file onto the surface."""
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    inputnode = Node(IdentityInterface(["zstat_file", "mask_file"]), "inputs")

    # Sample the zstat image to the surface
    hemisource = Node(IdentityInterface(["mni_hemi"]), "hemisource")
    hemisource.iterables = ("mni_hemi", ["lh", "rh"])

    zstatproj = Node(freesurfer.SampleToSurface(
        sampling_method=exp_info["sampling_method"],
        sampling_range=exp_info["sampling_range"],
        sampling_units=exp_info["sampling_units"],
        smooth_surf=exp_info["surf_smooth"],
        subject_id="fsaverage",
        mni152reg=True,
        target_subject="fsaverage"),
        "zstatproj")

    # Sample the mask to the surface
    maskproj = Node(freesurfer.SampleToSurface(
        sampling_range=exp_info["sampling_range"],
        sampling_units=exp_info["sampling_units"],
        subject_id="fsaverage",
        mni152reg=True,
        target_subject="fsaverage"),
        "maskproj")
    if exp_info["sampling_method"] == "point":
        maskproj.inputs.sampling_method = "point"
    else:
        maskproj.inputs.sampling_method = "max"

    outputnode = Node(IdentityInterface(["surf_zstat",
                                         "surf_mask"]), "outputs")

    # Define and connect the workflow
    proj = Workflow(name)
    proj.connect([
        (inputnode, zstatproj,
            [("zstat_file", "source_file")]),
        (inputnode, maskproj,
            [("mask_file", "source_file")]),
        (hemisource, zstatproj,
            [("mni_hemi", "hemi")]),
        (hemisource, maskproj,
            [("mni_hemi", "hemi")]),
        (zstatproj, outputnode,
            [("out_file", "surf_zstat")]),
        (maskproj, outputnode,
            [("out_file", "surf_mask")]),
        ])

    return proj
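The workflow above is returned unconnected to any data. A plausible way to use it is sketched below; the paths, the working directory, and relying on the default exp_info are assumptions for illustration, not part of the original example.

proj = create_surface_projection_workflow(name='surfproj')
proj.base_dir = '/tmp/surfproj_work'                                    # hypothetical working directory
proj.inputs.inputs.zstat_file = '/data/group/zstat1_threshold.nii.gz'   # hypothetical input file
proj.inputs.inputs.mask_file = '/data/group/group_mask.nii.gz'          # hypothetical input file
# Because hemisource.iterables = ('mni_hemi', ['lh', 'rh']), a single run of the
# workflow projects both hemispheres.
# proj.run()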
Example 2: create_workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
# ......... part of the code omitted here .........
    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
                          iterfield=['in_file', 'summary_file',
                                     'avgwf_txt_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                                     list(range(49, 55)) + [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))
    wf.connect(registration, 'outputspec.aparc',
               sampleaparc, 'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        import os
        out_names = []
        for filename in files:
            path, name, _ = split_filename(filename)
            out_names.append(os.path.join(path, name + suffix))
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'),
               sampleaparc, 'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'),
               sampleaparc, 'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
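This example mixes the two iteration mechanisms nipype offers: target.iterables duplicates the node (and its whole downstream subgraph) once per target subject, while iterfield on a MapNode maps a single node over a list input. A rough sketch of the distinction, with made-up node names and file paths:

from nipype import Node, MapNode, IdentityInterface
from nipype.interfaces import freesurfer

# iterables: the downstream graph is re-run once per value
source = Node(IdentityInterface(fields=['target_subject']), name='source')
source.iterables = ('target_subject', ['fsaverage3', 'fsaverage4'])

# iterfield: one node execution that loops its interface over a list input
sampler = MapNode(freesurfer.SampleToSurface(), iterfield=['source_file'],
                  name='sampler')
sampler.inputs.source_file = ['run1.nii.gz', 'run2.nii.gz']  # hypothetical files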
Example 3: group_onesample_openfmri
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
def group_onesample_openfmri(dataset_dir, model_id=None, task_id=None, l1output_dir=None, out_dir=None, no_reversal=False):

    wk = Workflow(name='one_sample')
    wk.base_dir = os.path.abspath(work_dir)

    info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir']),
                name='infosource')
    info.inputs.model_id = model_id
    info.inputs.task_id = task_id
    info.inputs.dataset_dir = dataset_dir

    num_copes = contrasts_num(model_id, task_id, dataset_dir)

    dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                          outfields=['copes', 'varcopes']), name='grabber')
    dg.inputs.template = os.path.join(l1output_dir, 'model%03d/task%03d/*/%scopes/mni/%scope%02d.nii.gz')
    dg.inputs.template_args['copes'] = [['model_id', 'task_id', '', '', 'cope_id']]
    dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', 'var', 'var', 'cope_id']]
    dg.iterables = ('cope_id', num_copes)
    dg.inputs.sort_filelist = True

    wk.connect(info, 'model_id', dg, 'model_id')
    wk.connect(info, 'task_id', dg, 'task_id')

    model = Node(L2Model(), name='l2model')
    wk.connect(dg, ('copes', get_len), model, 'num_copes')

    mergecopes = Node(Merge(dimension='t'), name='merge_copes')
    wk.connect(dg, 'copes', mergecopes, 'in_files')

    mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
    wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

    mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

    flame = Node(FLAMEO(), name='flameo')
    flame.inputs.mask_file = mask_file
    flame.inputs.run_mode = 'flame1'

    wk.connect(model, 'design_mat', flame, 'design_file')
    wk.connect(model, 'design_con', flame, 't_con_file')
    wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
    wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
    wk.connect(model, 'design_grp', flame, 'cov_split_file')

    smoothest = Node(SmoothEstimate(), name='smooth_estimate')
    wk.connect(flame, 'zstats', smoothest, 'zstat_file')
    smoothest.inputs.mask_file = mask_file

    cluster = Node(Cluster(), name='cluster')
    wk.connect(smoothest, 'dlh', cluster, 'dlh')
    wk.connect(smoothest, 'volume', cluster, 'volume')
    cluster.inputs.connectivity = 26
    cluster.inputs.threshold = 2.3
    cluster.inputs.pthreshold = 0.05
    cluster.inputs.out_threshold_file = True
    cluster.inputs.out_index_file = True
    cluster.inputs.out_localmax_txt_file = True

    wk.connect(flame, 'zstats', cluster, 'in_file')

    ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                   name='z2pval')
    wk.connect(flame, 'zstats', ztopval, 'in_file')

    sinker = Node(DataSink(), name='sinker')
    sinker.inputs.base_directory = os.path.abspath(out_dir)
    sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                   ('_maths__', '_reversed_')]

    wk.connect(flame, 'zstats', sinker, 'stats')
    wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
    wk.connect(cluster, 'index_file', sinker, 'stats.@index')
    wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')

    if no_reversal == False:
        zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
        zstats_reverse.inputs.operation = 'mul'
        zstats_reverse.inputs.operand_value = -1
        wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

        cluster2 = cluster.clone(name='cluster2')
        wk.connect(smoothest, 'dlh', cluster2, 'dlh')
        wk.connect(smoothest, 'volume', cluster2, 'volume')
        wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

        ztopval2 = ztopval.clone(name='ztopval2')
        wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')

        wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
        wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
        wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
        wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')

    return wk
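Here dg.iterables = ('cope_id', num_copes) parameterizes the DataGrabber, so the merge, FLAMEO, clustering, and DataSink nodes are all repeated once per contrast. A hypothetical invocation is sketched below; the directory layout is an assumption, and the function relies on a module-level work_dir already being defined.

wk = group_onesample_openfmri(dataset_dir='/data/ds000114',          # hypothetical paths
                              model_id=1, task_id=1,
                              l1output_dir='/data/ds000114/l1output',
                              out_dir='/data/ds000114/group')
# wk.run(plugin='MultiProc')  # each cope_id value becomes its own branch of the graph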
Example 4: group_multregress_openfmri
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
def group_multregress_openfmri(dataset_dir, model_id=None, task_id=None, l1output_dir=None, out_dir=None,
                               no_reversal=False, plugin=None, plugin_args=None, flamemodel='flame1',
                               nonparametric=False, use_spm=False):

    meta_workflow = Workflow(name='mult_regress')
    meta_workflow.base_dir = work_dir
    for task in task_id:
        task_name = get_taskname(dataset_dir, task)
        cope_ids = l1_contrasts_num(model_id, task_name, dataset_dir)
        regressors_needed, contrasts, groups, subj_list = get_sub_vars(dataset_dir, task_name, model_id)
        for idx, contrast in enumerate(contrasts):
            wk = Workflow(name='model_%03d_task_%03d_contrast_%s' % (model_id, task, contrast[0][0]))

            info = Node(util.IdentityInterface(fields=['model_id', 'task_id', 'dataset_dir', 'subj_list']),
                        name='infosource')
            info.inputs.model_id = model_id
            info.inputs.task_id = task
            info.inputs.dataset_dir = dataset_dir

            dg = Node(DataGrabber(infields=['model_id', 'task_id', 'cope_id'],
                                  outfields=['copes', 'varcopes']), name='grabber')
            dg.inputs.template = os.path.join(l1output_dir,
                                              'model%03d/task%03d/%s/%scopes/%smni/%scope%02d.nii%s')
            if use_spm:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', 'spm/',
                                                     '', 'cope_id', '']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', 'spm/',
                                                        'var', 'cope_id', '.gz']]
            else:
                dg.inputs.template_args['copes'] = [['model_id', 'task_id', subj_list, '', '', '',
                                                     'cope_id', '.gz']]
                dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', '',
                                                        'var', 'cope_id', '.gz']]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]

            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')

            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

            mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            # flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')

            if nonparametric:
                palm = Node(Function(input_names=['cope_file', 'design_file', 'contrast_file',
                                                  'group_file', 'mask_file', 'cluster_threshold'],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')

            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file

            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True

            wk.connect(flame, 'zstats', cluster, 'in_file')

            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')

            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(out_dir, 'task%03d' % task, contrast[0][0])
# ......... part of the code omitted here .........
Example 5: create_workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
# ......... part of the code omitted here .........
    bandpass.inputs.highpass_sigma = 1. / (2 * TR * highpass_freq)
    if lowpass_freq < 0:
        bandpass.inputs.lowpass_sigma = -1
    else:
        bandpass.inputs.lowpass_sigma = 1. / (2 * TR * lowpass_freq)
    wf.connect(smooth, 'smoothed_file', bandpass, 'in_file')

    # Convert aparc to subject functional space
    aparctransform = wmcsftransform.clone("aparctransform")
    if fieldmap_images:
        wf.connect(fieldmap, 'exf_mask', aparctransform, 'source_file')
    else:
        wf.connect(calc_median, 'median_file', aparctransform, 'source_file')
    wf.connect(register, 'out_reg_file', aparctransform, 'reg_file')
    wf.connect(fssource, ('aparc_aseg', get_aparc_aseg),
               aparctransform, 'target_file')

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(freesurfer.SegStats(avgwf_txt_file=True,
                                              default_color_table=True),
                          iterfield=['in_file'],
                          name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
                                     list(range(49, 55)) + [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))
    wf.connect(aparctransform, 'transformed_file',
               sampleaparc, 'segmentation_file')
    wf.connect(bandpass, 'out_file', sampleaparc, 'in_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = os.environ['SUBJECTS_DIR']

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(bandpass, 'out_file', samplerlh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(bandpass, 'out_file', samplerrh, 'source_file')
    wf.connect(register, 'out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
Example 6: dict
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
import os  # needed for os.path and os.environ below (not shown in the original excerpt)

from nipype import Node, Function, Workflow, IdentityInterface
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.io import DataGrabber
#curr_dir_age = 'cmind_age00_raw'
#data_dir = '/home/data/madlab/data/mri/cmind/raw_data'
#sids = os.listdir('%s/%s' % (data_dir, curr_dir_age))
#sids = sids [:-1] #REMOVES THE .tar file
sids = ['783125', '783126', '783127', '783128', '783129', '783131', '783132', '783133']
info = dict(T1=[['subject_id']])
infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)
# Create a datasource node to get the T1 file
datasource = Node(DataGrabber(infields=['subject_id'], outfields=info.keys()),
                  name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath('/home/data/madlab/data/mri/seqtrd/')
datasource.inputs.field_template = dict(T1='%s/anatomy/T1_*.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.subjects_dir = os.environ['SUBJECTS_DIR']
reconall_node.inputs.terminal_output = 'allatonce'
reconall_node.plugin_args = {'bsub_args': '-q PQ_madlab -n 2', 'overwrite': True}
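The snippet stops before the workflow wiring. A plausible continuation (the connections and working directory below are assumptions based on common nipype usage, not the original script) would connect the iterating infosource to the grabber and to ReconAll:

from nipype import Workflow

wf = Workflow(name='reconall_wf', base_dir='/scratch/reconall_work')  # hypothetical directory
wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')
# wf.run(plugin='LSF')  # plugin choice is a guess, matching the bsub-style plugin_args above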
Example 7: create_surface_ols_workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
def create_surface_ols_workflow(name="surface_group",
                                subject_list=None,
                                exp_info=None):
    """Workflow to project ffx copes onto surface and run ols."""
    if subject_list is None:
        subject_list = []
    if exp_info is None:
        exp_info = lyman.default_experiment_parameters()

    inputnode = Node(IdentityInterface(["l1_contrast",
                                        "copes",
                                        "reg_file",
                                        "subject_id"]),
                     "inputnode")

    hemisource = Node(IdentityInterface(["hemi"]), "hemisource")
    hemisource.iterables = ("hemi", ["lh", "rh"])

    # Sample the volume-encoded native data onto the fsaverage surface
    # manifold with projection + spherical transform
    surfsample = MapNode(fs.SampleToSurface(
        sampling_method=exp_info["sampling_method"],
        sampling_range=exp_info["sampling_range"],
        sampling_units=exp_info["sampling_units"],
        smooth_surf=exp_info["surf_smooth"],
        target_subject="fsaverage"),
        ["subject_id", "reg_file", "source_file"], "surfsample")

    # Remove subjects with completely empty images
    removeempty = Node(RemoveEmpty(), "removeempty")

    # Concatenate the subject files into a 4D image
    mergecope = Node(fs.Concatenate(), "mergecope")

    # Run the one-sample OLS model
    glmfit = Node(fs.GLMFit(one_sample=True,
                            surf=True,
                            cortex=True,
                            glm_dir="_glm_results",
                            subject_id="fsaverage"),
                  "glmfit")

    # Use the cached Monte-Carlo simulations for correction
    cluster = Node(Function(["y_file",
                             "glm_dir",
                             "sign",
                             "cluster_zthresh",
                             "p_thresh"],
                            ["glm_dir",
                             "thresholded_file"],
                            glm_corrections,
                            imports),
                   "cluster")
    cluster.inputs.cluster_zthresh = exp_info["cluster_zthresh"]
    cluster.inputs.p_thresh = exp_info["grf_pthresh"]
    cluster.inputs.sign = exp_info["surf_corr_sign"]

    # Return the outputs
    outputnode = Node(IdentityInterface(["glm_dir", "sig_file"]), "outputnode")

    # Define and connect the workflow
    group = Workflow(name)
    group.connect([
        (inputnode, surfsample,
            [("copes", "source_file"),
             ("reg_file", "reg_file"),
             ("subject_id", "subject_id")]),
        (hemisource, surfsample,
            [("hemi", "hemi")]),
        (surfsample, removeempty,
            [("out_file", "in_files")]),
        (removeempty, mergecope,
            [("out_files", "in_files")]),
        (mergecope, glmfit,
            [("concatenated_file", "in_file")]),
        (hemisource, glmfit,
            [("hemi", "hemi")]),
        (mergecope, cluster,
            [("concatenated_file", "y_file")]),
        (glmfit, cluster,
            [("glm_dir", "glm_dir")]),
        (glmfit, outputnode,
            [("glm_dir", "glm_dir")]),
        (cluster, outputnode,
            [("thresholded_file", "sig_file")]),
        ])

    return group, inputnode, outputnode
Example 8: create_surfdist_workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
def create_surfdist_workflow(subjects_dir,
                             subject_list,
                             sources,
                             target,
                             hemi,
                             atlas,
                             labs,
                             name):

    sd = Workflow(name=name)

    # Run a separate tree for each template, hemisphere and source structure
    infosource = Node(IdentityInterface(fields=['template', 'hemi', 'source']), name="infosource")
    infosource.iterables = [('template', target), ('hemi', hemi), ('source', sources)]

    # Get template files
    fsst = Node(FreeSurferSource(), name='FS_Source_template')
    fsst.inputs.subjects_dir = subjects_dir

    sd.connect(infosource, 'template', fsst, 'subject_id')
    sd.connect(infosource, 'hemi', fsst, 'hemi')

    # Generate folder name for output
    genfoldname = Node(Function(input_names=['hemi', 'source', 'target'],
                                output_names=['cname'], function=genfname),
                       name='genfoldname')
    sd.connect(infosource, 'hemi', genfoldname, 'hemi')
    sd.connect(infosource, 'source', genfoldname, 'source')
    sd.connect(infosource, 'template', genfoldname, 'target')

    # Get subjects
    fss = Node(FreeSurferSource(), name='FS_Source')
    fss.iterables = ('subject_id', subject_list)
    fss.inputs.subjects_dir = subjects_dir
    fss.inputs.subject_id = subject_list

    sd.connect(infosource, 'hemi', fss, 'hemi')

    # Trim labels
    tlab = Node(Function(input_names=['itemz', 'phrase'],
                         output_names=['item'], function=trimming),
                name='tlab')
    tlab.inputs.phrase = labs
    sd.connect(fss, 'label', tlab, 'itemz')

    # Trim annotations
    tannot = Node(Function(input_names=['itemz', 'phrase'],
                           output_names=['item'], function=trimming),
                  name='tannot')
    tannot.inputs.phrase = atlas
    sd.connect(fss, 'annot', tannot, 'itemz')

    # Calculate distances for each hemi
    sdist = Node(Function(input_names=['surface', 'labels', 'annot', 'reg', 'origin', 'target'],
                          output_names=['distances'], function=calc_surfdist),
                 name='sdist')
    sd.connect(infosource, 'source', sdist, 'origin')
    sd.connect(fss, 'pial', sdist, 'surface')
    sd.connect(tlab, 'item', sdist, 'labels')
    sd.connect(tannot, 'item', sdist, 'annot')
    sd.connect(fss, 'sphere_reg', sdist, 'reg')
    sd.connect(fsst, 'sphere_reg', sdist, 'target')

    # Gather data for each hemi from all subjects
    bucket = JoinNode(Function(input_names=['files', 'hemi', 'source', 'target'],
                               output_names=['group_dist'],
                               function=stack_files),
                      joinsource=fss, joinfield='files', name='bucket')
    sd.connect(infosource, 'source', bucket, 'source')
    sd.connect(infosource, 'template', bucket, 'target')
    sd.connect(infosource, 'hemi', bucket, 'hemi')
    sd.connect(sdist, 'distances', bucket, 'files')

    # Sink the data
    datasink = Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False
    datasink.inputs.base_directory = os.path.abspath(args.sink)
    sd.connect(genfoldname, 'cname', datasink, 'container')
    sd.connect(bucket, 'group_dist', datasink, 'group_distances')

    return sd
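When iterables is a list of (field, values) pairs, as on infosource above, nipype takes the Cartesian product by default, so every template, hemisphere, and source combination gets its own subgraph (and fss.iterables multiplies that again by subject, joined back together by the JoinNode). A tiny sketch of the same idea with made-up values:

from nipype import Node, IdentityInterface

info = Node(IdentityInterface(fields=['template', 'hemi', 'source']), name='info')
info.iterables = [('template', ['fsaverage5']),
                  ('hemi', ['lh', 'rh']),
                  ('source', ['pial', 'inflated'])]
# -> 1 x 2 x 2 = 4 parameterizations by default
# Setting info.synchronize = True would instead pair the value lists positionally.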
Example 9: Node
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
# (the beginning of the antsreg node definition, presumably an ANTs Registration
# interface given the parameters and the name 'antsreg', is omitted in this excerpt)
                      sampling_strategy=['Regular', 'Regular', 'None'],
                      shrink_factors=[[8, 4, 2, 1]] * 3,
                      smoothing_sigmas=[[3, 2, 1, 0]] * 3,
                      transform_parameters=[(0.1,), (0.1,),
                                            (0.1, 3.0, 0.0)],
                      use_histogram_matching=True,
                      write_composite_transform=True),
               name='antsreg')

###
# Input & Output Stream

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
anat_file = opj('sub-{subject_id}', 'ses-test', 'anat',
                'sub-{subject_id}_ses-test_T1w.nii.gz')
templates = {'anat': anat_file}

selectfiles = Node(SelectFiles(templates,
                               base_directory='/data/ds000114'),
                   name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")

# Use the following DataSink output substitutions
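The excerpt ends right where the substitutions list would start; a typical pattern (these exact tuples are an assumption modeled on the nipype tutorials, not the original code) looks like:

substitutions = [('_subject_id_', ''),           # hypothetical cleanup of iterable-derived folder names
                 ('_ses-test_T1w_corrected', '')]
datasink.inputs.substitutions = substitutions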
Example 10: Workflow
# Required module: from nipype import Node [as alias]
# Or alternatively: from nipype.Node import iterables [as alias]
import os  # needed for the path handling below (not shown in the original excerpt)

import numpy as np
from nipype import Function
from nipype import Node
from nipype import Workflow
from nipype import IdentityInterface

ds = "/storage/gablab001/data/genus/GIT/genus/fs_cog/pred_diag/data_sets"
data_sets = [os.path.join(ds, x) for x in os.listdir(ds) if ".csv" in x]
response_var = os.path.join(ds, "response.txt")

wf = Workflow(name="classify_disease")
wf.base_dir = "/om/scratch/Sat/ysa"

Iternode = Node(IdentityInterface(fields=['data', 'classifier']), name="Iternode")
Iternode.iterables = [
    ('data', data_sets),
    ('classifier', ['et', 'lg'])
]

def run(data, classifier, response):
    import numpy as np
    import pandas as pd
    from custom import Mods
    from custom import utils

    y = np.genfromtxt(response)
    X = pd.read_csv(data)
    data_mod = data.split('/')[-1].replace('.csv', '')
    if classifier == 'et':
        od = '/storage/gablab001/data/genus/GIT/genus/fs_cog/pred_diag/extra_trees/results/'
        on = classifier + '_{}.pkl'.format(data_mod)
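The fragment is cut off mid-function. In this pattern the run() function is usually wrapped in a Function node fed by Iternode, roughly as below; the wiring is an assumption for illustration, not the original continuation.

from nipype import Function

runner = Node(Function(input_names=['data', 'classifier', 'response'],
                       output_names=['result'],
                       function=run),
              name='runner')
runner.inputs.response = response_var
wf.connect(Iternode, 'data', runner, 'data')
wf.connect(Iternode, 'classifier', runner, 'classifier')
# wf.run()  # one run per (data, classifier) combination from Iternode.iterables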