

Python Node.iterables code examples

This article collects typical usage examples of nipype.pipeline.engine.Node.iterables in Python (strictly speaking an attribute that is set by assignment, although this page indexes it as a method). If you are unsure what Node.iterables does, how to use it, or want to see it in real code, the hand-picked examples below should help. You can also explore other usage examples of nipype.pipeline.engine.Node.


The 15 code examples of Node.iterables below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
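Before the examples, here is a minimal sketch of the basic pattern (the node name, field names, and values are hypothetical, not taken from any example below): assigning a list of (field, values) tuples to a node's iterables attribute tells nipype to expand the downstream graph into one copy per parameter combination.

from nipype.pipeline.engine import Node
import nipype.interfaces.utility as util

# An IdentityInterface node is the usual carrier for iterables.
infosource = Node(util.IdentityInterface(fields=['subject_id', 'fwhm']),
                  name='infosource')

# Each tuple is (input_field, list_of_values). Two independent fields
# yield the full cross product: 2 subjects x 2 kernels = 4 runs.
infosource.iterables = [('subject_id', ['sub-01', 'sub-02']),
                        ('fwhm', [4, 8])]

# Setting infosource.synchronize = True would instead pair the lists
# element-wise (sub-01/4, sub-02/8), as Example 2 below does.

A single-field form, e.g. node.iterables = ('subject_id', subject_list), is also common throughout the examples.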

Example 1: create

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # equivalent to the original eval() on a built string, without string evaluation
        iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True, name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  # TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun': 'false',  # stops at the first attempt to rerun, before running and before deleting previous results
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # Multi-core 2011a  multi-core for matrix multiplication.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # relative paths should be on, require hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
             (metaflow, fusionflow,
              [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
             (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
             ])
Author: NIRALUser, Project: BRAINSTools, Lines: 56, Source: crossValidate.py

Example 2: create_conversion

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", scans.keys()), ("scan_val", scans.values())]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
Author: JanisReinelt, Project: pipelines, Lines: 52, Source: convert.py

Example 3: mean_con_mat_wf

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def mean_con_mat_wf(subjects_list,
                    preprocessed_data_dir,
                    working_dir,
                    ds_dir,
                    parcellations_list,
                    extraction_methods_list,
                    bp_freq_list,
                    use_n_procs,
                    plugin_name):

    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode, JoinNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo
    from utils import aggregate_data


    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='mean_con_mats')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_conmats_test')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]



    #####################################
    # SET ITERATORS
    #####################################
    # SUBJECTS ITERATOR
    subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
    subjects_infosource.iterables = ('subject_id', subjects_list)

    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=['parcellation']), name='parcellation_infosource')
    parcellation_infosource.iterables = ('parcellation', parcellations_list)

    # BP FILTER ITERATOR
    bp_filter_infosource = Node(util.IdentityInterface(fields=['bp_freqs']), name='bp_filter_infosource')
    bp_filter_infosource.iterables = ('bp_freqs', bp_freq_list)

    # EXTRACTION METHOD ITERATOR
    extraction_method_infosource = Node(util.IdentityInterface(fields=['extraction_method']),
                                        name='extraction_method_infosource')
    extraction_method_infosource.iterables = ('extraction_method', extraction_methods_list)



    def create_file_list_fct(subjects_list, base_path, parcellation, bp_freqs, extraction_method):
        import os

        file_list = []
        for s in subjects_list:
            file_list.append(os.path.join(base_path, s, 'metrics/con_mat/matrix',
                                          'bp_%s.%s' % bp_freqs,
                                          parcellation, extraction_method, 'matrix.pkl'))
        return file_list

    create_file_list = Node(util.Function(input_names=['subjects_list', 'base_path', 'parcellation', 'bp_freqs',
                                                       'extraction_method'],
                                          output_names=['file_list'],
                                          function=create_file_list_fct),
                            name='create_file_list')
    create_file_list.inputs.subjects_list = subjects_list
    create_file_list.inputs.base_path = preprocessed_data_dir
    wf.connect(parcellation_infosource, 'parcellation', create_file_list, 'parcellation')
    wf.connect(bp_filter_infosource, 'bp_freqs', create_file_list, 'bp_freqs')
    wf.connect(extraction_method_infosource, 'extraction_method', create_file_list, 'extraction_method')




    aggregate = Node(util.Function(input_names=['file_list'],
                                   output_names=['merged_file'],
                                   function=aggregate_data),
                     name='aggregate')
    wf.connect(create_file_list, 'file_list', aggregate, 'file_list')



    def plot_matrix_fct(in_file):
        import pickle, os
        import pylab as plt
#......... remainder of this code omitted .........
Author: fliem, Project: LeiCA, Lines: 103, Source: mean_con_mat_wf.py

Example 4: Workflow

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
"""
Created on Tue Aug 25 11:39:05 2015

@author: craigmoodie
"""

from nipype.pipeline.engine import Workflow, Node, MapNode
from variables import data_dir, work_dir, subject_list, plugin, plugin_args


surface_workflow = Workflow(name="qc_workflow")

surface_workflow.base_dir = work_dir


from nipype import SelectFiles
templates = dict(T1="*_{subject_id}_*/T1w_MPR_BIC_v1/*00001.nii*")
file_list = Node(SelectFiles(templates), name="EPI_and_T1_File_Selection")
file_list.inputs.base_directory = data_dir
file_list.iterables = [("subject_id", subject_list)]


from nipype.interfaces.freesurfer import ReconAll
reconall = Node(ReconAll(), name="Recon_All")
reconall.inputs.subject_id = "subject_id"  # literal placeholder in the original; normally set to a real subject ID
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = data_dir
#reconall.inputs.T1_files = "T1"
#reconall.run()

surface_workflow.connect(file_list,"T1", reconall, "T1_files")
surface_workflow.run(plugin=plugin, plugin_args=plugin_args)
Author: poldracklab, Project: qc_pipeline, Lines: 33, Source: recon_all.py

Example 5: Workflow

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
working_dir = '/scr/ilz3/myelinconnect/working_dir/'
data_dir = '/scr/ilz3/myelinconnect/'
out_dir = '/scr/ilz3/myelinconnect/final_struct_space/rest1_1_trans'

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# main workflow
smooth = Workflow(name='smooth')
smooth.base_dir = working_dir
smooth.config['execution']['crashdump_dir'] = smooth.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']),
                          name='subject_infosource')
subject_infosource.iterables = [('subject', subjects_db)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']),
                          name='session_infosource')
session_infosource.iterables = [('session', sessions)]

# select files
templates = {'rest': 'final_struct_space/rest1_1_trans/{subject}_{session}_denoised_trans.nii.gz'}
selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                   name="selectfiles")

smooth.connect([(subject_infosource, selectfiles, [('subject', 'subject')]),
                 (session_infosource, selectfiles, [('session', 'session')])
                 ])
Author: fliem, Project: myelinconnect, Lines: 33, Source: smooth_func.py

Example 6: Node

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
preproc.connect(remove_noise, "out_file", bandpass_filter, "in_file")
preproc.connect(bandpass_filter, "out_file", outputnode, "filtered_file")


###################################################################################################################################
# in and out
preproc.base_dir = "/scr/kansas1/huntenburg/"
preproc.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/"
data_dir = "/scr/"
subjects = ["LEMON006"]  # ,'LEMON001','LEMON087','LEMON030','LEMON044','LEMON071']


# infosource to iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subject_infosource")
subject_infosource.iterables = ("subject_id", subjects)


# infosource to iterate over coregistration methods
cor_method_infosource = Node(util.IdentityInterface(fields=["cor_method"]), name="cor_method_infosource")
cor_method_infosource.iterables = ("cor_method", ["lin_ts"])  # , 'lin_ts', 'nonlin_ts', 'fmap_ts', 'topup_ts'])


# select files
templates = {
    "timeseries": "kansas1/huntenburg/*_timeseries/_subject_id_{subject_id}/*apply*/{cor_method}.nii.gz",
    #'par_file':'jessica2/Schaare/LEMON/preprocessed/{subject_id}/motion_correction/rest_moco.nii.gz.par'
}

selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir), name="selectfiles")
Author: juhuntenburg, Project: nonlinear_coreg, Lines: 32, Source: preproc_conn.py

Example 7: learning_predict_data_2samp_wf

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if isinstance(i, list) else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
#......... remainder of this code omitted .........
Author: fliem, Project: LeiCA_LIFE, Lines: 103, Source: learning_predict_data_wf.py

Example 8: calc_centrality_metrics

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def calc_centrality_metrics(cfg):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    import CPAC.network_centrality.resting_state_centrality as cpac_centrality
    import CPAC.network_centrality.z_score as cpac_centrality_z_score


    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    preprocessed_data_dir = cfg['preprocessed_data_dir']

    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subjects_list = cfg['subjects_list']
    TR_list = cfg['TR_list']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']



    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.substitutions = [('_TR_id_', 'TR_')]
    ds.inputs.regexp_substitutions = [('_subject_id_[A0-9]*/', ''),
                                      ('_z_score[0-9]*/', '')]  # also in the original, commented out: ('dc/_TR_id_[0-9]*/', ''), ('evc/_TR_id_[0-9]*/', '')

    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)

    subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
    subjects_infosource.iterables = ('subject_id', subjects_list)

    def add_subject_id_to_ds_dir_fct(subject_id, ds_path):
        import os
        out_path = os.path.join(ds_path, subject_id)
        return out_path

    add_subject_id_to_ds_dir = Node(util.Function(input_names=['subject_id', 'ds_path'],
                                                  output_names=['out_path'],
                                                  function=add_subject_id_to_ds_dir_fct),
                                    name='add_subject_id_to_ds_dir')
    wf.connect(subjects_infosource, 'subject_id', add_subject_id_to_ds_dir, 'subject_id')
    add_subject_id_to_ds_dir.inputs.ds_path = ds_dir

    wf.connect(add_subject_id_to_ds_dir, 'out_path', ds, 'base_directory')


    # get atlas data
    templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                         'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                         'FSL_MNI_3mm_template': 'MNI152_T1_3mm_brain.nii.gz',
                         'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
                         'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
                         'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
                         'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
                         'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
                         }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")


    # GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
    selectfiles_templates = {
        'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
        'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
        'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
        'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
        'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
#......... remainder of this code omitted .........
Author: Yaqiongxiao, Project: LeiCA, Lines: 103, Source: calc_metrics.py

Example 9: downsampel_surfs

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def downsampel_surfs(subject_id,
                     working_dir,
                     freesurfer_dir,
                     ds_dir,
                     plugin_name,
                     use_n_procs):
    '''
    Workflow resamples e.g. native thickness maps to fsaverage5 space
    '''

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    fs.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='freesurfer_downsample')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 15})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.parameterization = False



    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    infosource = Node(util.IdentityInterface(fields=['hemi', 'surf_measure', 'fwhm', 'target']), name='infosource')
    infosource.iterables = [('hemi', ['lh', 'rh']),
                            ('surf_measure', ['thickness', 'area']),
                            ('fwhm', [0, 5, 10, 20]),
                            ('target', ['fsaverage3', 'fsaverage4', 'fsaverage5']),
                            ]

    downsample = Node(fs.model.MRISPreproc(), name='downsample')
    downsample.inputs.subjects = [subject_id]
    wf.connect(infosource, 'target', downsample, 'target')
    wf.connect(infosource, 'hemi', downsample, 'hemi')
    wf.connect(infosource, 'surf_measure', downsample, 'surf_measure')
    wf.connect(infosource, 'fwhm', downsample, 'fwhm_source')

    rename = Node(util.Rename(format_string='%(hemi)s.%(surf_measure)s.%(target)s.%(fwhm)smm'), name='rename')
    rename.inputs.keep_ext = True
    wf.connect(infosource, 'target', rename, 'target')
    wf.connect(infosource, 'hemi', rename, 'hemi')
    wf.connect(infosource, 'surf_measure', rename, 'surf_measure')
    wf.connect(infosource, 'fwhm', rename, 'fwhm')
    wf.connect(downsample, 'out_file', rename, 'in_file')

    wf.connect(rename, 'out_file', ds, 'surfs.@surfs')  # DataSink target garbled in the source page (email-obfuscation residue); 'surfs.@surfs' is a guess





    #
    wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf')  # 'hierarchical')
    wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
    wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')

    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Author: fliem, Project: LeiCA_LIFE, Lines: 73, Source: downsample_surfs.py

Example 10: preprocessing_pipeline

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def preprocessing_pipeline(cfg):
    import os

    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, JoinNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import nipype.interfaces.freesurfer as freesurfer

    # LeiCA modules
    from utils import zip_and_save_running_scripts
    from preprocessing.rsfMRI_preprocessing import create_rsfMRI_preproc_pipeline
    from preprocessing.converter import create_converter_structural_pipeline, create_converter_functional_pipeline, \
        create_converter_diffusion_pipeline
    from sca import create_sca_pipeline

    # INPUT PARAMETERS
    dicom_dir = cfg['dicom_dir']
    working_dir = cfg['working_dir']
    freesurfer_dir = cfg['freesurfer_dir']
    template_dir = cfg['template_dir']
    script_dir = cfg['script_dir']
    ds_dir = cfg['ds_dir']

    subject_id = cfg['subject_id']
    TR_list = cfg['TR_list']

    vols_to_drop = cfg['vols_to_drop']
    rois_list = cfg['rois_list']
    lp_cutoff_freq = cfg['lp_cutoff_freq']
    hp_cutoff_freq = cfg['hp_cutoff_freq']
    use_fs_brainmask = cfg['use_fs_brainmask']

    use_n_procs = cfg['use_n_procs']
    plugin_name = cfg['plugin_name']


    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    freesurfer.FSCommand.set_default_subjects_dir(freesurfer_dir)

    wf = Workflow(name='LeiCA_resting')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': True,
                                 'remove_unnecessary_outputs': True,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')


    #####################################
    # SET ITERATORS
    #####################################
    # GET SCAN TR_ID ITERATOR
    scan_infosource = Node(util.IdentityInterface(fields=['TR_id']), name='scan_infosource')
    scan_infosource.iterables = ('TR_id', TR_list)




    #####################################
    # FETCH MRI DATA
    #####################################
    # GET LATERAL VENTRICLE MASK
    templates_atlases = {'lat_ventricle_mask_MNI': 'cpac_image_resources/HarvardOxford-lateral-ventricles-thr25-2mm.nii.gz'}
    selectfiles_templates = Node(nio.SelectFiles(templates_atlases,
                                                 base_directory=template_dir),
                                 name="selectfiles_templates")

    if not subject_id.startswith('A000'): # releases 1-6 with 01... format subject_id
        # GET FUNCTIONAL DATA
        templates_funct = {'funct_dicom': '{subject_id}/session_1/RfMRI_*_{TR_id}'}

        selectfiles_funct = Node(nio.SelectFiles(templates_funct,
                                                 base_directory=dicom_dir),
                                 name="selectfiles_funct")
        selectfiles_funct.inputs.subject_id = subject_id

        wf.connect(scan_infosource, 'TR_id', selectfiles_funct, 'TR_id')


        # GET STRUCTURAL DATA
        templates_struct = {'t1w_dicom': '{subject_id}/anat',
                            'dMRI_dicom': '{subject_id}/session_1/DTI_mx_137/*.dcm'} # *.dcm for dMRI as Dcm2nii requires this

        selectfiles_struct = Node(nio.SelectFiles(templates_struct,
                                                  base_directory=dicom_dir),
                                  name="selectfiles_struct")
        selectfiles_struct.inputs.subject_id = subject_id



    else:  # starting with release 6, new folder structure
        templates_funct = {'funct_dicom': '*/{subject_id}/*_V2/REST_{TR_id}*/*.dcm'}
#......... remainder of this code omitted .........
Author: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 103, Source: preprocessing_pipeline.py

Example 11: create_lsd_resting

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def create_lsd_resting(subject, working_dir, out_dir, freesurfer_dir, data_dir, 
                    echo_space, te_diff, vol_to_remove, scans, epi_resolution,
                    TR, highpass, lowpass):
    
    # main workflow
    func_preproc = Workflow(name='lsd_resting')
    func_preproc.base_dir = working_dir
    func_preproc.config['execution']['crashdump_dir'] = func_preproc.base_dir + "/crash_files"
    
    
    # set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    
    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=['scan_id']),
                           name='scan_infosource')
    scan_infosource.iterables = ('scan_id', scans)
    
    # function node to get fieldmap information
    def fmap_info(scan_id):
        if scan_id == 'rest1a':
            fmap_id = 'fmap1'
            pe_dir = 'y-'
        elif scan_id == 'rest1b':
            fmap_id = 'fmap1'
            pe_dir = 'y'
        elif scan_id == 'rest2a':
            fmap_id = 'fmap2'
            pe_dir = 'y-'
        elif scan_id == 'rest2b':
            fmap_id = 'fmap2'
            pe_dir = 'y'
        return fmap_id, pe_dir
    
    fmap_infosource = Node(util.Function(input_names=['scan_id'],
                                         output_names=['fmap_id', 'pe_dir'],
                                         function=fmap_info),
                           name='fmap_infosource')
            
    # select files
    templates = {'func': 'nifti/lsd_resting/{scan_id}.nii.gz',
                 'fmap_phase': 'nifti/lsd_resting/{fmap_id}_phase.nii.gz',
                 'fmap_mag': 'nifti/lsd_resting/{fmap_id}_mag.nii.gz',
                 'anat_head': 'preprocessed/anat/T1.nii.gz',
                 'anat_brain': 'preprocessed/anat/T1_brain.nii.gz',
                 'func_mask': 'preprocessed/anat/func_mask.nii.gz',
                 }
    selectfiles = Node(nio.SelectFiles(templates,
                                       base_directory=data_dir),
                       name="selectfiles")
    
    
    # node to strip rois
    remove_vol = Node(util.Function(input_names=['in_file','t_min'],
                                    output_names=["out_file"],
                                    function=strip_rois_func),
                      name='remove_vol')
    remove_vol.inputs.t_min = vol_to_remove
    
    
    # workflow for motion correction
    moco = create_moco_pipeline()
    
    # workflow for fieldmap correction and coregistration
    fmap_coreg = create_fmap_coreg_pipeline()
    fmap_coreg.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    fmap_coreg.inputs.inputnode.fs_subject_id = subject
    fmap_coreg.inputs.inputnode.echo_space = echo_space
    fmap_coreg.inputs.inputnode.te_diff = te_diff
    
    # workflow for applying transformations to timeseries
    transform_ts = create_transform_pipeline()
    transform_ts.inputs.inputnode.resolution = epi_resolution
    
    # workflow to denoise timeseries
    denoise = create_denoise_pipeline()
    denoise.inputs.inputnode.highpass_sigma = 1. / (2 * TR * highpass)
    denoise.inputs.inputnode.lowpass_sigma = 1. / (2 * TR * lowpass)
    # https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4 
    denoise.inputs.inputnode.tr = TR
    
    #sink to store files of single scans
    sink = Node(nio.DataSink(parameterization=False,
                             base_directory=out_dir,
                             substitutions=[('fmap1_phase_fslprepared', 'fieldmap'),
                                            ('fmap2_phase_fslprepared', 'fieldmap'),
                                            ('fieldmap_fslprepared_fieldmap_unmasked_vsm', 'shiftmap'),
                                            ('plot.rest_coregistered', 'outlier_plot'),
                                            ('filter_motion_comp_norm_compcor_art_dmotion', 'nuissance_matrix'),
                                            ('rest_realigned.nii.gz_abs.rms', 'rest_realigned_abs.rms'),
                                            ('rest_realigned.nii.gz.par','rest_realigned.par'),
                                            ('rest_realigned.nii.gz_rel.rms', 'rest_realigned_rel.rms'),
                                            ('rest_realigned.nii.gz_abs_disp', 'abs_displacement_plot'),
                                            ('rest_realigned.nii.gz_rel_disp', 'rel_displacment_plot'),
                                            ('art.rest_coregistered_outliers', 'outliers'),
                                            ('global_intensity.rest_coregistered', 'global_intensity'),
                                            ('norm.rest_coregistered', 'composite_norm'),
                                            ('stats.rest_coregistered', 'stats'),
                                            ('rest_denoised_bandpassed_norm.nii.gz', 'rest_preprocessed.nii.gz')
                                            ]),
#......... remainder of this code omitted .........
Author: NeuroanatomyAndConnectivity, Project: pipelines, Lines: 103, Source: lsd_resting.py

Example 12: create_preproc_func_pipeline

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
def create_preproc_func_pipeline(data_dir=None, subject_id=None, task_list=None):

	###############################
	## Set up Nodes
	###############################

	ds = Node(nio.DataGrabber(infields=['subject_id', 'task_id'], outfields=['func', 'struc']),name='datasource')
	ds.inputs.base_directory = os.path.abspath(data_dir + '/' + subject_id)
	ds.inputs.template = '*'
	ds.inputs.sort_filelist = True
	ds.inputs.template_args = {'func': [['task_id']], 'struc':[]}
	ds.inputs.field_template = {'func': 'Functional/Raw/%s/func.nii','struc': 'Structural/SPGR/spgr.nii'}
	ds.inputs.subject_id = subject_id
	ds.inputs.task_id = task_list
	ds.iterables = ('task_id', task_list)
	# ds.run().outputs #show datafiles

	# #Setup Data Sinker for writing output files
	# datasink = Node(nio.DataSink(), name='sinker')
	# datasink.inputs.base_directory = '/path/to/output'
	# workflow.connect(realigner, 'realignment_parameters', datasink, '[email protected]')
	# datasink.inputs.substitutions = [('_variable', 'variable'),('file_subject_', '')]

	#Get Timing Acquisition for slice timing
	tr = 2
	ta = Node(interface=util.Function(input_names=['tr', 'n_slices'], output_names=['ta'],  function = get_ta), name="ta")
	ta.inputs.tr=tr

	#Slice Timing: sequential ascending 
	slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
	slice_timing.inputs.time_repetition = tr
	slice_timing.inputs.ref_slice = 1

	#Realignment - 6 parameters - realign to first image of very first series.
	realign = Node(interface=spm.Realign(), name="realign")
	realign.inputs.register_to_mean = True

	#Plot Realignment
	plot_realign = Node(interface=PlotRealignmentParameters(), name="plot_realign")

	#Artifact Detection
	art = Node(interface=ra.ArtifactDetect(), name="art")
	art.inputs.use_differences      = [True,False]
	art.inputs.use_norm             = True
	art.inputs.norm_threshold       = 1
	art.inputs.zintensity_threshold = 3
	art.inputs.mask_type            = 'file'
	art.inputs.parameter_source     = 'SPM'

	#Coregister - 12 parameters, cost function = 'nmi', fwhm 7, interpolate, don't mask
	#anatomical to functional mean across all available data.
	coregister = Node(interface=spm.Coregister(), name="coregister")
	coregister.inputs.jobtype = 'estimate'

	# Segment structural, gray/white/csf,mni, 
	segment = Node(interface=spm.Segment(), name="segment")
	segment.inputs.save_bias_corrected = True

	#Normalize - structural to MNI - then apply this to the coregistered functionals
	normalize = Node(interface=spm.Normalize(), name = "normalize")
	normalize.inputs.template = os.path.abspath(t1_template_file)

	#Plot normalization Check
	plot_normalization_check = Node(interface=Plot_Coregistration_Montage(), name="plot_normalization_check")
	plot_normalization_check.inputs.canonical_img = canonical_file

	#Create Mask
	compute_mask = Node(interface=ComputeMask(), name="compute_mask")
	#remove lower 5% of histogram of mean image
	compute_mask.inputs.m = .05

	#Smooth
	#implicit masking (.im) = 0, dtype = 0
	smooth = Node(interface=spm.Smooth(), name = "smooth")
	fwhmlist = [8]
	smooth.iterables = ('fwhm', fwhmlist)

	#Create Covariate matrix
	make_covariates = Node(interface=Create_Covariates(), name="make_covariates")

	###############################
	## Create Pipeline
	###############################

	Preprocessed = Workflow(name="Preprocessed")
	Preprocessed.base_dir = os.path.abspath(data_dir + '/' + subject_id + '/Functional')

	Preprocessed.connect([(ds, ta, [(('func', get_n_slices), "n_slices")]),
						(ta, slice_timing, [("ta", "time_acquisition")]),
						(ds, slice_timing, [('func', 'in_files'),
											(('func', get_n_slices), "num_slices"),
											(('func', get_slice_order), "slice_order"),
											]),
						(slice_timing, realign, [('timecorrected_files', 'in_files')]),
						(realign, compute_mask, [('mean_image','mean_volume')]),
						(realign,coregister, [('mean_image', 'target')]),
						(ds,coregister, [('struc', 'source')]),
						(coregister,segment, [('coregistered_source', 'data')]),
						(segment, normalize, [('transformation_mat','parameter_file'),
											('bias_corrected_image', 'source'),]),
#......... remainder of this code omitted .........
Author: GordonMatthewson, Project: CosanlabToolbox, Lines: 103, Source: Nipype_SPM_Preproc.py

Example 13: Workflow

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
from nipype.pipeline.engine import Workflow, Node, MapNode  # needed for the nodes below; not shown in this excerpt
from variables_AFNI import data_dir, work_dir, subject_list, plugin, plugin_args

import nibabel as nb



Afni_workflow = Workflow(name="Afni_volreg_workflow")

Afni_workflow.base_dir = work_dir


from nipype import SelectFiles
templates = dict(epi="*_{subject_id}_*/rfMRI_REST_{pe_dir}_BIC_v2/*_00001.nii*")
file_list = Node(SelectFiles(templates), name="EPI_and_T1_File_Selection")
file_list.inputs.base_directory = data_dir
file_list.iterables = [("subject_id", subject_list), ("pe_dir", ["LR", "RL"])]



from nipype.interfaces.afni import Volreg
volreg_motion = MapNode(Volreg(), name="Afni_Motion_Correction", iterfield="in_file")
volreg_motion.inputs.args = '-Fourier -twopass'
volreg_motion.inputs.zpad = 1
#volreg_motion.inputs.basefile = "in_file"
volreg_motion.inputs.outputtype = "NIFTI_GZ"
volreg_motion.inputs.verbose = True
volreg_motion.inputs.out_file = "afni_test"
volreg_motion.inputs.oned_file = "S0799AAW_P126317_6_7_00001"
volreg_motion.inputs.oned_matrix_save = "S0799AAW_P126317_6_7_00001"

Author: poldracklab, Project: qc_pipeline, Lines: 31, Source: Func_QC_AfniMC.py

Example 14: mat_to_nii

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
#     if s.endswith('_roi.mat'):
#         mat_to_nii(s)
#         s = s.replace('_roi.mat', '.nii')

# Nuisance variables
nuisance_masks = ['/data/mridata/SeeleyToolbox/SeeleyFirstLevel/proc/csf_ant_post_bilateral.nii',
                  '/data/mridata/SeeleyToolbox/SeeleyFirstLevel/proc/avg152T1_white_mask.nii']

# TR
TR = 2.0

# assumed imports for this excerpt (not shown in the original):
# from nipype.pipeline.engine import Node
# from nipype.interfaces.utility import IdentityInterface, Merge as utilMerge
# from nipype.interfaces.io import DataGrabber

## CREATE NODES
# For distributing subject paths
infosource = Node(IdentityInterface(fields=['subject_path', 'seed']),
                  name="infosource")
infosource.iterables = [('subject_path', subjdir), ('seed', all_seeds)]

info = dict(func = [['subject_path', '/processedfmri_TRCNnSFmDI/images/swua_filteredf*.nii']],
            motion = [['subject_path', '/processedfmri_TRCNnSFmDI/motion_params_filtered.txt']])

selectfiles = Node(DataGrabber(infields = ['subject_path'],
                              outfields = ['func', 'motion'],
                              base_directory = '/',
                              template = '%s/%s',
                              template_args = info,
                              sort_filelist = True),
                  name = 'selectfiles')

# For merging seed and nuisance mask paths and then distributing them downstream
seed_plus_nuisance = Node(utilMerge(2), name = 'seed_plus_nuisance')
seed_plus_nuisance.inputs.in2 = nuisance_masks
Author: seeleylab, Project: first_level, Lines: 33, Source: first_level.py

Example 15: Workflow

# Required import: from nipype.pipeline.engine import Node [as alias]
# Alternatively: from nipype.pipeline.engine.Node import iterables [as alias]
working_dir = "/scr/ilz3/myelinconnect/working_dir/mappings_fixhdr/"
data_dir = "/scr/ilz3/myelinconnect/"
out_dir = "/scr/ilz3/myelinconnect/transformations/"

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type("NIFTI_GZ")


# main workflow
mappings = Workflow(name="mappings")
mappings.base_dir = working_dir
mappings.config["execution"]["crashdump_dir"] = mappings.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject"]), name="subject_infosource")
subject_infosource.iterables = [("subject", subjects_db)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=["session"]), name="session_infosource")
session_infosource.iterables = [("session", sessions)]

# select files
templates = {
    "median": "resting/preprocessed/{subject}/{session}/realignment/corr_{subject}_{session}_roi_detrended_median_corrected.nii.gz",
    "median_mapping": "mappings/rest/fixed_hdr/corr_{subject}_{session}_*mapping_fixed.nii.gz",
    #'t1_mapping': 'mappings/t1/{subject}*T1_Images_merged_mapping.nii.gz',
    "t1_highres": "struct/t1/{subject}*T1_Images_merged.nii.gz",
    "epi2highres_lin_itk": "resting/preprocessed/{subject}/{session}/registration/epi2highres_lin.txt",
    "epi2highres_warp": "resting/preprocessed/{subject}/{session}/registration/transform0Warp.nii.gz",
    "epi2highres_invwarp": "resting/preprocessed/{subject}/{session}/registration/transform0InverseWarp.nii.gz",
    #'t1_prep_rh' : 'struct/surf_rh/prep_t1/smooth_1.5/{subject}_rh_mid_T1_avg_smoothdata_data.nii.gz',
Author: fliem, Project: myelinconnect, Lines: 33, Source: mappings.py


Note: the nipype.pipeline.engine.Node.iterables examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the code remains with the original authors. For distribution and use, refer to the License of the corresponding project; do not reproduce without permission.