

Python Workflow.base_dir Method Code Examples

This article collects typical usage examples of the Python method nipype.pipeline.engine.Workflow.base_dir. If you are asking yourself what Workflow.base_dir does, how to use it, or where to find usage examples, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, nipype.pipeline.engine.Workflow.


Below are 15 code examples of the Workflow.base_dir method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
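Before diving into the full examples, a minimal, self-contained sketch may help clarify what base_dir actually controls: it is the workflow's working directory, under which nipype caches each node's intermediate results (as <base_dir>/<workflow_name>/<node_name>), so that interrupted or repeated runs can reuse cached outputs. All node names and paths in this sketch are illustrative only and do not come from the examples below.

import os
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.utility import Function

def add_one(x):
    return x + 1

# two trivial function nodes, chained together
n1 = Node(Function(input_names=["x"], output_names=["out"], function=add_one), name="n1")
n1.inputs.x = 1
n2 = Node(Function(input_names=["x"], output_names=["out"], function=add_one), name="n2")

wf = Workflow(name="demo_wf")
# results are cached under /tmp/nipype_work/demo_wf/<node_name>
wf.base_dir = os.path.join("/tmp", "nipype_work")
# several of the examples below also redirect crash files relative to base_dir
wf.config["execution"]["crashdump_dir"] = os.path.join(wf.base_dir, "crash")

wf.connect(n1, "out", n2, "x")
wf.run()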

Example 1: create_converter_structural_pipeline

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def create_converter_structural_pipeline(working_dir, ds_dir, name="converter_struct"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["t1w_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["t1w"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    # convert to nifti
    # todo: check if geometry bugs attack. use dcm2nii?
    converter_t1w = Node(DcmStack(embed_meta=True), name="converter_t1w")
    converter_t1w.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_t1w.inputs.out_format = "t1w"

    converter_wf.connect(inputnode, "t1w_dicom", converter_t1w, "dicom_files")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(converter_t1w, "out_file", reor_2_std, "in_file")

    converter_wf.connect(reor_2_std, "out_file", outputnode, "t1w")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "sMRI")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Contributor: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 37, Source: converter.py

Example 2: ants_ct_wf

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def ants_ct_wf(subjects_id,
            preprocessed_data_dir,
            working_dir,
            ds_dir,
            template_dir,
            plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo



    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='ants_ct')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')



    #####################################
    # GET DATA
    #####################################
    # GET SUBJECT SPECIFIC STRUCTURAL DATA
    in_data_templates = {
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
    }

    in_data = Node(nio.SelectFiles(in_data_templates,
                                       base_directory=preprocessed_data_dir),
                       name="in_data")
    in_data.inputs.subject_id = subjects_id


    # GET NKI ANTs templates
    ants_templates_templates = {
        'brain_template': 'NKI/T_template.nii.gz',
        'brain_probability_mask': 'NKI/T_templateProbabilityMask.nii.gz',
        'segmentation_priors': 'NKI/Priors/*.nii.gz',
        't1_registration_template': 'NKI/T_template_BrainCerebellum.nii.gz'

    }

    ants_templates = Node(nio.SelectFiles(ants_templates_templates,
                                       base_directory=template_dir),
                       name="ants_templates")
Contributor: fliem, Project: LeiCA, Lines: 59, Source: ants_ct.py

Example 3: create_conversion

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", scans.keys()), ("scan_val", scans.values())]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
Contributor: JanisReinelt, Project: pipelines, Lines: 52, Source: convert.py

Example 4: create_converter_diffusion_pipeline

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def create_converter_diffusion_pipeline(working_dir, ds_dir, name="converter_diffusion"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["dMRI_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["dMRI"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_wf.connect(inputnode, "dMRI_dicom", converter_dMRI, "source_names")

    dMRI_rename = Node(util.Rename(format_string="DTI_mx_137.nii.gz"), name="dMRI_rename")
    converter_wf.connect(converter_dMRI, "converted_files", dMRI_rename, "in_file")

    bvecs_rename = Node(util.Rename(format_string="DTI_mx_137.bvecs"), name="bvecs_rename")
    converter_wf.connect(converter_dMRI, "bvecs", bvecs_rename, "in_file")

    bvals_rename = Node(util.Rename(format_string="DTI_mx_137.bvals"), name="bvals_rename")
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, "in_file")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(dMRI_rename, "out_file", reor_2_std, "in_file")
    converter_wf.connect(reor_2_std, "out_file", outputnode, "dMRI")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "dMRI")
    converter_wf.connect(bvals_rename, "out_file", niftisink, "dMRI.@bvals")
    converter_wf.connect(bvecs_rename, "out_file", niftisink, "dMRI.@bvecs")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Contributor: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 48, Source: converter.py

Example 5: create_structural

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir, standard_brain):
    # main workflow
    struct_preproc = Workflow(name='anat_preproc')
    struct_preproc.base_dir = working_dir
    struct_preproc.config['execution']['crashdump_dir'] = struct_preproc.base_dir + "/crash_files"


    # workflow to get brain, head and wmseg from freesurfer and convert to nifti
    mgzconvert = create_mgzconvert_pipeline()
    mgzconvert.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    mgzconvert.inputs.inputnode.fs_subject_id = subject

    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.standard = standard_brain

    # sink to store files
    sink = Node(nio.DataSink(base_directory=out_dir,
                             parameterization=False,
                             substitutions=[
                                 ('transform_Warped', 'T1_brain2mni')]),
                name='sink')

    # connections
    struct_preproc.connect(
        [(mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
         (mgzconvert, sink, [('outputnode.anat_head', '@head')]),
         (mgzconvert, sink, [('outputnode.anat_brain', '@brain')]),
         (mgzconvert, sink, [('outputnode.anat_brain_mask', '@mask')]),
         (normalize, sink, [('outputnode.anat2std', '@anat2std'),
                            ('outputnode.anat2std_transforms', '[email protected]_transforms'),
                            ('outputnode.std2anat_transforms', '[email protected]_transforms')])
         ])

    struct_preproc.write_graph(dotfilename='struct_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
    # struct_preproc.run()
    struct_preproc.run(plugin='CondorDAGMan', plugin_args={'initial_specs': 'request_memory = 1500'})
Contributor: fBeyer89, Project: LIFE_Lemon_mod_mod, Lines: 38, Source: structural.py

Example 6: Node

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
                                          ('t1map', 'inQuantitative'),
                                          ('uni', 'inT1weighted')]),
                 (background, strip, [('outMasked2','inInput')]),
                 (background, outputnode, [('outMasked2','uni_masked'),
                                           ('outMasked','t1map_masked'),
                                           ('outSignal2','background_mask')]),
                 (strip, outputnode, [('outStripped','uni_stripped'),
                                      ('outMask', 'skullstrip_mask'),
                                      ('outOriginal','uni_reoriented')])
                 ])


#### in and out ####################################################################################

#mp2rage.base_dir='/scr/ilz1/nonlinear_registration/lemon/testing/dicom_start/'
mp2rage.base_dir='/scr/kansas1/huntenburg/'
data_dir='/scr/litauen1/lsd/pilot_140521/dicoms/DL1T/'
#data_dir='/scr/ilz1/nonlinear_registration/lemon/testing/dicom_start/'
#data_dir = '/scr/jessica2/Schaare/LEMON/raw/'
#out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
#mp2rage.config['execution']={'remove_unnecessary_outputs': 'False'}
subjects=['LEMON064', 'LEMON065', 'LEMON096']
# subjects=os.listdir(data_dir)
# subjects.remove('LEMON025')
# subjects.remove('LEMON065')

#infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id']), 
                  name='infosource')
infosource.iterables=('subject_id', subjects)
Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 32, Source: mp2rage.py

Example 7: open

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
    
fmapepi.connect([(inputnode, convertwarp, [('anat_head', 'reference')]),
                 (convertwarp0, convertwarp, [('out_field', 'warp1')]),
                 (bbregister, convertwarp, [('out_fsl_file', 'postmat')]),
                 (inputnode, applywarp, [('epi_mean', 'in_file'),
                                         ('anat_head', 'ref_file')]),
                 (convertwarp, applywarp, [('out_field', 'field_file')]),
                 (applywarp, outputnode, [('out_file', 'fmap_mean_coreg')]),
                 (convertwarp, outputnode, [('out_field', 'fmap_fullwarp')])
              ])



#### running directly ############################################################################################################

fmapepi.base_dir='/scr/kansas1/huntenburg/lemon_missing/working_dir/'
#fmapepi.config['execution']={'remove_unnecessary_outputs': 'False'}
data_dir = '/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir='/scr/jessica2/Schaare/LEMON/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
#subjects=['LEMON001']
subjects=[]
f = open('/scr/jessica2/Schaare/LEMON/missing_subjects.txt','r')
for line in f:
    subjects.append(line.strip())

# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id','fs_subjects_dir']), 
                  name='infosource')
infosource.inputs.fs_subjects_dir=fs_subjects_dir
infosource.iterables=('subject_id', subjects)
Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 33, Source: fieldmap_epi2fmap.py

Example 8: Node

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
                                    terminal_output='file'),
                    name='apply2con', iterfield=['input_image'])

# Apply Transformation - applies the normalization matrix to the mean image
apply2mean = Node(ApplyTransforms(args='--float',
                                  input_image_type=3,
                                  interpolation='Linear',
                                  invert_transform_flags=[False],
                                  num_threads=1,
                                  reference_image=template,
                                  terminal_output='file'),
                  name='apply2mean')

# Initiation of the ANTS normalization workflow
normflow = Workflow(name='normflow')
normflow.base_dir = opj(experiment_dir, working_dir)

# Connect up ANTS normalization components
normflow.connect([(antsreg, apply2con, [('composite_transform', 'transforms')]),
                  (antsreg, apply2mean, [('composite_transform',
                                          'transforms')])
                  ])

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternativ to DataGrabber)
anat_file = opj('freesurfer', '{subject_id}', 'mri/brain.mgz')
func_file = opj(input_dir_1st, 'contrasts', '{subject_id}',
Contributor: dalejn, Project: ants_scripts, Lines: 33, Source: ANTS_TEST.py

Example 9: Node

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
# cutoff volumes = 1/(2*TR*cutoff in Hz)
# https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
lp = 1.0 / (2 * 1.4 * 0.1)
hp = 1.0 / (2 * 1.4 * 0.01)
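# a quick sanity check of the arithmetic above (TR = 1.4 s, as hard-coded here):
# lp = 1/(2*1.4*0.1)  ~  3.57 volumes (0.1 Hz lowpass cutoff in units of TRs)
# hp = 1/(2*1.4*0.01) ~ 35.71 volumes (0.01 Hz highpass cutoff in units of TRs)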

bandpass_filter = Node(fsl.TemporalFilter(lowpass_sigma=lp, highpass_sigma=hp), name="bandpass_filter")
bandpass_filter.plugin_args = {"initial_specs": "request_memory = 30000"}


preproc.connect(remove_noise, "out_file", bandpass_filter, "in_file")
preproc.connect(bandpass_filter, "out_file", outputnode, "filtered_file")


###################################################################################################################################
# in and out
preproc.base_dir = "/scr/kansas1/huntenburg/"
preproc.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/"
data_dir = "/scr/"
subjects = ["LEMON006"]  # ,'LEMON001','LEMON087','LEMON030','LEMON044','LEMON071']


# infosource to iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subject_infosource")
subject_infosource.iterables = ("subject_id", subjects)


# infosource to iterate over coregistration methods
cor_method_infosource = Node(util.IdentityInterface(fields=["cor_method"]), name="cor_method_infosource")
cor_method_infosource.iterables = ("cor_method", ["lin_ts"])  # , 'lin_ts', 'nonlin_ts', 'fmap_ts', 'topup_ts'])
Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 32, Source: preproc_conn.py

Example 10: Workflow

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
subjects=list(subjects['DB'])
subjects.remove('KSMT')

sessions = ['rest1_1', 'rest1_2', 'rest2_1', 'rest2_2']

# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/' 
data_dir= '/scr/ilz3/myelinconnect/'
final_dir = '/scr/ilz3/myelinconnect/mappings/rest2highres/'

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

# main workflow
mapping2struct = Workflow(name='mapping2struct')
mapping2struct.base_dir = working_dir
mapping2struct.config['execution']['crashdump_dir'] = mapping2struct.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']), 
                  name='subject_infosource')
subject_infosource.iterables=[('subject', subjects)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']), 
                  name='session_infosource')
session_infosource.iterables=[('session', sessions)]

# select files
templates={'mapping': 'mappings/rest/fixed_hdr/corr_{subject}_{session}_roi_detrended_median_corrected_mapping_fixed.nii.gz',
           'epi2highres_lin_itk' : 'resting/preprocessed/{subject}/{session}/registration/epi2highres_lin.txt',
Contributor: juhuntenburg, Project: myelinconnect, Lines: 33, Source: project_mapping2struct.py

Example 11: open

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
   
topup.connect([(inputnode, convertwarp, [('anat_head', 'reference')]),
              (epireg, convertwarp, [('shiftmap', 'shiftmap')]),
              (concat, convertwarp, [('out_file', 'postmat')]),
              (inputnode, applywarp, [('epi_mean', 'in_file'),
                                      ('anat_head', 'ref_file')]),
              (convertwarp, applywarp, [('out_field', 'field_file')]),
              (applywarp, outputnode, [('out_file', 'topup_mean_coreg')]),
              (convertwarp, outputnode, [('out_field', 'topup_fullwarp')])
              ])



##### in and output ############

topup.base_dir='/scr/kansas1/huntenburg/'
topup.config['execution']={'remove_unnecessary_outputs': 'False'}
data_dir='/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir='/scr/jessica2/Schaare/LEMON/freesurfer/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
subjects=['LEMON001']
# subjects=[]
# f = open('/scr/jessica2/Schaare/LEMON/done_freesurfer.txt','r')
# for line in f:
#     subjects.append(line.strip())


# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id','fs_subjects_dir']), 
                  name='infosource')
infosource.inputs.fs_subjects_dir=fs_subjects_dir
Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 33, Source: topup.py

Example 12: create_rsfMRI_preproc_pipeline

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def create_rsfMRI_preproc_pipeline(working_dir, freesurfer_dir, ds_dir, use_fs_brainmask, name='rsfMRI_preprocessing'):
    # initiate workflow
    rsfMRI_preproc_wf = Workflow(name=name)
    rsfMRI_preproc_wf.base_dir = os.path.join(working_dir, 'LeiCA_resting')
    ds_dir = os.path.join(ds_dir, name)

    # set fsl output
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['epi',
                                                    't1w',
                                                    'subject_id',
                                                    'TR_ms',
                                                    'vols_to_drop',
                                                    'lat_ventricle_mask_MNI',
                                                    'lp_cutoff_freq',
                                                    'hp_cutoff_freq']),
                     name='inputnode')

    # outputnode
    outputnode = Node(util.IdentityInterface(fields=['epi_moco',
                                                     'rs_preprocessed',
                                                     'epi_2_MNI_warp']),
                      name='outputnode')


    # MOCO
    moco = create_moco_pipeline(working_dir, ds_dir, 'motion_correction')
    rsfMRI_preproc_wf.connect(inputnode, 'epi', moco, 'inputnode.epi')
    rsfMRI_preproc_wf.connect(inputnode, 'vols_to_drop', moco, 'inputnode.vols_to_drop')



    # STRUCT PREPROCESSING
    struct_preproc = create_struct_preproc_pipeline(working_dir, freesurfer_dir, ds_dir, use_fs_brainmask, 'struct_preproc')
    rsfMRI_preproc_wf.connect(inputnode, 't1w', struct_preproc, 'inputnode.t1w')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', struct_preproc, 'inputnode.subject_id')



    # REGISTRATIONS
    reg = create_registration_pipeline(working_dir, freesurfer_dir, ds_dir, 'registration')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.initial_mean_epi_moco', reg, 'inputnode.initial_mean_epi_moco')
    rsfMRI_preproc_wf.connect(inputnode, 't1w', reg, 'inputnode.t1w')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.t1w_brain', reg, 'inputnode.t1w_brain')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.wm_mask_4_bbr', reg, 'inputnode.wm_mask_4_bbr')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.struct_brain_mask', reg, 'inputnode.struct_brain_mask')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', reg, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.epi_2_MNI_warp', outputnode, 'epi_2_MNI_warp')



    # DESKULL EPI
    deskull = create_deskull_pipeline(working_dir, ds_dir, 'deskull')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.epi_moco', deskull, 'inputnode.epi_moco')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.struct_brain_mask', deskull, 'inputnode.struct_brain_mask')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.struct_2_epi_mat', deskull, 'inputnode.struct_2_epi_mat')




    # DENOISE
    denoise = create_denoise_pipeline(working_dir, ds_dir, 'denoise')
    rsfMRI_preproc_wf.connect(inputnode, 'TR_ms', denoise, 'inputnode.TR_ms')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', denoise, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(inputnode, 'lat_ventricle_mask_MNI', denoise, 'inputnode.lat_ventricle_mask_MNI')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.par_moco', denoise, 'inputnode.par_moco')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.epi_deskulled', denoise, 'inputnode.epi')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.mean_epi', denoise, 'inputnode.mean_epi')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.brain_mask_epiSpace', denoise, 'inputnode.brain_mask_epiSpace')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.struct_2_epi_mat', denoise, 'inputnode.struct_2_epi_mat')
    rsfMRI_preproc_wf.connect(reg, 'outputnode.MNI_2_epi_warp', denoise, 'inputnode.MNI_2_epi_warp')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.wm_mask', denoise, 'inputnode.wm_mask')
    rsfMRI_preproc_wf.connect(struct_preproc, 'outputnode.csf_mask', denoise, 'inputnode.csf_mask')
    rsfMRI_preproc_wf.connect(inputnode, 'lp_cutoff_freq', denoise, 'inputnode.lp_cutoff_freq')
    rsfMRI_preproc_wf.connect(inputnode, 'hp_cutoff_freq', denoise, 'inputnode.hp_cutoff_freq')

    rsfMRI_preproc_wf.connect(denoise, 'outputnode.rs_preprocessed', outputnode, 'rs_preprocessed')



    # QC
    qc = create_qc_pipeline(working_dir, ds_dir, 'qc')
    rsfMRI_preproc_wf.connect(inputnode, 'subject_id', qc, 'inputnode.subject_id')
    rsfMRI_preproc_wf.connect(moco, 'outputnode.par_moco', qc, 'inputnode.par_moco')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.epi_deskulled', qc, 'inputnode.epi_deskulled')
    rsfMRI_preproc_wf.connect(deskull, 'outputnode.brain_mask_epiSpace', qc, 'inputnode.brain_mask_epiSpace')
    rsfMRI_preproc_wf.connect([(struct_preproc, qc, [('outputnode.t1w_brain', 'inputnode.t1w_brain'),
                                                     ('outputnode.struct_brain_mask', 'inputnode.struct_brain_mask')])])
    rsfMRI_preproc_wf.connect([(reg, qc, [('outputnode.mean_epi_structSpace', 'inputnode.mean_epi_structSpace'),
                                          ('outputnode.mean_epi_MNIspace', 'inputnode.mean_epi_MNIspace'),
                                          ('outputnode.struct_MNIspace', 'inputnode.struct_MNIspace'),
                                          ('outputnode.struct_2_MNI_warp', 'inputnode.struct_2_MNI_warp')])])
    rsfMRI_preproc_wf.connect(denoise, 'outputnode.outlier_files', qc, 'inputnode.outlier_files')
    rsfMRI_preproc_wf.connect(denoise, 'outputnode.rs_preprocessed', qc, 'inputnode.rs_preprocessed')

    rsfMRI_preproc_wf.write_graph(dotfilename=rsfMRI_preproc_wf.name, graph2use='orig', format='pdf')
    rsfMRI_preproc_wf.write_graph(dotfilename=rsfMRI_preproc_wf.name, graph2use='colored', format='pdf')

#.........part of the code is omitted here.........
Contributor: JoHannnezzz, Project: nki_nilearn, Lines: 103, Source: rsfMRI_preprocessing.py

Example 13: Workflow

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
    
# field_methodlist=[]
# for field in fields:
#     fieldlist=[]
#     for subject in subjects:
#         fieldlist.append(mask_dir+subject+'/fieldcompare/fields/'+field+'_field.nii.gz')
#     field_methodlist.append(fieldlist)
#         

'''basic workflow
=======================
'''

# create workflow
group = Workflow(name='group')
group.base_dir=working_dir

# sink
sink = Node(nio.DataSink(base_directory=out_dir,
                         parameterization=False), 
             name='sink')
 

'''groupmeans and sdv
=======================
'''

# merge means
merger = MapNode(fsl.Merge(dimension='t'),
                 iterfield=['in_files'],
                 name='merger')
Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 33, Source: eval_group.py

Example 14: open

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
                               relwarp=True,
                               out_file='topup_ts.nii.gz', 
                               datatype='float'),
                 name='apply_topup') 
   
apply_ts.connect([(inputnode, apply_topup, [('moco_ts', 'in_file'),
                                            ('topup_fullwarp', 'field_file')]),
                 (resamp_anat, apply_topup, [('out_file', 'ref_file')]),
                 (apply_topup, outputnode, [('out_file', 'topup_ts')])
                 ])
apply_topup.plugin_args={'initial_specs': 'request_memory = 8000'}



# set up workflow, in- and output
apply_ts.base_dir='/scr/kansas1/huntenburg/'
data_dir='/scr/jessica2/Schaare/LEMON/'
#out_dir = '/scr/kansas1/huntenburg/timeseries/'
#applywarp_linear.config['execution']={'remove_unnecessary_outputs': 'False'}
apply_ts.config['execution']['crashdump_dir'] = apply_ts.base_dir + "/crash_files"

# reading subjects from file
#subjects=['LEMON003']
subjects=[]
f = open('/scr/jessica2/Schaare/LEMON/done_freesurfer.txt','r')
for line in f:
    subjects.append(line.strip())
subjects.remove('LEMON007')
subjects.remove('LEMON027')

Contributor: juhuntenburg, Project: nonlinear_coreg, Lines: 31, Source: apply_timeseries_fsl.py

Example 15: calc_local_metrics

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import base_dir [as alias]
def calc_local_metrics(
    preprocessed_data_dir,
    subject_id,
    parcellations_dict,
    bp_freq_list,
    fd_thresh,
    working_dir,
    ds_dir,
    use_n_procs,
    plugin_name,
):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import utils as calc_metrics_utils

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    wf = Workflow(name="LeiCA_LIFE_metrics")
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(
        logging=dict(workflow_level="DEBUG"),
        execution={"stop_on_first_crash": True, "remove_unnecessary_outputs": True, "job_finished_timeout": 15},
    )
    config.update_config(nipype_cfg)
    wf.config["execution"]["crashdump_dir"] = os.path.join(working_dir, "crash")

    ds = Node(nio.DataSink(base_directory=ds_dir), name="ds")
    ds.inputs.regexp_substitutions = [
        ("MNI_resampled_brain_mask_calc.nii.gz", "falff.nii.gz"),
        ("residual_filtered_3dT.nii.gz", "alff.nii.gz"),
        ("_parcellation_", ""),
        ("_bp_freqs_", "bp_"),
    ]

    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=["parcellation"]), name="parcellation_infosource")
    parcellation_infosource.iterables = ("parcellation", parcellations_dict.keys())

    bp_filter_infosource = Node(util.IdentityInterface(fields=["bp_freqs"]), name="bp_filter_infosource")
    bp_filter_infosource.iterables = ("bp_freqs", bp_freq_list)

    selectfiles = Node(
        nio.SelectFiles(
            {
                "parcellation_time_series": "{subject_id}/con_mat/parcellated_time_series/bp_{bp_freqs}/{parcellation}/parcellation_time_series.npy"
            },
            base_directory=preprocessed_data_dir,
        ),
        name="selectfiles",
    )
    selectfiles.inputs.subject_id = subject_id
    wf.connect(parcellation_infosource, "parcellation", selectfiles, "parcellation")
    wf.connect(bp_filter_infosource, "bp_freqs", selectfiles, "bp_freqs")

    fd_file = Node(
        nio.SelectFiles({"fd_p": "{subject_id}/QC/FD_P_ts"}, base_directory=preprocessed_data_dir), name="fd_file"
    )
    fd_file.inputs.subject_id = subject_id

    ##############
    ## CON MATS
    ##############
    ##############
    ## extract ts
    ##############

    get_good_trs = Node(
        util.Function(
            input_names=["fd_file", "fd_thresh"],
            output_names=["good_trs", "fd_scrubbed_file"],
            function=calc_metrics_utils.get_good_trs,
        ),
        name="get_good_trs",
    )
    wf.connect(fd_file, "fd_p", get_good_trs, "fd_file")
    get_good_trs.inputs.fd_thresh = fd_thresh

    parcellated_ts_scrubbed = Node(
        util.Function(
            input_names=["parcellation_time_series_file", "good_trs"],
            output_names=["parcellation_time_series_scrubbed"],
            function=calc_metrics_utils.parcellation_time_series_scrubbing,
        ),
        name="parcellated_ts_scrubbed",
    )

    wf.connect(selectfiles, "parcellation_time_series", parcellated_ts_scrubbed, "parcellation_time_series_file")
    wf.connect(get_good_trs, "good_trs", parcellated_ts_scrubbed, "good_trs")

#.........part of the code is omitted here.........
Contributor: fliem, Project: LeiCA_LIFE, Lines: 103, Source: calc_metrics_scrubbing.py


Note: the nipype.pipeline.engine.Workflow.base_dir method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not repost without permission.