

Python engine.Node Class Code Examples

This article collects typical usage examples of the Python class nipype.pipeline.engine.Node. If you are wondering what the Node class is for, how to use it, or want to see concrete examples, the curated class code examples below should help.


The following presents 15 code examples of the Node class, sorted by popularity by default. You can upvote examples you like or find useful; your votes help the system recommend better Python code examples.
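
Before the examples, here is a minimal sketch of the pattern they all build on: a Node wraps a single interface, gets a unique name, and can be run on its own. This is an illustrative sketch only; the paths and file names below are placeholders, not taken from the examples.

import nipype.interfaces.fsl as fsl
from nipype.pipeline.engine import Node

# A Node wraps one interface; its name determines the working subdirectory.
bet = Node(fsl.BET(frac=0.5, mask=True), name='skullstrip')
bet.base_dir = '/tmp/node_example'        # where the node runs and caches (placeholder)
bet.inputs.in_file = 'sub-01_T1w.nii.gz'  # placeholder input file

result = bet.run()
print(result.outputs.out_file)            # path to the skull-stripped image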

Example 1: __call__

    def __call__(self, **kwargs):
        kwargs = modify_paths(kwargs, relative=False)
        interface = self.interface()
        # Set the inputs early to get some argument checking
        interface.inputs.set(**kwargs)
        # Make a name for our node
        inputs = interface.inputs.get_hashval()
        hasher = hashlib.new('md5')
        hasher.update(pickle.dumps(inputs))
        dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'),
                              interface.__class__.__name__)
        job_name = hasher.hexdigest()
        node = Node(interface, name=job_name)
        node.base_dir = os.path.join(self.base_dir, dir_name)

        cwd = os.getcwd()
        try:
            out = node.run()
        finally:
            # node.run() changes to the node directory - if something goes wrong
            # before it cds back you would end up in strange places
            os.chdir(cwd)
        if self.callback is not None:
            self.callback(dir_name, job_name)
        return out
Author: xavierislam, Project: electrode-gui, Lines: 25, Source: memory.py
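
This __call__ is the caching-wrapper pattern from nipype's memory module: the interface inputs are hashed into the node name, so identical calls resolve to the same node directory and are not re-run. A hedged usage sketch via nipype.caching.Memory (cache directory and input file are placeholders):

from nipype.caching import Memory
import nipype.interfaces.fsl as fsl

mem = Memory(base_dir='/tmp/nipype_mem')  # placeholder cache directory
fsl_bet = mem.cache(fsl.BET)              # returns a cached callable like the one above

# Repeated calls with identical inputs hash to the same directory and reuse results.
results = fsl_bet(in_file='sub-01_T1w.nii.gz', frac=0.5)  # placeholder input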

Example 2: create_converter_structural_pipeline

def create_converter_structural_pipeline(working_dir, ds_dir, name="converter_struct"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["t1w_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["t1w"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    # convert to nifti
    # todo: check if geometry bugs attack. use dcm2nii?
    converter_t1w = Node(DcmStack(embed_meta=True), name="converter_t1w")
    converter_t1w.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_t1w.inputs.out_format = "t1w"

    converter_wf.connect(inputnode, "t1w_dicom", converter_t1w, "dicom_files")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(converter_t1w, "out_file", reor_2_std, "in_file")

    converter_wf.connect(reor_2_std, "out_file", outputnode, "t1w")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "sMRI")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Author: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 35, Source: converter.py
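
A hedged sketch of how such a workflow factory is typically driven (directories and the DICOM list are placeholders): workflow-level inputs are reachable as inputs.<nodename>.<field>:

converter_wf = create_converter_structural_pipeline(working_dir='/tmp/wd',
                                                    ds_dir='/tmp/ds')
converter_wf.inputs.inputnode.t1w_dicom = ['/data/t1w/IM-0001.dcm']  # placeholder
converter_wf.run()  # or converter_wf.run(plugin='MultiProc')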

Example 3: create_reconall_pipeline

def create_reconall_pipeline(name='reconall'):
    reconall = Workflow(name=name)
    # inputnode
    inputnode = Node(util.IdentityInterface(fields=['anat',
                                                    'fs_subjects_dir',
                                                    'fs_subject_id'
                                                    ]),
                     name='inputnode')
    outputnode = Node(util.IdentityInterface(fields=['fs_subjects_dir',
                                                     'fs_subject_id']),
                      name='outputnode')
    # run reconall
    recon_all = Node(fs.ReconAll(args='-autorecon2 -nuiterations 7 -no-isrunning -hippo-subfields'),
                     name="recon_all")
    # recon_all.inputs.directive= 'autorecon2-wm' # -autorecon3
    recon_all.plugin_args = {'submit_specs': 'request_memory = 9000'}
    # function to replace / in subject id string with a _
    def sub_id(sub_id):
        return sub_id.replace('/', '_')

    reconall.connect([(inputnode, recon_all, [('fs_subjects_dir', 'subjects_dir'),
                                              ('anat', 'T1_files'),
                                              (('fs_subject_id', sub_id), 'subject_id')]),
                      (recon_all, outputnode, [('subject_id', 'fs_subject_id'),
                                               ('subjects_dir', 'fs_subjects_dir')])
                      ])
    return reconall
Author: fBeyer89, Project: LIFE_Lemon_mod_mod, Lines: 27, Source: reconall.py

Example 4: create_normalize_pipeline

def create_normalize_pipeline(name='normalize'):
    # workflow
    normalize = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['epi_coreg',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=[
        'normalized_file']),
        name='outputnode')

    # time-normalize scans
    normalize_time = Node(util.Function(input_names=['in_file', 'tr'],
                                        output_names=['out_file'],
                                        function=time_normalizer),
                          name='normalize_time')
    normalize_time.plugin_args = {'submit_specs': 'request_memory = 17000'}
    normalize.connect([(inputnode, normalize_time, [('tr', 'tr')]),
                       (inputnode, normalize_time, [('epi_coreg', 'in_file')]),
                       (normalize_time, outputnode, [('out_file', 'normalized_file')])
                       ])


    return normalize
Author: fBeyer89, Project: LIFE_Lemon_mod_mod, Lines: 25, Source: normalize.py
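
The normalize_time node above wraps a plain Python function with util.Function; the real time_normalizer is defined elsewhere in that project and not shown here. A toy sketch of the wrapping pattern itself (function and values are hypothetical):

import nipype.interfaces.utility as util
from nipype.pipeline.engine import Node

def double_tr(in_file, tr):
    # Function-node bodies are serialized, so any imports they need
    # must be made inside the function itself (this toy needs none).
    return tr * 2

toy = Node(util.Function(input_names=['in_file', 'tr'],
                         output_names=['out_tr'],
                         function=double_tr),
           name='toy_function_node')
toy.inputs.in_file = 'epi.nii.gz'  # placeholder
toy.inputs.tr = 2.0
print(toy.run().outputs.out_tr)    # 4.0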

Example 5: create

    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = eval("csvOut.outputs.{0}".format(label))
        iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True, name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  # TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            # 'stop_on_first_rerun' stops at the first attempt to rerun, before
            # running and before deleting previous results.
            'stop_on_first_rerun': 'false',
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # MATLAB 2011a uses multiple cores for matrix multiplication
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # relative paths require a hash update when changed
            'remove_node_directories': 'false',  # experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
             (metaflow, fusionflow,
              [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
             (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
             ])
Author: NIRALUser, Project: BRAINSTools, Lines: 54, Source: crossValidate.py

Example 6: _merge_nii

    def _merge_nii(file_list, out_filename):
        from nipype.pipeline.engine import Node, Workflow
        import nipype.interfaces.fsl as fsl
        import os

        merge = Node(fsl.Merge(dimension='t'), name='merge')
        merge.base_dir = os.getcwd()
        merge.inputs.in_files = file_list
        merge.inputs.merged_file = out_filename
        result = merge.run()

        return result.outputs.merged_file
Author: Yaqiongxiao, Project: LeiCA, Lines: 11, Source: utils.py
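
Assuming the helper is exposed as a standalone function (in the source it is a nested utility), a hedged usage sketch with placeholder file names:

merged = _merge_nii(['run1.nii.gz', 'run2.nii.gz'], 'runs_merged.nii.gz')
print(merged)  # path to the concatenated 4D file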

Example 7: func_preprocess

def func_preprocess(name = 'func_preproc'):

    '''
    Method to preprocess functional data after warping to anatomical space.

    Run after one-step distortion correction, motion correction, and
    boundary-based linear registration to anatomical space.

    Procedure:
    # 1- skull strip
    # 2- normalize the image intensity values
    # 3- calculate the mean of the skull-stripped image
    # 4- create a brain mask from the normalized data
    '''

    # Define Workflow
    flow        = Workflow(name=name)
    inputnode   = Node(util.IdentityInterface(fields=['func_in']),
                           name='inputnode')
    outputnode  = Node(util.IdentityInterface(fields=['func_preproc',
                                                      'func_preproc_mean',
                                                      'func_preproc_mask']),
                           name = 'outputnode')


    # 2- Normalize the image intensity values.
    norm                               = Node(interface = fsl.ImageMaths(),       name = 'func_normalized')
    norm.inputs.op_string              = '-ing 1000'
    norm.inputs.out_data_type          = 'float'
    norm.inputs.output_type            = 'NIFTI'

    # 4- Create brain mask from Normalized data.
    mask                               = Node(interface = fsl.BET(),  name = 'func_preprocessed')
    mask.inputs.functional             = True
    mask.inputs.mask                   = True
    mask.inputs.frac                   = 0.5
    mask.inputs.vertical_gradient      = 0
    mask.inputs.threshold              = True

    # 3- Calculate Mean of Skull stripped image
    mean                          = Node(interface = preprocess.TStat(),     name = 'func_preprocessed_mean')
    mean.inputs.options           = '-mean'
    mean.inputs.outputtype        = 'NIFTI'


    flow.connect( inputnode  ,   'func_in'           ,   norm,        'in_file'     )
    flow.connect( norm       ,   'out_file'          ,   mask,        'in_file'     )
    flow.connect( norm       ,   'out_file'          ,   mean,        'in_file'     )
    flow.connect( mask       ,   'out_file'          ,   outputnode,  'func_preproc')
    flow.connect( mask       ,   'mask_file'         ,   outputnode,  'func_preproc_mask')
    flow.connect( mean       ,   'out_file'          ,   outputnode,  'func_preproc_mean')

    return flow
Author: amadeuskanaan, Project: GluREST, Lines: 53, Source: func_preprocess.py

Example 8: anatomical_preprocessing

def anatomical_preprocessing():
    '''
    Inputs:
        MP2RAGE skull-stripped image (SPECTRE 2010)

    Workflow:
        1. reorient to RPI
        2. create a brain mask

    Returns:
        brain
        brain_mask

    '''
    # define workflow
    flow = Workflow('anat_preprocess')
    inputnode    = Node(util.IdentityInterface(fields=['anat', 'anat_gm', 'anat_wm', 'anat_csf', 'anat_first']), name = 'inputnode')
    outputnode   = Node(util.IdentityInterface(fields=['brain','brain_gm', 'brain_wm', 'brain_csf', 'brain_first', 'brain_mask',]),  name = 'outputnode')

    reorient   = Node(interface=preprocess.Resample(),                     name = 'anat_reorient')
    reorient.inputs.orientation = 'RPI'
    reorient.inputs.outputtype = 'NIFTI'

    erode = Node(interface=fsl.ErodeImage(),                                 name = 'anat_preproc')

    reorient_gm    = reorient.clone('anat_preproc_gm')
    reorient_wm    = reorient.clone('anat_preproc_wm')
    reorient_cm    = reorient.clone('anat_preproc_csf')
    reorient_first = reorient.clone('anat_preproc_first')

    make_mask    = Node(interface=fsl.UnaryMaths(),                        name = 'anat_preproc_mask')
    make_mask.inputs.operation = 'bin'

    # connect workflow nodes
    flow.connect(inputnode,    'anat'     , reorient,      'in_file'    )
    flow.connect(inputnode,    'anat_gm'  , reorient_gm,   'in_file'    )
    flow.connect(inputnode,    'anat_wm'  , reorient_wm,   'in_file'    )
    flow.connect(inputnode,    'anat_csf' , reorient_cm,   'in_file'    )
    flow.connect(inputnode,    'anat_first' , reorient_first,'in_file'    )
    flow.connect(reorient,     'out_file' , erode,        'in_file'    )
    flow.connect(erode,        'out_file' , make_mask,    'in_file'    )
    flow.connect(make_mask,    'out_file' , outputnode,   'brain_mask' )

    flow.connect(erode,        'out_file' , outputnode,   'brain'      )
    flow.connect(reorient_gm,  'out_file' , outputnode,   'brain_gm'   )
    flow.connect(reorient_wm,  'out_file' , outputnode,   'brain_wm'   )
    flow.connect(reorient_cm,  'out_file' , outputnode,   'brain_csf'  )
    flow.connect(reorient_first,  'out_file' , outputnode,   'brain_first' )

    return flow
Author: amadeuskanaan, Project: GluREST, Lines: 50, Source: anat_preprocess.py

Example 9: create_conversion

def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", list(scans.keys())), ("scan_val", list(scans.values()))]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
Author: JanisReinelt, Project: pipelines, Lines: 50, Source: convert.py

Example 10: create_converter_diffusion_pipeline

def create_converter_diffusion_pipeline(working_dir, ds_dir, name="converter_diffusion"):
    # initiate workflow
    converter_wf = Workflow(name=name)
    converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")

    # set fsl output
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    # inputnode
    inputnode = Node(util.IdentityInterface(fields=["dMRI_dicom"]), name="inputnode")

    outputnode = Node(util.IdentityInterface(fields=["dMRI"]), name="outputnode")

    niftisink = Node(nio.DataSink(), name="niftisink")
    niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")

    #######

    converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
    converter_dMRI.inputs.gzip_output = True
    converter_dMRI.inputs.nii_output = True
    converter_dMRI.inputs.anonymize = False
    converter_dMRI.plugin_args = {"submit_specs": "request_memory = 2000"}
    converter_wf.connect(inputnode, "dMRI_dicom", converter_dMRI, "source_names")

    dMRI_rename = Node(util.Rename(format_string="DTI_mx_137.nii.gz"), name="dMRI_rename")
    converter_wf.connect(converter_dMRI, "converted_files", dMRI_rename, "in_file")

    bvecs_rename = Node(util.Rename(format_string="DTI_mx_137.bvecs"), name="bvecs_rename")
    converter_wf.connect(converter_dMRI, "bvecs", bvecs_rename, "in_file")

    bvals_rename = Node(util.Rename(format_string="DTI_mx_137.bvals"), name="bvals_rename")
    converter_wf.connect(converter_dMRI, "bvals", bvals_rename, "in_file")

    # reorient to standard orientation
    reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
    converter_wf.connect(dMRI_rename, "out_file", reor_2_std, "in_file")
    converter_wf.connect(reor_2_std, "out_file", outputnode, "dMRI")

    # save original niftis
    converter_wf.connect(reor_2_std, "out_file", niftisink, "dMRI")
    converter_wf.connect(bvals_rename, "out_file", niftisink, "dMRI.@bvals")
    converter_wf.connect(bvecs_rename, "out_file", niftisink, "dMRI.@bvecs")

    converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
    return converter_wf
Author: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 46, Source: converter.py

Example 11: __call__

    def __call__(self, **kwargs):
        kwargs = modify_paths(kwargs, relative=False)
        interface = self.interface()
        # Set the inputs early to get some argument checking
        interface.inputs.set(**kwargs)
        # Make a name for our node
        inputs = interface.inputs.get_hashval()
        hasher = hashlib.new('md5')
        hasher.update(pickle.dumps(inputs))
        dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'),
                              interface.__class__.__name__)
        job_name = hasher.hexdigest()
        node = Node(interface, name=job_name)
        node.base_dir = os.path.join(self.base_dir, dir_name)
        out = node.run()
        if self.callback is not None:
            self.callback(dir_name, job_name)
        return out
Author: Alunisiira, Project: nipype, Lines: 18, Source: memory.py

Example 12: smooth_data

def smooth_data(name = 'func_smoothed'):
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl as fsl

    flow        = Workflow(name)

    inputnode   = Node(util.IdentityInterface(fields=['func_data']),
                       name = 'inputnode')

    outputnode  =  Node(util.IdentityInterface(fields=['func_smoothed']),
                       name = 'outputnode')

    smooth      = Node(interface=fsl.Smooth(), name='func_smooth_fwhm_4')
    smooth.inputs.fwhm                 = 4.0
    smooth.inputs.terminal_output      = 'file'

    flow.connect(inputnode, 'func_data'      , smooth      , 'in_file'    )
    flow.connect(smooth,    'smoothed_file'  , outputnode  , 'func_smoothed'   )


    return flow
Author: amadeuskanaan, Project: GluREST, Lines: 22, Source: smooth.py

Example 13: Node

###################################################################################################################################
# # artefact detection
# ad = Node(ra.ArtifactDetect(save_plot=False,
#                             norm_threshold=1,
#                             zintensity_threshold=3,
#                             mask_type='spm_global',
#                             use_differences = [True, False],
#                             parameter_source='FSL'),
#           name='artefactdetect')
#
# #wf.connect(getmask, 'outputspec.mask',ad, 'mask_file') mask_type='file'


###################################################################################################################################
# tsnr (input is timeseries from inputnode)
tsnr = Node(TSNR(regress_poly=2), name="tsnr")
tsnr.plugin_args = {"initial_specs": "request_memory = 30000"}


###################################################################################################################################
# create noise mask file
getthresh = Node(interface=fsl.ImageStats(op_string="-p 98"), name="getthreshold")
getthresh.plugin_args = {"initial_specs": "request_memory = 30000"}

threshold_stddev = Node(fsl.Threshold(), name="threshold")
threshold_stddev.plugin_args = {"initial_specs": "request_memory = 30000"}

preproc.connect(tsnr, "stddev_file", threshold_stddev, "in_file")
preproc.connect(tsnr, "stddev_file", getthresh, "in_file")
preproc.connect(getthresh, "out_stat", threshold_stddev, "thresh")
preproc.connect(threshold_stddev, "out_file", outputnode, "noise_mask_file")
Author: juhuntenburg, Project: nonlinear_coreg, Lines: 31, Source: preproc_conn.py

Example 14: learning_prepare_data_wf

def learning_prepare_data_wf(working_dir,
                             ds_dir,
                             template_lookup_dict,
                             behav_file,
                             qc_file,
                             in_data_name_list,
                             data_lookup_dict,
                             use_n_procs,
                             plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from prepare_data_utils import vectorize_and_aggregate
    from itertools import chain

    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))


    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='learning_prepare_data_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                       'remove_unnecessary_outputs': False,
                                                                       'job_finished_timeout': 120,
                                                                       'hash_method': 'timestamp'})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]

    ds_X = Node(nio.DataSink(), name='ds_X')
    ds_X.inputs.base_directory = os.path.join(ds_dir, 'vectorized_aggregated_data')

    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False


    #####################################
    # SET ITERATORS
    #####################################
    # DATA-NAME ITERATORS
    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)



    ###############################################################################################################
    # GET SUBJECTS INFO
    # create subjects list based on selection criteria

    def create_df_fct(behav_file, qc_file):
        import pandas as pd
        import os
        df = pd.read_pickle(behav_file)
        qc = pd.read_pickle(qc_file)
        df_all = qc.join(df, how='inner')

        assert df_all.index.is_unique, 'duplicates in df index. fix before cont.'

        df_all_subjects_pickle_file = os.path.abspath('df_all.pkl')
        df_all.to_pickle(df_all_subjects_pickle_file)

        full_subjects_list = df_all.index.values

        return df_all_subjects_pickle_file, full_subjects_list

    create_df = Node(util.Function(input_names=['behav_file', 'qc_file'],
                                   output_names=['df_all_subjects_pickle_file', 'full_subjects_list'],
                                   function=create_df_fct),
                     name='create_df')
    create_df.inputs.behav_file = behav_file
    create_df.inputs.qc_file = qc_file


    ###############################################################################################################
    # CREATE FILE LIST
    # of files that will be aggregated

    def create_file_list_fct(subjects_list, in_data_name, data_lookup_dict, template_lookup_dict):
#......... remainder of this example omitted .........
Author: fliem, Project: LeiCA_LIFE, Lines: 101, Source: learning_prepare_data_wf.py

Example 15: learning_predict_data_2samp_wf

def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
#......... remainder of this example omitted .........
Author: fliem, Project: LeiCA_LIFE, Lines: 101, Source: learning_predict_data_wf.py


Note: The nipype.pipeline.engine.Node class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before redistributing or using the code. Do not reproduce without permission.