

Python Workflow.add_nodes Method Code Examples

This article collects typical usage examples of the Python method nipype.pipeline.engine.Workflow.add_nodes, drawn from open-source projects. If you are unsure what Workflow.add_nodes does, how to call it, or where it is useful, the curated examples below should help. You can also explore further usage examples of the containing class, nipype.pipeline.engine.Workflow.


Six code examples of Workflow.add_nodes are presented below, ordered by popularity by default.
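Before the examples, here is a minimal, self-contained sketch of what add_nodes does: it registers nodes on a workflow's graph without creating any connections between them. The node names and fields below are illustrative, not drawn from the examples that follow.

from nipype.pipeline.engine import Workflow, Node
from nipype.interfaces.utility import IdentityInterface

# add_nodes registers stand-alone nodes; edges are added later via connect().
wf = Workflow(name='demo', base_dir='.')
node_a = Node(IdentityInterface(fields=['x']), name='node_a')
node_b = Node(IdentityInterface(fields=['y']), name='node_b')
wf.add_nodes([node_a, node_b])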

Example 1: create

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """
        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # look up the first output column by name (avoids eval)
        iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
        # Main event
        out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
        inputsND = Node(interface=IdentityInterface(fields=out_fields),
                        run_without_submitting=True, name='inputs')
        inputsND.iterables = [('trainindex', iters['trains']),
                              ('testindex', iters['tests'])]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
            inputsND.inputs.Label = csvOut.outputs.__dict__['label']
            inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
            pass  # TODO
        metaflow = Workflow(name='metaflow')
        metaflow.config['execution'] = {
            'plugin': 'Linear',
            'stop_on_first_crash': 'false',
            'stop_on_first_rerun': 'false',
            # This stops at the first attempt to rerun, before running and before deleting previous results.
            'hash_method': 'timestamp',
            'single_thread_matlab': 'true',  # MATLAB 2011a+ is multi-core for matrix multiplication.
            'remove_unnecessary_outputs': 'true',
            'use_relative_paths': 'false',  # Relative paths should be on; requires a hash update when changed.
            'remove_node_directories': 'false',  # Experimental
            'local_hash_check': 'false'
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
             (metaflow, fusionflow,
              [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
             (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
             ])
Developer: NIRALUser, Project: BRAINSTools, Lines: 56, Source: crossValidate.py
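The pattern worth noting in Example 1: an IdentityInterface node with iterables is registered via add_nodes, and nipype then expands the downstream graph once per iterable value. A minimal sketch of that pattern, with an illustrative field name:

from nipype.pipeline.engine import Workflow, Node
from nipype.interfaces.utility import IdentityInterface

wf = Workflow(name='cv_sketch')
inputs = Node(IdentityInterface(fields=['fold']), name='inputs')
inputs.iterables = [('fold', [0, 1, 2])]  # downstream graph is cloned per value
wf.add_nodes([inputs])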

Example 2: create_workflow

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]
def create_workflow(sources, subjects, basedir=None):
    import os.path as osp
    import tempfile
    from nipype.pipeline.engine import Workflow, Node
    if len(sources) != len(subjects):
        raise Exception('Input files and subjects should be of equal size.')
    wf_name = 'spm12_%s' % subjects[0] if len(subjects) == 1 else 'spm12'
    if len(sources) == 1 and basedir is None:
        wf_basedir = osp.dirname(sources[0])
    elif basedir is not None:
        wf_basedir = basedir
    else:
        wf_basedir = tempfile.mkdtemp()

    w = Workflow(wf_name, base_dir=wf_basedir)
    nodes = []
    for subject, source in zip(subjects, sources):
        nodes.extend(create_nodes(source, subject))
    w.add_nodes(nodes)

    # create_nodes is assumed to return three nodes per subject; wire each
    # triple in sequence.
    for i in range(0, len(nodes), 3):
        w.connect(nodes[i], 'coregistered_source', nodes[i+1], 'source')
        w.connect(nodes[i+1], 'coregistered_source', nodes[i+2], 'channel_files')
    return w
Developer: xgrg, Project: alfa, Lines: 26, Source: spm12.py
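A hypothetical usage of create_workflow (the paths and subject IDs below are placeholders):

sources = ['/data/sub01_T1.nii', '/data/sub02_T1.nii']
subjects = ['sub01', 'sub02']
w = create_workflow(sources, subjects, basedir='/tmp/spm12_wf')
w.run()  # or w.run(plugin='MultiProc', plugin_args={'n_procs': 4})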

Example 3: create

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]
    def create(self):  # , **kwargs):
        """ Create the nodes and connections for the workflow """

        # Preamble
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print(("=" * 80))
        print((csvOut.outputs.__dict__))
        print(("=" * 80))

        iters = OrderedDict()
        label = list(csvOut.outputs.__dict__.keys())[0]
        result = getattr(csvOut.outputs, label)  # look up the first output column by name (avoids eval)
        iters["tests"], iters["trains"] = sample_crossvalidation_set(
            result, self.sample_size.default_value
        )
        # Main event
        out_fields = ["T1", "T2", "Label", "trainindex", "testindex"]
        inputsND = Node(
            interface=IdentityInterface(fields=out_fields),
            run_without_submitting=True,
            name="inputs",
        )
        inputsND.iterables = [
            ("trainindex", iters["trains"]),
            ("testindex", iters["tests"]),
        ]
        if not self.hasHeader.default_value:
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            inputsND.inputs.T1 = csvOut.outputs.__dict__["t1"]
            inputsND.inputs.Label = csvOut.outputs.__dict__["label"]
            inputsND.inputs.T2 = csvOut.outputs.__dict__["t2"]
            pass  # TODO
        metaflow = Workflow(name="metaflow")
        metaflow.config["execution"] = {
            "plugin": "Linear",
            "stop_on_first_crash": "false",
            "stop_on_first_rerun": "false",
            # This stops at first attempt to rerun, before running, and before deleting previous results.
            "hash_method": "timestamp",
            "single_thread_matlab": "true",  # Multi-core 2011a  multi-core for matrix multiplication.
            "remove_unnecessary_outputs": "true",
            "use_relative_paths": "false",  # relative paths should be on, require hash update when changed.
            "remove_node_directories": "false",  # Experimental
            "local_hash_check": "false",
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        self.connect(
            [
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainT1s.index"),
                        ("inputs.T1", "trainT1s.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainLabels.index"),
                        ("inputs.Label", "trainLabels.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.testindex", "testT1s.index"),
                        ("inputs.T1", "testT1s.inlist"),
                    ],
                ),
            ]
        )
Developer: BRAINSia, Project: BRAINSTools, Lines: 86, Source: crossValidate.py

Example 4: Node

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]
# In[48]:

# Establish input/output stream

infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list_test)]

# opj is assumed to be os.path.join; the templates are resolved by SelectFiles
# relative to base_directory.
lhtemplate_files = opj('lhtemplate*.nii.gz')
label_files = opj('{subject_id}-lab.nii.gz')
t1_files = opj('{subject_id}-t1-mask.nii.gz')
t2_files = opj('{subject_id}-t2s-bfc-mask.nii.gz')

templates = {'lhtemplate': lhtemplate_files,
             'label_files': label_files,
             't1_files': t1_files,
             't2_files': t2_files}
selectfiles = Node(SelectFiles(templates, base_directory=datadir), name="selectfiles")


# In[55]:

# Create pipeline and connect nodes
workflow = Workflow(name='normflow')
workflow.base_dir = '.'
workflow.add_nodes([test_antsreg_rigid])
#workflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
#                (selectfiles, test_antsreg_rigid, [('lhtemplate','moving_image')]),])
workflow.write_graph()
workflow.run()

Developer: agt24, Project: cpb_d7, Lines: 31, Source: ants_test.py
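The commented-out connect call above indicates the intended wiring; restoring it would look like the sketch below, assuming test_antsreg_rigid is an ANTs registration Node defined earlier in the notebook:

workflow.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (selectfiles, test_antsreg_rigid, [('lhtemplate', 'moving_image')]),
])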

Example 5: check_if_out_files_exist

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]
        # CHECK IF ALL FILES EXIST
        def check_if_out_files_exist(check_file_dict):
            for file in check_file_dict.values():
                if not os.path.exists(file):
                    raise Exception('file missing: %s' % file)

        check_file_dict = file_dict.copy()
        check_file_dict.pop('report_file')
        check_if_out_files_exist(check_file_dict)

        report = Node(util.Function(input_names=['subject_id', 'file_dict', 'df'],
                                    output_names=[],
                                    function=create_qc_report_pdf),
                      name='report_%s_%s' % (TR, subject_id))
        report.inputs.subject_id = subject_id
        report.inputs.file_dict = file_dict
        report.inputs.df = df

        wf.add_nodes([report])

# fixme
# ignore warning from np.rank
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    if plugin_name == 'CondorDAGMan':
        wf.run(plugin=plugin_name)
    if plugin_name == 'MultiProc':
        wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Developer: NeuroanatomyAndConnectivity, Project: LeiCA, Lines: 32, Source: run_13_create_qc_reports.py

Example 6: init_mriqc

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import add_nodes [as alias]

# ......... part of the code omitted here .........
        'ants_nthreads': opts.ants_nthreads,
        'ants_float': opts.ants_float,
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
        'webapi_url': opts.webapi_url,
        'webapi_port': opts.webapi_port,
        'upload_strict': opts.upload_strict,
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    if opts.dsname:
        settings['dataset_name'] = opts.dsname

    log_dir = settings['output_dir'] / 'logs'

    # Create directories
    log_dir.mkdir(parents=True, exist_ok=True)
    settings['work_dir'].mkdir(parents=True, exist_ok=True)

    # Set nipype config
    ncfg.update_config({
        'logging': {'log_directory': str(log_dir), 'log_to_file': True},
        'execution': {
            'crashdump_dir': str(log_dir), 'crashfile_format': 'txt',
            'resource_monitor': opts.profile},
    })

    # Plugin configuration
    plugin_settings = {}
    if n_procs == 1:
        plugin_settings['plugin'] = 'Linear'

        if settings['ants_nthreads'] == 0:
            settings['ants_nthreads'] = 1
    else:
        plugin_settings['plugin'] = 'MultiProc'
        plugin_settings['plugin_args'] = {'n_procs': n_procs}
        if opts.mem_gb:
            plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

        if settings['ants_nthreads'] == 0:
            # always leave one extra thread for non ANTs work,
            # don't use more than 8 threads - the speed up is minimal
            settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)

    # Overwrite options if --use-plugin provided
    if opts.use_plugin and opts.use_plugin.exists():
        from yaml import load as loadyml
        with opts.use_plugin.open() as pfile:
            plugin_settings.update(loadyml(pfile))

    # Process data types
    modalities = opts.modalities

    layout = BIDSLayout(str(settings['bids_dir']),
                        exclude=['derivatives', 'sourcedata'])
    dataset = collect_bids_data(
        layout,
        participant_label=opts.participant_label,
        session=opts.session_id,
        run=opts.run_id,
        task=opts.task_id,
        bids_type=modalities,
    )

    workflow = Workflow(name='workflow_enumerator')
    workflow.base_dir = settings['work_dir']

    wf_list = []
    subject_list = []
    for mod in modalities:
        if dataset[mod]:
            wf_list.append(build_workflow(dataset[mod], mod, settings=settings))
            subject_list += dataset[mod]

    retval['subject_list'] = subject_list
    if not wf_list:
        retval['return_code'] = 1
        return retval

    workflow.add_nodes(wf_list)
    retval['plugin_settings'] = plugin_settings
    retval['workflow'] = workflow
    retval['return_code'] = 0
    return retval
Developer: oesteban, Project: mriqc, Lines: 104, Source: mriqc_run.py
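Note that here add_nodes receives a list of sub-workflows rather than plain Nodes; nipype accepts both as graph members. A minimal sketch of the same enumerator pattern, with illustrative names:

from nipype.pipeline.engine import Workflow

parent = Workflow(name='workflow_enumerator')
sub_wfs = [Workflow(name='anat_wf'), Workflow(name='func_wf')]
parent.add_nodes(sub_wfs)  # sub-workflows become nodes of the parent graph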


Note: The nipype.pipeline.engine.Workflow.add_nodes examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects and copyright remains with their original authors; consult each project's license before redistributing, and do not reproduce without permission.