

Python Workflow.run Method Code Examples

This article collects typical code examples of the Python method nipype.Workflow.run. If you are struggling with questions such as: What exactly does Workflow.run do? How do I call it? What does it look like in real code? — then the curated examples below may help. You can also explore further usage examples of nipype.Workflow, the class this method belongs to.


Eleven code examples of the Workflow.run method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
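Before the collected examples, here is a minimal, self-contained sketch of the basic Workflow.run pattern they all follow. It uses only the nipype API that appears in the examples below (Workflow, Node, Function, base_dir, add_nodes, and the MultiProc plugin); the function, node, and workflow names are invented for illustration.

from tempfile import mkdtemp
from nipype import Node, Function, Workflow

def double(x):
    return x * 2

# Wrap a plain Python callable in a Function interface and a Node.
doubler = Node(Function(input_names=['x'],
                        output_names=['out'],
                        function=double),
               name='doubler')
doubler.inputs.x = 21

wf = Workflow(name='demo')   # arbitrary workflow name
wf.base_dir = mkdtemp()      # working directory for intermediate results
wf.add_nodes([doubler])

# run() builds the execution graph and executes it; the plugin argument selects
# the backend ('Linear', 'MultiProc', or a cluster plugin such as 'SLURM').
wf.run(plugin='MultiProc')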

Example 1: test_serial_input

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
def test_serial_input():
    import os
    from tempfile import mkdtemp
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow
    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1,2,3]


    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd}

    # test output of num_subnodes method when serial is default (False)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)

    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        # 'pe' is nipype.pipeline.engine, imported at module level in the original test file
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
Developer: belevtsoff, Project: nipype, Lines: 35, Source: test_engine.py

Example 2: test_serial_input

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
Developer: mick-d, Project: nipype, Lines: 37, Source: test_engine.py

Example 3: test_mapnode_json

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    import os
    from glob import glob
    from shutil import rmtree
    from tempfile import mkdtemp
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except Exception:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Developer: jvarada, Project: nipype, Lines: 45, Source: test_engine.py

Example 4: test_mapnode_json

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1
    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Developer: mick-d, Project: nipype, Lines: 38, Source: test_engine.py

Example 5: StringIO

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
    else:
        from io import StringIO
        data = StringIO(r.content.decode())

    df = pd.read_csv(data)
    max_subjects = df.shape[0]
    if args.num_subjects:
        max_subjects = args.num_subjects
    elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
        max_subjects = 1
    
    meta_wf = Workflow('metaflow')
    count = 0
    for row in df.iterrows():
        wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
        meta_wf.add_nodes([wf])
        print('Added workflow for: {}'.format(row[1].Subject))
        count = count + 1
        # run this for only one person on CircleCI
        if count >= max_subjects:
            break

    meta_wf.base_dir = work_dir
    meta_wf.config['execution']['remove_unnecessary_files'] = False
    meta_wf.config['execution']['poll_sleep_duration'] = 2
    meta_wf.config['execution']['crashdump_dir'] = work_dir
    if args.plugin_args:
        meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        meta_wf.run(args.plugin)
Developer: ReproNim, Project: simple_workflow, Lines: 32, Source: run_demo_workflow.py

Example 6: dict

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
info = dict(T1=[['subject_id']])

infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)

# Create a datasource node to get the T1 file
datasource = Node(DataGrabber(infields=['subject_id'], outfields=list(info.keys())), name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath('/home/data/madlab/data/mri/seqtrd/')
datasource.inputs.field_template = dict(T1='%s/anatomy/T1_*.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.subjects_dir = os.environ['SUBJECTS_DIR']
reconall_node.inputs.terminal_output = 'allatonce'
reconall_node.plugin_args = {'bsub_args': '-q PQ_madlab -n 2', 'overwrite': True}

wf = Workflow(name='fsrecon')

wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')

wf.base_dir = os.path.abspath('/scratch/madlab/surfaces/seqtrd')
#wf.config['execution']['job_finished_timeout'] = 65

wf.run(plugin='LSF', plugin_args={'bsub_args': '-q PQ_madlab'})

Developer: mattfeld, Project: mri_misc, Lines: 31, Source: run_recon.py

Example 7: Image

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
                                                  ('output_node.trackingFolder', 'input_node.tracking_dir'),
                                                  ('output_node.tracks_folder', 'input_node.tracks_dir'),
                                                  ('output_node.highresWmMask', 'input_node.wmmask_1mm'),
                                                  ('output_node.lowresWmMask', 'input_node.wmmask')]),
        (maskGenNode, connectivityRowNode, [(('number_of_rois', roiRange), 'roi'),
                                           ('affine_matrix', 'affine_matrix'),
                                           ('wmborder_data', 'wmborder')]),
        (inputNode, connectivityRowNode, [('subject_id', 'subid')]),
        (preprocessing.wf, connectivityRowNode, [('output_node.tracks_folder', 'tracksPath')]),
        (mrtrix.mrtrix_main.wf, connectivityRowNode, [('output_node.trk_files', 'track_files')]),
        (inputNode, aggregateConnectivityNode, [('subject_id', 'sub_id')]),
        (maskGenNode, aggregateConnectivityNode, [('wmborder_data', 'wmborder')]),
        (preprocessing.wf, aggregateConnectivityNode, [('output_node.tracks_folder', 'tracksPath')]),
        (connectivityRowNode, aggregateConnectivityNode, [('SC_cap_row_filename', 'cap_row_files'),
                                                         ('SC_dist_row_filename', 'dist_row_files')])
    ])

# ## Draw the Graph
wf.write_graph(subject_folder + subject_id + "/TVB_workflow_graph.dot", graph2use='colored')
# from IPython.display import Image
# Image(filename="./TVB_workflow_graph.dot.png")

# ## Run the Workflow
#wf.run(plugin='MultiProc', plugin_args={'n_procs': cpu_count()})
wf.run(plugin='OAR', plugin_args={'oarsub_args': '-l walltime=04:00:00'})
wf.run()




Developer: JohnGriffiths, Project: TVB-Pypeline, Lines: 28, Source: TVB_pipeline.py

Example 8: Image

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
            (maskGenNode, aggregateConnectivityNode, [('wmborder_data', 'wmborder')]),
            (preprocessing.wf, aggregateConnectivityNode, [('output_node.tracks_folder', 'tracksPath')]),
            (connectivityRowNode, aggregateConnectivityNode, [('SC_cap_row_filename', 'cap_row_files'),
                                                                ('SC_dist_row_filename', 'dist_row_files')])])


# ######## Node-Level-Config
# With this file, it is possible to define custom parameters for processing nodes when using the pipeline on a
# High-Performance-Cluster (HPC) utilizing a specific job scheduler.
# For information about the job-scheduler plugins and their parameters, please refer to:
# https://github.com/nipy/nipype/blob/master/doc/users/plugins.rst

# TODO: Get this stuff right!
# Example: OAR Job Scheduler
# preprocessing.wf.reconall.plugin_args = {'overwrite': True, 'oarsub_args': '-l nodes=1,walltime=16:00:00'}
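# The same node-level override works for other schedulers; the keys below match
# nipype's SLURM and SGE plugins, but the node path ('preprocessing.wf.reconall')
# is taken from the OAR example above and the resource values are placeholders:
# preprocessing.wf.reconall.plugin_args = {'overwrite': True, 'sbatch_args': '--mem=8G -t 16:00:00'}  # SLURM
# preprocessing.wf.reconall.plugin_args = {'overwrite': True, 'qsub_args': '-l h_vmem=8G'}  # SGE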


# ## Draw the Graph
# wf.write_graph(subject_folder + "/TVB_workflow_graph.dot", graph2use = 'colored')
# from IPython.display import Image
# Image(filename="./TVB_workflow_graph.dot.png")

# ## Run the Workflow
wf.run(plugin='MultiProc', plugin_args={'n_procs': cpu_count()})
# wf.run(plugin='OAR', plugin_args={'oarsub_args': '-l walltime=04:00:00', 'template': 'examples/oarSetup.sh'})
# wf.run()




Developer: BrainModes, Project: TVB-Pypeline, Lines: 28, Source: TVB_pipeline.py

Example 9: Workflow

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
    wf = Workflow("MachineLearning_Baseline_{0}".format(session_id))
    datasink = Node(DataSink(), name="DataSink")
    datasink.inputs.base_directory = os.path.join(results_dir, session_id)
    for hemisphere in ("lh", "rh"):
        for matter in ("gm", "wm"):
            wf.connect(
                logb_wf,
                "output_spec.{0}_{1}surface_file".format(hemisphere, matter),
                datasink,
                "[email protected]{0}_{1}".format(hemisphere, matter),
            )

    logb_wf.inputs.input_spec.t1_file = t1_file
    logb_wf.inputs.input_spec.orig_t1 = t1_file
    logb_wf.inputs.input_spec.t2_file = t2_file
    logb_wf.inputs.input_spec.posteriors = posterior_files
    logb_wf.inputs.input_spec.hncma_file = hncma_atlas
    logb_wf.inputs.input_spec.abc_file = abc_file
    # logb_wf.inputs.input_spec.acpc_transform = identity_transform_file
    logb_wf.inputs.input_spec.rho = direction_files["rho"]
    logb_wf.inputs.input_spec.theta = direction_files["theta"]
    logb_wf.inputs.input_spec.phi = direction_files["phi"]
    logb_wf.inputs.input_spec.lh_white_surface_file = lh_white_surface_file
    logb_wf.inputs.input_spec.rh_white_surface_file = rh_white_surface_file
    logb_wf.inputs.input_spec.wm_classifier_file = wm_classifier_file
    logb_wf.inputs.input_spec.gm_classifier_file = gm_classifier_file
    wf.base_dir = base_dir
    # wf.run(plugin="SGE", plugin_args={"qsub_args": "-q HJ,all.q,COE,UI"})
    # wf.run(plugin="MultiProc", plugin_args={"n_procs": 24})
    wf.run()
Developer: BRAINSia, Project: BRAINSTools, Lines: 32, Source: RunEdgePrediction.py

Example 10: Workflow

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
                name="datasink")

# Use the following DataSink output substitutions
substitutions = [('_subject_id_', '')]
datasink.inputs.substitutions = substitutions

###
# Specify Normalization Workflow & Connect Nodes

# Initiation of the ANTS normalization workflow
regflow = Workflow(name='regflow')
regflow.base_dir = opj(experiment_dir, working_dir)

# Connect workflow nodes
regflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
                 (selectfiles, antsreg, [('anat', 'moving_image')]),
                 (antsreg, datasink, [('warped_image',
                                       'antsreg.@warped_image'),
                                      ('inverse_warped_image',
                                       'antsreg.@inverse_warped_image'),
                                      ('composite_transform',
                                       'antsreg.@transform'),
                                      ('inverse_composite_transform',
                                       'antsreg.@inverse_transform')]),
                 ])

###
# Run Workflow
regflow.write_graph(graph2use='flat')
regflow.run('Linear')
Developer: miykael, Project: nipype_tutorial, Lines: 32, Source: ANTS_registration.py

Example 11: Node

# Required module: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import run [as alias]
    
    y = np.genfromtxt(response)
    X = pd.read_csv(data)
    data_mod = data.split('/')[-1].replace('.csv', '')    

    if classifier == 'et':
        od = '/storage/gablab001/data/genus/GIT/genus/fs_cog/pred_diag/extra_trees/results/'
        on = classifier + '_{}.pkl'.format(data_mod)
        mod = Mods.FuzzyTrees(X=X, y=y, out_dir=od, out_name=on)
        outpath = mod.run()
    elif classifier == 'lg':
        od = '/storage/gablab001/data/genus/GIT/genus/fs_cog/pred_diag/lg/results/'
        on = classifier + '_{}.pkl'.format(data_mod)
        mod = Mods.Logistic(X=X, y=y, out_dir=od, out_name=on)
        outpath = mod.run()

    return outpath

Run = Node(interface=Function(
        input_names=['data', 'classifier', 'response'],
        output_names=['outpath'],
        function=run), name='Run')

Run.inputs.response = response_var
wf.connect(Iternode, 'data', Run, 'data')
wf.connect(Iternode, 'classifier', Run, 'classifier')
sbatch_params = '--mem=4G -t 5-23:00:00 --qos=gablab'
wf.run(plugin='SLURM', plugin_args={'sbatch_args': sbatch_params})
 
Developer: YSanchezAraujo, Project: genus, Lines: 31, Source: submit.py


Note: The nipype.Workflow.run method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to each project's License. Do not repost without permission.