本文整理汇总了Python中nipype.pipeline.engine.Workflow.config['execution']方法的典型用法代码示例。如果您正苦于以下问题:Python Workflow.config['execution']方法的具体用法?Python Workflow.config['execution']怎么用?Python Workflow.config['execution']使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nipype.pipeline.engine.Workflow
的用法示例。
在下文中一共展示了Workflow.config['execution']方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create
# 需要导入模块: from nipype.pipeline.engine import Workflow [as 别名]
# 或者: from nipype.pipeline.engine.Workflow import config['execution'] [as 别名]
def create(self):  # , **kwargs):
    """Create the nodes and connections for the workflow.

    Reads the subject CSV once via ``CSVReader``, derives
    cross-validation train/test index sets with
    ``subsample_crossValidationSet``, exposes the file lists and index
    iterables through an ``IdentityInterface`` node inside a ``metaflow``
    Workflow, and wires that node into a ``FusionLabelWorkflow``.

    All inputs come from this object's traits (``csv_file``,
    ``hasHeader``, ``sample_size``); nothing is returned — the method's
    effect is the connections registered on ``self``.
    """
    # Preamble: run the CSV reader up front (outside the workflow) so the
    # column lists are available while building the iterables below.
    csvReader = CSVReader()
    csvReader.inputs.in_file = self.csv_file.default_value
    csvReader.inputs.header = self.hasHeader.default_value
    csvOut = csvReader.run()
    print("=" * 80)
    print(csvOut.outputs.__dict__)
    print("=" * 80)
    iters = OrderedDict()
    # The first output attribute holds the column used to build the
    # cross-validation split.
    label = list(csvOut.outputs.__dict__.keys())[0]
    # getattr performs the same attribute lookup as the original
    # eval("csvOut.outputs.{0}".format(label)) without executing
    # dynamically built code.
    result = getattr(csvOut.outputs, label)
    iters['tests'], iters['trains'] = subsample_crossValidationSet(result, self.sample_size.default_value)
    # Main event: identity node carrying the per-fold train/test indices.
    out_fields = ['T1', 'T2', 'Label', 'trainindex', 'testindex']
    inputsND = Node(interface=IdentityInterface(fields=out_fields),
                    run_without_submitting=True, name='inputs')
    inputsND.iterables = [('trainindex', iters['trains']),
                          ('testindex', iters['tests'])]
    if not self.hasHeader.default_value:
        # Headerless CSV: columns are positional — T1, Label, T2 in order.
        inputsND.inputs.T1 = csvOut.outputs.column_0
        inputsND.inputs.Label = csvOut.outputs.column_1
        inputsND.inputs.T2 = csvOut.outputs.column_2
    else:
        # CSV with header: columns are exposed by (lower-cased) name.
        inputsND.inputs.T1 = csvOut.outputs.__dict__['t1']
        inputsND.inputs.Label = csvOut.outputs.__dict__['label']
        inputsND.inputs.T2 = csvOut.outputs.__dict__['t2']
    metaflow = Workflow(name='metaflow')
    metaflow.config['execution'] = {
        'plugin': 'Linear',
        'stop_on_first_crash': 'false',
        'stop_on_first_rerun': 'false',
        # This stops at first attempt to rerun, before running, and before deleting previous results.
        'hash_method': 'timestamp',
        'single_thread_matlab': 'true',  # Multi-core 2011a multi-core for matrix multiplication.
        'remove_unnecessary_outputs': 'true',
        'use_relative_paths': 'false',  # relative paths should be on, require hash update when changed.
        'remove_node_directories': 'false',  # Experimental
        'local_hash_check': 'false'
    }
    metaflow.add_nodes([inputsND])
    fusionflow = FusionLabelWorkflow()
    self.connect(
        [(metaflow, fusionflow, [('inputs.trainindex', 'trainT1s.index'), ('inputs.T1', 'trainT1s.inlist')]),
         (metaflow, fusionflow,
          [('inputs.trainindex', 'trainLabels.index'), ('inputs.Label', 'trainLabels.inlist')]),
         (metaflow, fusionflow, [('inputs.testindex', 'testT1s.index'), ('inputs.T1', 'testT1s.inlist')])
         ])
示例2: open
# 需要导入模块: from nipype.pipeline.engine import Workflow [as 别名]
# 或者: from nipype.pipeline.engine.Workflow import config['execution'] [as 别名]
# Wire the fieldmap-correction chain: convertwarp combines the EPI shiftmap
# with the epi->anat transform, applywarp resamples the EPI mean into
# anatomical space, and both products are exported on outputnode.
# NOTE(review): topup, inputnode, epireg, concat, convertwarp, applywarp and
# outputnode are defined earlier in this script (outside this excerpt).
topup.connect([(inputnode, convertwarp, [('anat_head', 'reference')]),
               (epireg, convertwarp, [('shiftmap', 'shiftmap')]),
               (concat, convertwarp, [('out_file', 'postmat')]),
               (inputnode, applywarp, [('epi_mean', 'in_file'),
                                       ('anat_head', 'ref_file')]),
               (convertwarp, applywarp, [('out_field', 'field_file')]),
               (applywarp, outputnode, [('out_file', 'topup_mean_coreg')]),
               (convertwarp, outputnode, [('out_field', 'topup_fullwarp')])
               ])
##### in and output ############
# Working directory for the workflow's node sandboxes.
topup.base_dir='/scr/kansas1/huntenburg/'
# Keep all intermediate outputs (config values are strings by convention here).
topup.config['execution']={'remove_unnecessary_outputs': 'False'}
# Site-specific data locations (hard-coded paths for this analysis machine).
data_dir='/scr/jessica2/Schaare/LEMON/'
fs_subjects_dir='/scr/jessica2/Schaare/LEMON/freesurfer/freesurfer/'
out_dir = '/scr/jessica2/Schaare/LEMON/preprocessed/'
# Single test subject; the commented block below reads the full subject list
# from done_freesurfer.txt instead.
subjects=['LEMON001']
# subjects=[]
# f = open('/scr/jessica2/Schaare/LEMON/done_freesurfer.txt','r')
# for line in f:
#     subjects.append(line.strip())
# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=['subject_id','fs_subjects_dir']),
                  name='infosource')
infosource.inputs.fs_subjects_dir=fs_subjects_dir
infosource.iterables=('subject_id', subjects)