本文整理汇总了Python中nipype.Workflow.config['execution']['crashdump_dir']方法的典型用法代码示例。如果您正苦于以下问题:Python Workflow.config['execution']['crashdump_dir']方法的具体用法?Python Workflow.config['execution']['crashdump_dir']怎么用?Python Workflow.config['execution']['crashdump_dir']使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nipype.Workflow
的用法示例。
在下文中一共展示了Workflow.config['execution']['crashdump_dir']方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_mapnode_json
# 需要导入模块: from nipype import Workflow [as 别名]
# 或者: from nipype.Workflow import config['execution']['crashdump_dir'] [as 别名]
def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons.

    Runs a one-MapNode workflow several times and verifies that exactly one
    result-hash JSON file exists in the node's output directory, and that a
    stray JSON file placed there does not trigger a rerun (which would raise
    under ``stop_on_first_rerun``).

    Nose-style generator test: each ``yield`` emits one assertion.
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    # Changing the input invalidates the node hash, so this run re-executes.
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()
    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    yield assert_equal, len(outjson), 1
    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except Exception:
        # FIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; only real errors should count as a failure here.
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
示例2: test_mapnode_json
# 需要导入模块: from nipype import Workflow [as 别名]
# 或者: from nipype.Workflow import config['execution']['crashdump_dir'] [as 别名]
def test_mapnode_json(tmpdir):
    """Verify that a MapNode leaves exactly one result-hash JSON behind.

    Builds a trivial one-node workflow, runs it with changing inputs, and
    checks that (a) only a single ``_0x*.json`` hash file exists in the
    node's output directory and (b) an unrelated JSON file dropped there
    does not cause a rerun even with ``stop_on_first_rerun`` enabled.
    """
    tmpdir.chdir()
    workdir = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    mapnode = MapNode(
        Function(input_names=['in1'], output_names=['out'], function=func1),
        iterfield=['in1'],
        name='n1',
    )
    mapnode.inputs.in1 = [1]

    wf = Workflow(name='test')
    wf.base_dir = workdir
    wf.config['execution']['crashdump_dir'] = workdir
    wf.add_nodes([mapnode])
    wf.run()

    # A changed input invalidates the cached hash, forcing a re-execution.
    mapnode.inputs.in1 = [2]
    wf.run()

    # should rerun
    mapnode.inputs.in1 = [1]
    exec_graph = wf.run()
    node = list(exec_graph.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # A stray JSON in the output dir must not be mistaken for a hash file.
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    wf.config['execution'].update(stop_on_first_rerun=True)
    wf.run()
示例3: StringIO
# 需要导入模块: from nipype import Workflow [as 别名]
# 或者: from nipype.Workflow import config['execution']['crashdump_dir'] [as 别名]
else:
from io import StringIO
data = StringIO(r.content.decode())
df = pd.read_csv(data)
max_subjects = df.shape[0]
if args.num_subjects:
max_subjects = args.num_subjects
elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
max_subjects = 1
meta_wf = Workflow('metaflow')
count = 0
for row in df.iterrows():
wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
meta_wf.add_nodes([wf])
print('Added workflow for: {}'.format(row[1].Subject))
count = count + 1
# run this for only one person on CircleCI
if count >= max_subjects:
break
meta_wf.base_dir = work_dir
meta_wf.config['execution']['remove_unnecessary_files'] = False
meta_wf.config['execution']['poll_sleep_duration'] = 2
meta_wf.config['execution']['crashdump_dir'] = work_dir
if args.plugin_args:
meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
meta_wf.run(args.plugin)