This page collects typical usage examples of the nipype.Workflow.add_nodes method in Python. If you are trying to work out what Workflow.add_nodes does or how to call it, the curated code examples below may help. You can also look further into usage examples of the class it belongs to, nipype.Workflow.
The following shows 7 code examples of the Workflow.add_nodes method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
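Before the examples, here is a minimal, self-contained sketch of the pattern they all share: build nodes, register them on a Workflow with add_nodes, optionally connect them afterwards, and run the graph. The helper function double, the node names double_a/double_b, and the temporary base directory are purely illustrative and are not taken from the examples below.

# A minimal add_nodes sketch (illustrative names, not taken from the examples below)
from tempfile import mkdtemp
from nipype import Node, Function, Workflow

def double(x):
    return 2 * x

# two standalone Function nodes
n_a = Node(Function(input_names=['x'], output_names=['out'], function=double),
           name='double_a')
n_b = Node(Function(input_names=['x'], output_names=['out'], function=double),
           name='double_b')
n_a.inputs.x = 3

wf = Workflow(name='add_nodes_sketch')
wf.base_dir = mkdtemp()
wf.add_nodes([n_a, n_b])          # register unconnected nodes on the workflow
wf.connect(n_a, 'out', n_b, 'x')  # edges can still be added after add_nodes
wf.run()                          # runs double_a, then double_b

The same call also accepts whole sub-workflows, which is how the meta-workflow examples below (Examples 5-7) attach per-subject pipelines with meta_wf.add_nodes([wf]).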
Example 1: test_serial_input
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# Additional module-level imports used by this snippet:
import os
from tempfile import mkdtemp

def test_serial_input():
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd}
    # test output of num_subnodes method when serial is default (False)
    # (assert_equal is imported from nipype.testing in the original test module)
    yield assert_equal, n1.num_subnodes(), len(n1.inputs.in1)
    # test running the workflow on default conditions
    error_raised = False
    try:
        w1.run(plugin='MultiProc')
    except Exception as e:
        # (`pe` is nipype.pipeline.engine in the original test module)
        pe.logger.info('Exception: %s' % str(e))
        error_raised = True
Example 2: test_serial_input
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# Additional module-level import used by this snippet:
import os

def test_serial_input(tmpdir):
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1, 2, 3]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config['execution'] = {'stop_on_first_crash': 'true',
                              'local_hash_check': 'true',
                              'crashdump_dir': wd,
                              'poll_sleep_duration': 2}
    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)
    # test running the workflow on default conditions
    w1.run(plugin='MultiProc')
    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1
    # test running the workflow on serial conditions
    w1.run(plugin='MultiProc')
Example 3: test_mapnode_json
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# Additional module-level imports used by this snippet:
import os
from glob import glob
from shutil import rmtree
from tempfile import mkdtemp

def test_mapnode_json():
    """Tests that mapnodes don't generate excess jsons
    """
    cwd = os.getcwd()
    wd = mkdtemp()
    os.chdir(wd)
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = eg.nodes()[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    # (assert_equal / assert_false are imported from nipype.testing in the original test module)
    yield assert_equal, len(outjson), 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    error_raised = False
    try:
        w1.run()
    except:
        error_raised = True
    yield assert_false, error_raised
    os.chdir(cwd)
    rmtree(wd)
Example 4: test_mapnode_json
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# Additional module-level imports used by this snippet:
import os
from glob import glob

def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})
    w1.run()
Example 5: group_multregress_openfmri
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# (Truncated excerpt; the original function also relies on nipype's Node class and the
#  utility/FSL interfaces used below, all imported at module level in the original script.)
#.........some of the code has been omitted here.........
                                                 'cope_id', '.gz']]
            dg.inputs.template_args['varcopes'] = [['model_id', 'task_id', subj_list, 'var', '',
                                                    'var', 'cope_id', '.gz']]
            dg.iterables = ('cope_id', cope_ids)
            dg.inputs.sort_filelist = False

            wk.connect(info, 'model_id', dg, 'model_id')
            wk.connect(info, 'task_id', dg, 'task_id')

            model = Node(MultipleRegressDesign(), name='l2model')
            model.inputs.groups = groups
            model.inputs.contrasts = contrasts[idx]
            model.inputs.regressors = regressors_needed[idx]

            mergecopes = Node(Merge(dimension='t'), name='merge_copes')
            wk.connect(dg, 'copes', mergecopes, 'in_files')

            if flamemodel != 'ols':
                mergevarcopes = Node(Merge(dimension='t'), name='merge_varcopes')
                wk.connect(dg, 'varcopes', mergevarcopes, 'in_files')

            mask_file = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
            flame = Node(FLAMEO(), name='flameo')
            flame.inputs.mask_file = mask_file
            flame.inputs.run_mode = flamemodel
            #flame.inputs.infer_outliers = True

            wk.connect(model, 'design_mat', flame, 'design_file')
            wk.connect(model, 'design_con', flame, 't_con_file')
            wk.connect(mergecopes, 'merged_file', flame, 'cope_file')
            if flamemodel != 'ols':
                wk.connect(mergevarcopes, 'merged_file', flame, 'var_cope_file')
            wk.connect(model, 'design_grp', flame, 'cov_split_file')

            if nonparametric:
                palm = Node(Function(input_names=['cope_file', 'design_file', 'contrast_file',
                                                  'group_file', 'mask_file', 'cluster_threshold'],
                                     output_names=['palm_outputs'],
                                     function=run_palm),
                            name='palm')
                palm.inputs.cluster_threshold = 3.09
                palm.inputs.mask_file = mask_file
                palm.plugin_args = {'sbatch_args': '-p om_all_nodes -N1 -c2 --mem=10G', 'overwrite': True}
                wk.connect(model, 'design_mat', palm, 'design_file')
                wk.connect(model, 'design_con', palm, 'contrast_file')
                wk.connect(mergecopes, 'merged_file', palm, 'cope_file')
                wk.connect(model, 'design_grp', palm, 'group_file')

            smoothest = Node(SmoothEstimate(), name='smooth_estimate')
            wk.connect(flame, 'zstats', smoothest, 'zstat_file')
            smoothest.inputs.mask_file = mask_file

            cluster = Node(Cluster(), name='cluster')
            wk.connect(smoothest, 'dlh', cluster, 'dlh')
            wk.connect(smoothest, 'volume', cluster, 'volume')
            cluster.inputs.connectivity = 26
            cluster.inputs.threshold = 2.3
            cluster.inputs.pthreshold = 0.05
            cluster.inputs.out_threshold_file = True
            cluster.inputs.out_index_file = True
            cluster.inputs.out_localmax_txt_file = True
            wk.connect(flame, 'zstats', cluster, 'in_file')

            ztopval = Node(ImageMaths(op_string='-ztop', suffix='_pval'),
                           name='z2pval')
            wk.connect(flame, 'zstats', ztopval, 'in_file')

            sinker = Node(DataSink(), name='sinker')
            sinker.inputs.base_directory = os.path.join(out_dir, 'task%03d' % task, contrast[0][0])
            sinker.inputs.substitutions = [('_cope_id', 'contrast'),
                                           ('_maths_', '_reversed_')]

            wk.connect(flame, 'zstats', sinker, 'stats')
            wk.connect(cluster, 'threshold_file', sinker, 'stats.@thr')
            wk.connect(cluster, 'index_file', sinker, 'stats.@index')
            wk.connect(cluster, 'localmax_txt_file', sinker, 'stats.@localmax')
            if nonparametric:
                wk.connect(palm, 'palm_outputs', sinker, 'stats.palm')

            if not no_reversal:
                zstats_reverse = Node(BinaryMaths(), name='zstats_reverse')
                zstats_reverse.inputs.operation = 'mul'
                zstats_reverse.inputs.operand_value = -1
                wk.connect(flame, 'zstats', zstats_reverse, 'in_file')

                cluster2 = cluster.clone(name='cluster2')
                wk.connect(smoothest, 'dlh', cluster2, 'dlh')
                wk.connect(smoothest, 'volume', cluster2, 'volume')
                wk.connect(zstats_reverse, 'out_file', cluster2, 'in_file')

                ztopval2 = ztopval.clone(name='ztopval2')
                wk.connect(zstats_reverse, 'out_file', ztopval2, 'in_file')
                wk.connect(zstats_reverse, 'out_file', sinker, 'stats.@neg')
                wk.connect(cluster2, 'threshold_file', sinker, 'stats.@neg_thr')
                wk.connect(cluster2, 'index_file', sinker, 'stats.@neg_index')
                wk.connect(cluster2, 'localmax_txt_file', sinker, 'stats.@neg_localmax')
            meta_workflow.add_nodes([wk])
    return meta_workflow
Example 6: create_resting_workflow
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# (This excerpt also assumes os, nipype's CommandLine, and the helpers get_info and
#  create_workflow are imported or defined at module level in the original script.)
def create_resting_workflow(args, workdir, outdir):
    if not os.path.exists(args.fsdir):
        raise ValueError('FreeSurfer directory has to exist')

    # remap freesurfer directory to a working directory
    if not os.path.exists(workdir):
        os.makedirs(workdir)

    # create a local subjects dir
    new_subjects_dir = os.path.join(workdir, 'subjects_dir')
    if not os.path.exists(new_subjects_dir):
        os.mkdir(new_subjects_dir)

    # create a link for each freesurfer target
    from glob import glob
    res = CommandLine('which mri_convert').run()
    average_dirs = glob(os.path.join(os.path.dirname(res.runtime.stdout), '..', 'subjects', ('*average*')))
    for dirname in average_dirs:
        dirlink = os.path.join(new_subjects_dir, dirname.split('/')[-1])
        if not os.path.islink(dirlink):
            os.symlink(os.path.realpath(dirname), dirlink)

    meta_wf = Workflow('meta_level')
    subjects_to_analyze = []
    bids_dir = os.path.abspath(args.bids_dir)
    # only for a subset of subjects
    if args.participant_label:
        subjects_to_analyze = ['sub-{}'.format(val) for val in args.participant_label]
    # for all subjects
    else:
        subject_dirs = sorted(glob(os.path.join(bids_dir, "sub-*")))
        subjects_to_analyze = [subject_dir.split("/")[-1] for subject_dir in subject_dirs]

    for subject_label in subjects_to_analyze:
        # create a link to the subject
        subject_link = os.path.join(new_subjects_dir, subject_label)
        orig_dir = os.path.join(os.path.abspath(args.fsdir), subject_label)
        if not os.path.exists(orig_dir):
            continue
        if not os.path.islink(subject_link):
            os.symlink(orig_dir,
                       subject_link)
        from bids.grabbids import BIDSLayout
        layout = BIDSLayout(bids_dir)
        for task in layout.get_tasks():
            TR, slice_times, slice_thickness, files = get_info(bids_dir, subject_label, task)
            name = 'resting_{sub}_{task}'.format(sub=subject_label, task=task)
            kwargs = dict(files=files,
                          target_file=os.path.abspath(args.target_file),
                          subject_id=subject_label,
                          TR=TR,
                          slice_times=slice_times,
                          vol_fwhm=args.vol_fwhm,
                          surf_fwhm=args.surf_fwhm,
                          norm_threshold=2.,
                          subjects_dir=new_subjects_dir,
                          target_subject=args.target_surfs,
                          lowpass_freq=args.lowpass_freq,
                          highpass_freq=args.highpass_freq,
                          sink_directory=os.path.abspath(os.path.join(outdir, subject_label, task)),
                          name=name)
            wf = create_workflow(**kwargs)
            meta_wf.add_nodes([wf])
    return meta_wf
Example 7: StringIO
# Required import: from nipype import Workflow [as alias]
# Or: from nipype.Workflow import add_nodes [as alias]
# (Truncated excerpt from a driver script; `r` (a previously downloaded CSV response), `pd`
#  (pandas), `args`, `sink_dir`, `work_dir`, and create_workflow are all defined earlier in
#  the original script.)
else:
    from io import StringIO
    data = StringIO(r.content.decode())

df = pd.read_csv(data)
max_subjects = df.shape[0]
if args.num_subjects:
    max_subjects = args.num_subjects
elif ('CIRCLECI' in os.environ and os.environ['CIRCLECI'] == 'true'):
    max_subjects = 1

meta_wf = Workflow('metaflow')
count = 0
for row in df.iterrows():
    wf = create_workflow(row[1].Subject, sink_dir, row[1]['File Path'])
    meta_wf.add_nodes([wf])
    print('Added workflow for: {}'.format(row[1].Subject))
    count = count + 1
    # run this for only one person on CircleCI
    if count >= max_subjects:
        break

meta_wf.base_dir = work_dir
meta_wf.config['execution']['remove_unnecessary_files'] = False
meta_wf.config['execution']['poll_sleep_duration'] = 2
meta_wf.config['execution']['crashdump_dir'] = work_dir
if args.plugin_args:
    meta_wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
    meta_wf.run(args.plugin)