This article collects typical usage examples of the Python method nipype.pipeline.engine.Workflow.write_graph. If you are wondering what Workflow.write_graph does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of its containing class, nipype.pipeline.engine.Workflow.
Below are 15 code examples of Workflow.write_graph, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
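As a quick orientation before the examples: write_graph writes the workflow graph to a dot file and, when graphviz is available, renders it to an image. A minimal sketch (the workflow and node names here are illustrative, not taken from the examples below):

import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util

# build a trivial two-node workflow
demo_wf = pe.Workflow(name='demo_wf')
source = pe.Node(util.IdentityInterface(fields=['x']), name='source')
sink = pe.Node(util.IdentityInterface(fields=['x']), name='sink')
demo_wf.connect(source, 'x', sink, 'x')
# graph2use accepts 'orig', 'flat', 'hierarchical', 'exec' or 'colored';
# format (e.g. 'png' or 'pdf') is handed to graphviz for rendering
demo_wf.write_graph(dotfilename='demo_wf', graph2use='flat', format='png')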
Example 1: create_converter_structural_pipeline
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
def create_converter_structural_pipeline(working_dir, ds_dir, name="converter_struct"):
# initiate workflow
converter_wf = Workflow(name=name)
converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")
# set fsl output
fsl.FSLCommand.set_default_output_type("NIFTI_GZ")
# inputnode
inputnode = Node(util.IdentityInterface(fields=["t1w_dicom"]), name="inputnode")
outputnode = Node(util.IdentityInterface(fields=["t1w"]), name="outputnode")
niftisink = Node(nio.DataSink(), name="niftisink")
niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")
# convert to nifti
# TODO: check whether the geometry bug appears; use dcm2nii instead?
converter_t1w = Node(DcmStack(embed_meta=True), name="converter_t1w")
converter_t1w.plugin_args = {"submit_specs": "request_memory = 2000"}
converter_t1w.inputs.out_format = "t1w"
converter_wf.connect(inputnode, "t1w_dicom", converter_t1w, "dicom_files")
# reorient to standard orientation
reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
converter_wf.connect(converter_t1w, "out_file", reor_2_std, "in_file")
converter_wf.connect(reor_2_std, "out_file", outputnode, "t1w")
# save original niftis
converter_wf.connect(reor_2_std, "out_file", niftisink, "sMRI")
converter_wf.write_graph(dotfilename="converter_struct", graph2use="flat", format="pdf")
return converter_wf
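The factory pattern above (build the workflow, write its graph, return it) recurs throughout these examples. A hypothetical caller (paths and the DICOM list are illustrative, not from the original source) might use it like this:

converter_wf = create_converter_structural_pipeline('/tmp/working_dir', '/tmp/ds_dir')
converter_wf.inputs.inputnode.t1w_dicom = ['/tmp/dicoms/t1w/0001.dcm']  # illustrative input
converter_wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})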
Example 2: create_converter_diffusion_pipeline
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
def create_converter_diffusion_pipeline(working_dir, ds_dir, name="converter_diffusion"):
# initiate workflow
converter_wf = Workflow(name=name)
converter_wf.base_dir = os.path.join(working_dir, "LeiCA_resting")
# set fsl output
fsl.FSLCommand.set_default_output_type("NIFTI_GZ")
# inputnode
inputnode = Node(util.IdentityInterface(fields=["dMRI_dicom"]), name="inputnode")
outputnode = Node(util.IdentityInterface(fields=["dMRI"]), name="outputnode")
niftisink = Node(nio.DataSink(), name="niftisink")
niftisink.inputs.base_directory = os.path.join(ds_dir, "raw_niftis")
# convert dMRI to nifti
converter_dMRI = Node(Dcm2nii(), name="converter_dMRI")
converter_dMRI.inputs.gzip_output = True
converter_dMRI.inputs.nii_output = True
converter_dMRI.inputs.anonymize = False
converter_dMRI.plugin_args = {"submit_specs": "request_memory = 2000"}
converter_wf.connect(inputnode, "dMRI_dicom", converter_dMRI, "source_names")
dMRI_rename = Node(util.Rename(format_string="DTI_mx_137.nii.gz"), name="dMRI_rename")
converter_wf.connect(converter_dMRI, "converted_files", dMRI_rename, "in_file")
bvecs_rename = Node(util.Rename(format_string="DTI_mx_137.bvecs"), name="bvecs_rename")
converter_wf.connect(converter_dMRI, "bvecs", bvecs_rename, "in_file")
bvals_rename = Node(util.Rename(format_string="DTI_mx_137.bvals"), name="bvals_rename")
converter_wf.connect(converter_dMRI, "bvals", bvals_rename, "in_file")
# reorient to standard orientation
reor_2_std = Node(fsl.Reorient2Std(), name="reor_2_std")
converter_wf.connect(dMRI_rename, "out_file", reor_2_std, "in_file")
converter_wf.connect(reor_2_std, "out_file", outputnode, "dMRI")
# save original niftis
converter_wf.connect(reor_2_std, "out_file", niftisink, "dMRI.@dwi")
converter_wf.connect(bvals_rename, "out_file", niftisink, "dMRI.@bvals")
converter_wf.connect(bvecs_rename, "out_file", niftisink, "dMRI.@bvecs")
converter_wf.write_graph(dotfilename="converter_diffusion", graph2use="flat", format="pdf")
return converter_wf
Example 3: create_structural
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir, standard_brain):
# main workflow
struct_preproc = Workflow(name='anat_preproc')
struct_preproc.base_dir = working_dir
struct_preproc.config['execution']['crashdump_dir'] = struct_preproc.base_dir + "/crash_files"
# workflow to get brain, head and wmseg from freesurfer and convert to nifti
mgzconvert = create_mgzconvert_pipeline()
mgzconvert.inputs.inputnode.fs_subjects_dir = freesurfer_dir
mgzconvert.inputs.inputnode.fs_subject_id = subject
normalize = create_normalize_pipeline()
normalize.inputs.inputnode.standard = standard_brain
# sink to store files
sink = Node(nio.DataSink(base_directory=out_dir,
parameterization=False,
substitutions=[
('transform_Warped', 'T1_brain2mni')]),
name='sink')
# connections
struct_preproc.connect(
[(mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
(mgzconvert, sink, [('outputnode.anat_head', '@head')]),
(mgzconvert, sink, [('outputnode.anat_brain', '@brain')]),
(mgzconvert, sink, [('outputnode.anat_brain_mask', '@mask')]),
(normalize, sink, [('outputnode.anat2std', '@anat2std'),
('outputnode.anat2std_transforms', 'transforms2mni.@anat2std_transforms'),
('outputnode.std2anat_transforms', 'transforms2mni.@std2anat_transforms')])
])
struct_preproc.write_graph(dotfilename='struct_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
# struct_preproc.run()
struct_preproc.run(plugin='CondorDAGMan', plugin_args={'initial_specs': 'request_memory = 1500'})
Example 4:
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
# Datasink
datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")
# Use the following DataSink output substitutions
substitutions = [('_subject_id_', ''),
('_apply2con', 'apply2con'),
('_warpall', 'warpall')]
datasink.inputs.substitutions = substitutions
# Connect SelectFiles and DataSink to the workflow
normflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
(selectfiles, apply2con, [('func', 'input_image')]),
(selectfiles, apply2mean, [('mean', 'input_image')]),
(selectfiles, antsreg, [('anat', 'moving_image')]),
(antsreg, datasink, [('warped_image', 'antsreg.@warped_image'),
                     ('inverse_warped_image', 'antsreg.@inverse_warped_image'),
                     ('composite_transform', 'antsreg.@transform'),
                     ('inverse_composite_transform', 'antsreg.@inverse_transform')]),
(apply2con, datasink, [('output_image', 'warp_complete.@con')]),
(apply2mean, datasink, [('output_image', 'warp_complete.@mean')]),
])
normflow.write_graph(graph2use='colored')
normflow.run('MultiProc', plugin_args={'n_procs': 8})
Example 5: learning_prepare_data_wf
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
if 'parcellation_path' in data_lookup_dict[in_data_name].keys():
parcellation_path = data_lookup_dict[in_data_name]['parcellation_path']
else:
parcellation_path = None
if 'fwhm' in data_lookup_dict[in_data_name].keys():
fwhm = data_lookup_dict[in_data_name]['fwhm']
if fwhm == 0:
fwhm = None
else:
fwhm = None
if 'mask_name' in data_lookup_dict[in_data_name].keys():
mask_path = template_lookup_dict[data_lookup_dict[in_data_name]['mask_name']]
else:
mask_path = None
if 'use_diagonal' in data_lookup_dict[in_data_name].keys():
use_diagonal = data_lookup_dict[in_data_name]['use_diagonal']
else:
use_diagonal = False
if 'use_fishers_z' in data_lookup_dict[in_data_name].keys():
use_fishers_z = data_lookup_dict[in_data_name]['use_fishers_z']
else:
use_fishers_z = False
if 'df_col_names' in data_lookup_dict[in_data_name].keys():
df_col_names = data_lookup_dict[in_data_name]['df_col_names']
else:
df_col_names = None
return file_list, matrix_name, parcellation_path, fwhm, mask_path, use_diagonal, use_fishers_z, df_col_names
create_file_list = Node(util.Function(input_names=['subjects_list',
'in_data_name',
'data_lookup_dict',
'template_lookup_dict',
],
output_names=['file_list',
'matrix_name',
'parcellation_path',
'fwhm',
'mask_path',
'use_diagonal',
'use_fishers_z',
'df_col_names'],
function=create_file_list_fct),
name='create_file_list')
wf.connect(create_df, 'full_subjects_list', create_file_list, 'subjects_list')
wf.connect(in_data_name_infosource, 'in_data_name', create_file_list, 'in_data_name')
create_file_list.inputs.data_lookup_dict = data_lookup_dict
create_file_list.inputs.template_lookup_dict = template_lookup_dict
###############################################################################################################
# VECTORIZE AND AGGREGATE SUBJECTS
# stack single subject np arrays vertically
vectorize_aggregate_subjects = Node(util.Function(input_names=['in_data_file_list',
'mask_file',
'matrix_name',
'parcellation_path',
'fwhm',
'use_diagonal',
'use_fishers_z',
'df_file',
'df_col_names'],
output_names=['vectorized_aggregated_file',
'unimodal_backprojection_info_file'],
function=vectorize_and_aggregate),
name='vectorize_aggregate_subjects')
wf.connect(create_file_list, 'file_list', vectorize_aggregate_subjects, 'in_data_file_list')
wf.connect(create_file_list, 'mask_path', vectorize_aggregate_subjects, 'mask_file')
wf.connect(create_file_list, 'matrix_name', vectorize_aggregate_subjects, 'matrix_name')
wf.connect(create_file_list, 'parcellation_path', vectorize_aggregate_subjects, 'parcellation_path')
wf.connect(create_file_list, 'fwhm', vectorize_aggregate_subjects, 'fwhm')
wf.connect(create_file_list, 'use_diagonal', vectorize_aggregate_subjects, 'use_diagonal')
wf.connect(create_file_list, 'use_fishers_z', vectorize_aggregate_subjects, 'use_fishers_z')
wf.connect(create_df, 'df_all_subjects_pickle_file', vectorize_aggregate_subjects, 'df_file')
wf.connect(create_file_list, 'df_col_names', vectorize_aggregate_subjects, 'df_col_names')
wf.connect(create_df, 'df_all_subjects_pickle_file', ds_X, 'df_all_subjects_pickle_file')
wf.connect(vectorize_aggregate_subjects, 'vectorized_aggregated_file', ds_X, 'X_file')
wf.connect(vectorize_aggregate_subjects, 'unimodal_backprojection_info_file', ds_X, 'unimodal_backprojection_info_file')
#####################################
# RUN WF
#####################################
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example 6: learning_predict_data_2samp_wf
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
'df_file_nki',
'reverse_split',
'random_state_nki',
'run_learning_curve',
'life_test_size'],
output_names=['scatter_file',
'brain_age_scatter_file',
'df_life_out_file',
'df_nki_out_file',
'df_big_out_file',
'model_out_file',
'df_res_out_file',
'tuning_curve_file',
'scatter_file_cv',
'learning_curve_plot_file',
'learning_curve_df_file'],
function=run_prediction_split_fct),
name='prediction_split')
backproject_and_split_weights = Node(util.Function(input_names=['trained_model_file',
'multimodal_backprojection_info',
'data_str',
'target_name'],
output_names=['out_file_list',
'out_file_render_list'],
function=backproject_and_split_weights_fct),
name='backproject_and_split_weights')
i = 0
for reg in confound_regression:
the_out_node_str = 'single_source_model_reg_%s_' % (reg)
prediction_node_dict[i] = prediction_split.clone(the_out_node_str)
the_in_node = prediction_node_dict[i]
the_in_node.inputs.regress_confounds = reg
the_in_node.inputs.run_cv = run_cv
the_in_node.inputs.n_jobs_cv = n_jobs_cv
the_in_node.inputs.run_tuning = run_tuning
the_in_node.inputs.reverse_split = reverse_split
the_in_node.inputs.random_state_nki = random_state_nki
the_in_node.inputs.run_learning_curve = run_learning_curve
the_in_node.inputs.life_test_size = life_test_size
wf.connect(select_multimodal_X, 'X_multimodal_selected_file', the_in_node, 'X_file')
wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
wf.connect(subject_selection_infosource, 'selection_criterium', the_in_node, 'selection_criterium')
wf.connect(select_subjects, 'df_use_pickle_file', the_in_node, 'df_file')
wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')
wf.connect(the_in_node, 'model_out_file', ds, the_out_node_str + 'trained_model')
wf.connect(the_in_node, 'scatter_file', ds_pdf, the_out_node_str + 'scatter')
wf.connect(the_in_node, 'brain_age_scatter_file', ds_pdf, the_out_node_str + 'brain_age_scatter')
wf.connect(the_in_node, 'df_life_out_file', ds_pdf, the_out_node_str + 'predicted_life')
wf.connect(the_in_node, 'df_nki_out_file', ds_pdf, the_out_node_str + 'predicted_nki')
wf.connect(the_in_node, 'df_big_out_file', ds_pdf, the_out_node_str + 'predicted')
wf.connect(the_in_node, 'df_res_out_file', ds_pdf, the_out_node_str + 'results_error')
wf.connect(the_in_node, 'tuning_curve_file', ds_pdf, the_out_node_str + 'tuning_curve')
wf.connect(the_in_node, 'scatter_file_cv', ds_pdf, the_out_node_str + 'scatter_cv')
wf.connect(the_in_node, 'learning_curve_plot_file', ds_pdf, the_out_node_str + 'learning_curve.@plot')
wf.connect(the_in_node, 'learning_curve_df_file', ds_pdf, the_out_node_str + 'learning_curve.@df')
# NKI
if run_2sample_training:
wf.connect(select_multimodal_X_nki, 'X_multimodal_selected_file', the_in_node, 'X_file_nki')
wf.connect(select_subjects_nki, 'df_use_pickle_file', the_in_node, 'df_file_nki')
else:
the_in_node.inputs.df_file_nki = None
the_in_node.inputs.X_file_nki = None
# BACKPROJECT PREDICTION WEIGHTS
# map weights back to single modality original format (e.g., nifti or matrix)
the_out_node_str = 'backprojection_single_source_model_reg_%s_' % (reg)
backprojection_node_dict[i] = backproject_and_split_weights.clone(the_out_node_str)
the_from_node = prediction_node_dict[i]
the_in_node = backprojection_node_dict[i]
wf.connect(the_from_node, 'model_out_file', the_in_node, 'trained_model_file')
wf.connect(aggregate_multimodal_metrics, 'multimodal_backprojection_info', the_in_node,
'multimodal_backprojection_info')
wf.connect(aggregate_multimodal_metrics, 'multimodal_name', the_in_node, 'data_str')
wf.connect(target_infosource, 'target_name', the_in_node, 'target_name')
wf.connect(the_in_node, 'out_file_list', ds_pdf, the_out_node_str + 'weights.@weights')
wf.connect(the_in_node, 'out_file_render_list', ds_pdf, the_out_node_str + 'weights.@rendered')
i += 1
###############################################################################################################
# # RUN WF
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example 7: calc_centrality_metrics
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
'vmhc_symm_brain': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_symmetric.nii.gz',
'vmhc_symm_brain_3mm': 'cpac_image_resources/symmetric/MNI152_T1_3mm_brain_symmetric.nii.gz',
'vmhc_symm_skull': 'cpac_image_resources/symmetric/MNI152_T1_2mm_symmetric.nii.gz',
'vmhc_symm_brain_mask_dil': 'cpac_image_resources/symmetric/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz',
'vmhc_config_file_2mm': 'cpac_image_resources/symmetric/T1_2_MNI152_2mm_symmetric.cnf'
}
selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
base_directory=template_dir),
name="selectfiles_anat_templates")
# GET SUBJECT SPECIFIC FUNCTIONAL AND STRUCTURAL DATA
selectfiles_templates = {
'epi_2_MNI_warp': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_MNI_warp/TR_{TR_id}/*.nii.gz',
'epi_mask': '{subject_id}/rsfMRI_preprocessing/masks/brain_mask_epiSpace/TR_{TR_id}/*.nii.gz',
'preproc_epi_full_spectrum': '{subject_id}/rsfMRI_preprocessing/epis/01_denoised/TR_{TR_id}/*.nii.gz',
'preproc_epi_bp': '{subject_id}/rsfMRI_preprocessing/epis/02_denoised_BP/TR_{TR_id}/*.nii.gz',
'preproc_epi_bp_tNorm': '{subject_id}/rsfMRI_preprocessing/epis/03_denoised_BP_tNorm/TR_{TR_id}/*.nii.gz',
'epi_2_struct_mat': '{subject_id}/rsfMRI_preprocessing/registration/epi_2_struct_mat/TR_{TR_id}/*.mat',
't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
't1w_brain': '{subject_id}/rsfMRI_preprocessing/struct_prep/t1w_brain/t1w_reoriented_maths.nii.gz',
'epi_bp_tNorm_MNIspace_3mm': '{subject_id}/rsfMRI_preprocessing/epis_MNI_3mm/03_denoised_BP_tNorm/TR_645/residual_filt_norm_warp.nii.gz'
}
selectfiles = Node(nio.SelectFiles(selectfiles_templates,
base_directory=preprocessed_data_dir),
name="selectfiles")
wf.connect(scan_infosource, 'TR_id', selectfiles, 'TR_id')
wf.connect(subjects_infosource, 'subject_id', selectfiles, 'subject_id')
# selectfiles.inputs.subject_id = subject_id
# CREATE TRANSFORMATIONS
# creat MNI 2 epi warp
MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
MNI_2_epi_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
wf.connect(selectfiles, 'epi_mask', MNI_2_epi_warp, 'reference')
wf.connect(selectfiles, 'epi_2_MNI_warp', MNI_2_epi_warp, 'warp')
#####################
# CALCULATE METRICS
#####################
# DEGREE
# fixme
# a_mem = 5
# fixme
a_mem = 20
dc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem,
wf_name='dc') # allocated_memory = a_mem, wf_name = 'dc')
# dc.plugin_args = {'submit_specs': 'request_memory = 6000'}
# fixme
dc.plugin_args = {'submit_specs': 'request_memory = 20000'}
dc.inputs.inputspec.method_option = 0 # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
dc.inputs.inputspec.threshold_option = 0 # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
dc.inputs.inputspec.threshold = 0.0001
dc.inputs.inputspec.weight_options = [True,
True] # list of two booleans for binarize and weighted options respectively
wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', dc, 'inputspec.subject')
wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc, 'inputspec.template')
wf.connect(dc, 'outputspec.centrality_outputs', ds, 'dc.@centrality_outputs')
wf.connect(dc, 'outputspec.correlation_matrix', ds, 'dc.@correlation_matrix')
wf.connect(dc, 'outputspec.graph_outputs', ds, 'dc.@graph_outputs')
# DC Z-SCORE
dc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='dc_Z')
wf.connect(dc, 'outputspec.centrality_outputs', dc_Z, 'inputspec.input_file')
wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', dc_Z, 'inputspec.mask_file')
wf.connect(dc_Z, 'outputspec.z_score_img', ds, 'dc_z.@zscore')
a_mem = 20
evc = cpac_centrality.create_resting_state_graphs(allocated_memory=a_mem, wf_name='evc')
evc.plugin_args = {'submit_specs': 'request_memory = 20000'}
evc.inputs.inputspec.method_option = 1 # 0 for degree centrality, 1 for eigenvector centrality, 2 for lFCD
evc.inputs.inputspec.threshold_option = 0 # 0 for probability p_value, 1 for sparsity threshold, any other for threshold value
evc.inputs.inputspec.threshold = 0.0001
evc.inputs.inputspec.weight_options = [True,
True] # list of two booleans for binarize and weighted options respectively
wf.connect(selectfiles, 'epi_bp_tNorm_MNIspace_3mm', evc, 'inputspec.subject')
wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc, 'inputspec.template')
wf.connect(evc, 'outputspec.centrality_outputs', ds, 'evc.@centrality_outputs')
wf.connect(evc, 'outputspec.correlation_matrix', ds, 'evc.@correlation_matrix')
wf.connect(evc, 'outputspec.graph_outputs', ds, 'evc.@graph_outputs')
# EVC Z-SCORE
evc_Z = cpac_centrality_z_score.get_cent_zscore(wf_name='evc_Z')
wf.connect(evc, 'outputspec.centrality_outputs', evc_Z, 'inputspec.input_file')
wf.connect(selectfiles_anat_templates, 'GM_mask_MNI_3mm', evc_Z, 'inputspec.mask_file')
wf.connect(evc_Z, 'outputspec.z_score_img', ds, 'evc_z.@zscore')
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example 8: create_registration_pipeline
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
struct_2_MNI_warp.inputs.field_file = 'struct_2_MNI_warp.nii.gz'
struct_2_MNI_warp.plugin_args = {'submit_specs': 'request_memory = 4000'}
reg_wf.connect(inputnode, 't1w', struct_2_MNI_warp, 'in_file')
reg_wf.connect(struct_2_MNI_mat, 'out_matrix_file', struct_2_MNI_warp, 'affine_file')
reg_wf.connect(struct_2_MNI_warp, 'field_file', ds, 'registration.struct_2_MNI_warp')
reg_wf.connect(struct_2_MNI_warp, 'field_file', outputnode, 'struct_2_MNI_warp')
reg_wf.connect(struct_2_MNI_warp, 'warped_file', outputnode, 'struct_MNIspace')
reg_wf.connect(struct_2_MNI_warp, 'warped_file', ds, 'registration.struct_MNIspace')
# II.EPI -> STRUCT (via bbr)
##########################################
# 3. calc EPI->STRUCT initial registration with flirt dof=6 and corratio
epi_2_struct_flirt6_mat = Node(fsl.FLIRT(dof=6, cost='corratio'), name='epi_2_struct_flirt6_mat')
epi_2_struct_flirt6_mat.inputs.out_file = 'epi_structSpace_flirt6.nii.gz'
reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_flirt6_mat, 'reference')
reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_flirt6_mat, 'in_file')
# 4. run EPI->STRUCT via bbr
bbr_schedule = os.path.join(os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch')
epi_2_struct_bbr_mat = Node(interface=fsl.FLIRT(dof=6, cost='bbr'), name='epi_2_struct_bbr_mat')
epi_2_struct_bbr_mat.inputs.schedule = bbr_schedule
epi_2_struct_bbr_mat.inputs.out_file = 'epi_structSpace.nii.gz'
reg_wf.connect(inputnode, 'initial_mean_epi_moco', epi_2_struct_bbr_mat, 'in_file')
reg_wf.connect(inputnode, 't1w_brain', epi_2_struct_bbr_mat, 'reference')
reg_wf.connect(epi_2_struct_flirt6_mat, 'out_matrix_file', epi_2_struct_bbr_mat, 'in_matrix_file')
reg_wf.connect(inputnode, 'wm_mask_4_bbr', epi_2_struct_bbr_mat, 'wm_seg')
reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', ds, 'registration.epi_2_struct_mat')
reg_wf.connect(epi_2_struct_bbr_mat, 'out_file', outputnode, 'mean_epi_structSpace')
# 5. INVERT to get: STRUCT -> EPI
struct_2_epi_mat = Node(fsl.ConvertXFM(invert_xfm=True), name='struct_2_epi_mat')
reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', struct_2_epi_mat, 'in_file')
reg_wf.connect(struct_2_epi_mat, 'out_file', outputnode, 'struct_2_epi_mat')
# III. COMBINE I. & II.: EPI -> MNI
##########################################
# 6. COMBINE MATS: EPI -> MNI
epi_2_MNI_warp = Node(fsl.ConvertWarp(), name='epi_2_MNI_warp')
epi_2_MNI_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
reg_wf.connect(epi_2_struct_bbr_mat, 'out_matrix_file', epi_2_MNI_warp, 'premat') # epi2struct
reg_wf.connect(struct_2_MNI_warp, 'field_file', epi_2_MNI_warp, 'warp1') # struct2mni
reg_wf.connect(epi_2_MNI_warp, 'out_file', outputnode, 'epi_2_MNI_warp')
reg_wf.connect(epi_2_MNI_warp, 'out_file', ds, 'registration.epi_2_MNI_warp')
# output: out_file
# 7. MNI -> EPI
MNI_2_epi_warp = Node(fsl.InvWarp(), name='MNI_2_epi_warp')
MNI_2_epi_warp.inputs.reference = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
reg_wf.connect(epi_2_MNI_warp, 'out_file', MNI_2_epi_warp, 'warp')
reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_2_epi_warp, 'reference')
reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', outputnode, 'MNI_2_epi_warp')
# output: inverse_warp
##########################################
# TRANSFORM VOLUMES
##########################################
# CREATE STRUCT IN EPI SPACE FOR DEBUGGING
struct_epiSpace = Node(fsl.ApplyXfm(), name='struct_epiSpace')
struct_epiSpace.inputs.out_file = 'struct_brain_epiSpace.nii.gz'
reg_wf.connect(inputnode, 't1w_brain', struct_epiSpace, 'in_file')
reg_wf.connect(inputnode, 'initial_mean_epi_moco', struct_epiSpace, 'reference')
reg_wf.connect(struct_2_epi_mat, 'out_file', struct_epiSpace, 'in_matrix_file')
reg_wf.connect(struct_epiSpace, 'out_file', ds, 'QC.struct_brain_epiSpace')
# CREATE EPI IN MNI SPACE
mean_epi_MNIspace = Node(fsl.ApplyWarp(), name='mean_epi_MNIspace')
mean_epi_MNIspace.inputs.ref_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
mean_epi_MNIspace.inputs.out_file = 'mean_epi_MNIspace.nii.gz'
reg_wf.connect(inputnode, 'initial_mean_epi_moco', mean_epi_MNIspace, 'in_file')
reg_wf.connect(epi_2_MNI_warp, 'out_file', mean_epi_MNIspace, 'field_file')
reg_wf.connect(mean_epi_MNIspace, 'out_file', ds, 'registration.mean_epi_MNIspace')
reg_wf.connect(mean_epi_MNIspace, 'out_file', outputnode, 'mean_epi_MNIspace')
# CREATE MNI IN EPI SPACE FOR DEBUGGING
MNI_epiSpace = Node(fsl.ApplyWarp(), name='MNI_epiSpace')
MNI_epiSpace.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
MNI_epiSpace.inputs.out_file = 'MNI_epiSpace.nii.gz'
reg_wf.connect(inputnode, 'initial_mean_epi_moco', MNI_epiSpace, 'ref_file')
reg_wf.connect(MNI_2_epi_warp, 'inverse_warp', MNI_epiSpace, 'field_file')
reg_wf.connect(MNI_epiSpace, 'out_file', ds, 'registration.MNI_epiSpace')
reg_wf.write_graph(dotfilename=reg_wf.name, graph2use='flat', format='pdf')
return reg_wf
Example 9: Node
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
# Datasink
datasink = Node(DataSink(base_directory=experiment_dir,
container=output_dir),
name="datasink")
# Use the following DataSink output substitutions
substitutions = [('_subject_id_', ''),
                 ('_session_id_', '')]
datasink.inputs.substitutions = substitutions
# Connect SelectFiles and DataSink to the workflow
preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
('session_id', 'session_id')]),
(selectfiles, gunzip, [('func', 'in_file')]),
(realign, datasink, [('mean_image', 'realign.@mean'),
                     ('realignment_parameters',
                      'realign.@parameters'),
]),
(smooth, datasink, [('smoothed_files', 'smooth')]),
(art, datasink, [('outlier_files', 'art.@outliers'),
                 ('plot_files', 'art.@plot'),
]),
])
###
# Run Workflow
preproc.write_graph(graph2use='flat')
preproc.run('MultiProc', plugin_args={'n_procs': 8})
Example 10: run_tbss_wf
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
# denoise = Node(dipy.Denoise(), name='denoise')
# wf.connect(combine_corrections, 'outputnode.out_file', denoise, 'in_file')
# wf.connect(b0_mask, 'mask_file', denoise, 'in_mask')
# wf.connect(denoise, 'out_file', ds, 'denoised')
# check if ok fixme
denoise = nlmeans_pipeline()
wf.connect(combine_corrections, 'outputnode.out_file', denoise, 'inputnode.in_file')
wf.connect(b0_mask, 'mask_file', denoise, 'inputnode.in_mask')
wf.connect(denoise, 'outputnode.out_file', ds, 'denoised')
# DTIFIT
dtifit = Node(interface=fsl.DTIFit(), name='dtifit')
wf.connect(combine_corrections, 'outputnode.out_file', dtifit, 'dwi')
wf.connect(b0_mask, 'mask_file', dtifit, 'mask')
wf.connect(hmc, 'outputnode.out_bvec', dtifit, 'bvecs')
wf.connect(selectfiles, 'bval_file', dtifit, 'bvals')
wf.connect(dtifit, 'FA', ds, 'dtifit.@FA')
wf.connect(dtifit, 'L1', ds, 'dtifit.@L1')
wf.connect(dtifit, 'L2', ds, 'dtifit.@L2')
wf.connect(dtifit, 'L3', ds, 'dtifit.@L3')
wf.connect(dtifit, 'MD', ds, 'dtifit.@MD')
wf.connect(dtifit, 'MO', ds, 'dtifit.@MO')
wf.connect(dtifit, 'S0', ds, 'dtifit.@S0')
wf.connect(dtifit, 'V1', ds, 'dtifit.@V1')
wf.connect(dtifit, 'V2', ds, 'dtifit.@V2')
wf.connect(dtifit, 'V3', ds, 'dtifit.@V3')
wf.connect(dtifit, 'tensor', ds, 'dtifit.@tensor')
RD_sum = Node(fsl.ImageMaths(op_string='-add '), name='RD_sum')
wf.connect(dtifit, 'L2', RD_sum, 'in_file')
wf.connect(dtifit, 'L3', RD_sum, 'in_file2')
RD = Node(fsl.ImageMaths(op_string='-div 2', out_file='RD.nii.gz'), name='RD')
wf.connect(RD_sum, 'out_file', RD, 'in_file')
wf.connect(RD, 'out_file', ds, 'dtifit.@RD')
simple_ecc = Node(fsl.EddyCorrect(), name='simple_ecc')
wf.connect(selectfiles, 'dMRI_data', simple_ecc, 'in_file')
wf.connect(simple_ecc, 'eddy_corrected', ds, 'simple_ecc')
# DTIFIT DENOISED
dtifit_denoised = Node(interface=fsl.DTIFit(), name='dtifit_denoised')
wf.connect(denoise, 'outputnode.out_file', dtifit_denoised, 'dwi')
wf.connect(b0_mask, 'mask_file', dtifit_denoised, 'mask')
wf.connect(hmc, 'outputnode.out_bvec', dtifit_denoised, 'bvecs')
wf.connect(selectfiles, 'bval_file', dtifit_denoised, 'bvals')
wf.connect(dtifit_denoised, 'FA', ds, 'dtifit_denoised.@FA')
wf.connect(dtifit_denoised, 'L1', ds, 'dtifit_denoised.@L1')
wf.connect(dtifit_denoised, 'L2', ds, 'dtifit_denoised.@L2')
wf.connect(dtifit_denoised, 'L3', ds, 'dtifit_denoised.@L3')
wf.connect(dtifit_denoised, 'MD', ds, 'dtifit_denoised.@MD')
wf.connect(dtifit_denoised, 'MO', ds, 'dtifit_denoised.@MO')
wf.connect(dtifit_denoised, 'S0', ds, 'dtifit_denoised.@S0')
wf.connect(dtifit_denoised, 'V1', ds, 'dtifit_denoised.@V1')
wf.connect(dtifit_denoised, 'V2', ds, 'dtifit_denoised.@V2')
wf.connect(dtifit_denoised, 'V3', ds, 'dtifit_denoised.@V3')
#
def _file_to_list(in_file):
if type(in_file) is not list:
return [in_file]
else:
return in_file
in_file_to_list = Node(util.Function(input_names=['in_file'], output_names=['out_file'], function=_file_to_list), name='in_file_to_list')
wf.connect(dtifit, 'FA', in_file_to_list, 'in_file')
# TBSS
tbss = create_tbss_all(estimate_skeleton=False)
tbss.inputs.inputnode.skeleton_thresh = 0.2
wf.connect(in_file_to_list, 'out_file', tbss, 'inputnode.fa_list')
wf.connect(tbss, 'outputall_node.mergefa_file3', ds, 'tbss.@mergefa')
wf.connect(tbss, 'outputnode.projectedfa_file', ds, 'tbss.@projectedfa_file')
wf.connect(tbss, 'outputnode.skeleton_file4', ds, 'tbss.@skeleton_file')
wf.connect(tbss, 'outputnode.skeleton_mask', ds, 'tbss.@skeleton_mask')
# outputnode.meanfa_file
# outputnode.projectedfa_file
# outputnode.skeleton_file
# outputnode.skeleton_mask
#####################################
# RUN WF
#####################################
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example 11: preprocessing_pipeline
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
base_directory=dicom_dir),
name="selectfiles_struct")
selectfiles_struct.inputs.subject_id = subject_id
#####################################
# COPY RUNNING SCRIPTS
#####################################
copy_scripts = Node(util.Function(input_names=['subject_id', 'script_dir'], output_names=['zip_file'], function=zip_and_save_running_scripts), name='copy_scripts')
copy_scripts.inputs.script_dir = script_dir
copy_scripts.inputs.subject_id = subject_id
wf.connect(copy_scripts, 'zip_file', ds, 'scripts')
#####################################
# CONVERT DICOMs
#####################################
# CONVERT STRUCT 2 NIFTI
converter_struct = create_converter_structural_pipeline(working_dir, ds_dir, 'converter_struct')
wf.connect(selectfiles_struct, 't1w_dicom', converter_struct, 'inputnode.t1w_dicom')
# CONVERT dMRI 2 NIFTI
converter_dMRI = create_converter_diffusion_pipeline(working_dir, ds_dir, 'converter_dMRI')
wf.connect(selectfiles_struct, 'dMRI_dicom', converter_dMRI, 'inputnode.dMRI_dicom')
# CONVERT FUNCT 2 NIFTI
converter_funct = create_converter_functional_pipeline(working_dir, ds_dir, 'converter_funct')
wf.connect(selectfiles_funct, 'funct_dicom', converter_funct, 'inputnode.epi_dicom')
wf.connect(scan_infosource, 'TR_id', converter_funct, 'inputnode.out_format')
#####################################
# START RSFMRI PREPROCESSING ANALYSIS
#####################################
# rsfMRI PREPROCESSING
rsfMRI_preproc = create_rsfMRI_preproc_pipeline(working_dir,freesurfer_dir, ds_dir, use_fs_brainmask, 'rsfMRI_preprocessing')
rsfMRI_preproc.inputs.inputnode.vols_to_drop = vols_to_drop
rsfMRI_preproc.inputs.inputnode.lp_cutoff_freq = lp_cutoff_freq
rsfMRI_preproc.inputs.inputnode.hp_cutoff_freq = hp_cutoff_freq
rsfMRI_preproc.inputs.inputnode.subject_id = subject_id
wf.connect(converter_struct, 'outputnode.t1w', rsfMRI_preproc, 'inputnode.t1w')
wf.connect(converter_funct, 'outputnode.epi', rsfMRI_preproc, 'inputnode.epi')
wf.connect(converter_funct, 'outputnode.TR_ms', rsfMRI_preproc, 'inputnode.TR_ms')
#wf.connect(subjects_infosource, 'subject_id', rsfMRI_preproc, 'inputnode.subject_id')
wf.connect(selectfiles_templates, 'lat_ventricle_mask_MNI', rsfMRI_preproc, 'inputnode.lat_ventricle_mask_MNI')
#####################################
# SCA
#####################################
# sca = create_sca_pipeline(working_dir, rois_list, ds_dir, name='sca')
# wf.connect(rsfMRI_preproc, 'outputnode.rs_preprocessed', sca, 'inputnode.rs_preprocessed')
# wf.connect(rsfMRI_preproc, 'outputnode.epi_2_MNI_warp', sca, 'inputnode.epi_2_MNI_warp')
# if len(subjects_list)>1:
# def test_fct(in_files):
# print('cxcxcx')
# print in_files
# out_files = in_files
# return out_files
#
# collect_files = JoinNode(util.Function(input_names=['in_files'],
# output_names=['out_files'],
# function=test_fct),
# joinsource='subjects_infosource', #'selectfiles_funct',
# joinfield='in_files',
# name='collect_files')
# wf.connect(sca, 'outputnode.seed_based_z', collect_files, 'in_files')
#
#
# collect_sca = Node(fsl.Merge(dimension='t', merged_file='concat_corr_z.nii.gz'),
# joinsource='subjects_infosource', #'selectfiles_funct',
# joinfield='in_files',
# name='collect_sca')
# wf.connect(collect_files, 'out_files', collect_sca, 'in_files')
#
# mean_sca = Node(fsl.MeanImage(), name='mean_sca')
# wf.connect(collect_sca, 'merged_file', mean_sca, 'in_file')
#####################################
# RUN WF
#####################################
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})
Example 12: create_deskull_pipeline
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
def create_deskull_pipeline(working_dir, ds_dir, name='deskull'):
# initiate workflow
deskull_wf = Workflow(name=name)
deskull_wf.base_dir = os.path.join(working_dir,'LeiCA_resting', 'rsfMRI_preprocessing')
# set fsl output
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
# I/O NODES
inputnode = Node(util.IdentityInterface(fields=['epi_moco',
'struct_brain_mask',
'struct_2_epi_mat']),
name='inputnode')
outputnode = Node(util.IdentityInterface(fields=['epi_deskulled',
'mean_epi',
'brain_mask_epiSpace']),
name='outputnode')
ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
ds.inputs.substitutions = [('_TR_id_', 'TR_')]
# TRANSFORM BRAIN MASK TO EPI SPACE
brain_mask_epiSpace = Node(fsl.ApplyXfm(apply_xfm=True, interp='nearestneighbour'), name='brain_mask_epiSpace')
brain_mask_epiSpace.inputs.out_file = 'brain_mask_epiSpace.nii.gz'
deskull_wf.connect([(inputnode, brain_mask_epiSpace, [('struct_brain_mask', 'in_file'),
('epi_moco', 'reference'),
('struct_2_epi_mat', 'in_matrix_file')])])
deskull_wf.connect(brain_mask_epiSpace, 'out_file', outputnode, 'brain_mask_epiSpace')
deskull_wf.connect(brain_mask_epiSpace, 'out_file', ds, 'masks.brain_mask_epiSpace')
# DESKULL EPI
epi_brain = Node(fsl.maths.BinaryMaths(operation = 'mul'), name='epi_brain')
deskull_wf.connect(inputnode, 'epi_moco', epi_brain, 'in_file')
deskull_wf.connect(brain_mask_epiSpace, 'out_file', epi_brain, 'operand_file')
# GLOBAL INTENSITY NORMALIZATION
epi_intensity_norm = Node(interface=fsl.ImageMaths(), name='epi_intensity_norm')
epi_intensity_norm.inputs.op_string = '-ing 10000'
epi_intensity_norm.inputs.out_data_type = 'float'
deskull_wf.connect(epi_brain, 'out_file', epi_intensity_norm, 'in_file')
deskull_wf.connect(epi_intensity_norm, 'out_file', outputnode, 'epi_deskulled')
# CREATE MEAN EPI (INTENSITY NORMALIZED)
mean_epi = Node(fsl.maths.MeanImage(dimension='T',
out_file='rest_realigned_mean.nii.gz'),
name='mean_epi')
deskull_wf.connect(epi_intensity_norm, 'out_file', mean_epi, 'in_file')
deskull_wf.connect(mean_epi, 'out_file', ds, 'QC.mean_epi')
deskull_wf.connect(mean_epi, 'out_file', outputnode, 'mean_epi')
deskull_wf.write_graph(dotfilename=deskull_wf.name, graph2use='flat', format='pdf')
return deskull_wf
Example 13:
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
(median, sink, [('median_file', 'realign.@median')]),
(biasfield, sink, [('output_image', 'realign.@biasfield')]),
(coreg, sink, [('outputnode.uni_lowres', 'registration.@uni_lowres'),
               ('outputnode.epi2lowres', 'registration.@epi2lowres'),
               ('outputnode.epi2lowres_mat', 'registration.@epi2lowres_mat'),
               ('outputnode.epi2lowres_dat', 'registration.@epi2lowres_dat'),
               ('outputnode.highres2lowres', 'registration.@highres2lowres'),
               ('outputnode.highres2lowres_dat', 'registration.@highres2lowres_dat'),
               ('outputnode.highres2lowres_mat', 'registration.@highres2lowres_mat'),
               ('outputnode.epi2highres_lin', 'registration.@epi2highres_lin'),
               ('outputnode.epi2highres_lin_itk', 'registration.@epi2highres_lin_itk'),
               ('outputnode.epi2highres_lin_mat', 'registration.@epi2highres_lin_mat')]),
(nonreg, sink, [('outputnode.epi2highres_warp', 'registration.@epi2highres_warp'),
                ('outputnode.epi2highres_invwarp', 'registration.@epi2highres_invwarp'),
                ('outputnode.epi2highres_nonlin', 'registration.@epi2highres_nonlin')]),
(struct2func, sink, [(('output_image', selectindex, [0, 1, 2]), 'mask.@masks')]),
(artefact, sink, [('norm_files', 'confounds.@norm_motion'),
                  ('outlier_files', 'confounds.@outlier_files'),
                  ('intensity_files', 'confounds.@intensity_files'),
                  ('statistic_files', 'confounds.@outlier_stats'),
                  ('plot_files', 'confounds.@outlier_plots')]),
(motreg, sink, [('out_files', 'confounds.@motreg')]),
(denoise, sink, [('denoised_file', 'denoise.@denoised'),
                 # in this case denoised data was saved at
                 # myelinconnect/resting/final/ instead
                 ('confounds_file', 'denoise.@confounds')])
])
#preproc.run(plugin='MultiProc', plugin_args={'n_procs' : 9})
preproc.write_graph(dotfilename='func_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
Example 14:
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
first_level.connect([
(infosource, selectfiles, [('subject_path', 'subject_path')]),
(selectfiles, merge, [('func', 'in_files')]),
(merge, ts, [('merged_file', 'in_file')]),
(infosource, seed_plus_nuisance, [('seed', 'in1')]),
(seed_plus_nuisance, ts, [('out', 'mask')]),
(ts, make_regressors_files, [('out_file', 'regressors_ts_list')]),
(selectfiles, make_regressors_files, [('motion', 'mot_params')]),
(selectfiles, make_regressors_files, [('func', 'func')]),
(make_regressors_files, datasink, [('nr', 'timeseries'),
                                   ('nr_td', 'timeseries.@nr_td'),
                                   ('snr', 'timeseries.@snr')]),
(make_regressors_files, model_helper, [('snr', 'regressors_file')]),
(selectfiles, session_info, [(('func', make_list), 'functional_runs')]),
(model_helper, session_info, [('subject_info', 'subject_info')]),
(session_info, model_spec, [('session_info', 'session_info')]),
(model_spec, est_model, [('spm_mat_file', 'spm_mat_file')]),
(est_model, est_con, [('beta_images', 'beta_images'),
('residual_image', 'residual_image'),
('spm_mat_file', 'spm_mat_file')]),
(model_helper, est_con, [('contrasts', 'contrasts')]),
(infosource, datasink, [(('subject_path', make_basedir), 'base_directory'),
(('seed', make_stats_FC), 'container')]),
(est_con, datasink, [('spm_mat_file', 'stats.@spm'),
                     ('con_images', 'stats.@con'),
                     ('spmT_images', 'stats.@spmT')])
])
# Visualize the workflow and run it with SGE
first_level.write_graph(graph2use='flat')
first_level.run(plugin='SGE', plugin_args=dict(template='/data/mridata/jdeng/tools/grid/q.sh'))
Example 15: mean_con_mat_wf
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import write_graph [as alias]
#.........some code omitted here.........
('_bp_freqs_', 'bp_'),
('_extraction_method_', ''),
('_subject_id_[A0-9]*/', '')
]
#####################################
# SET ITERATORS
#####################################
# SUBJECTS ITERATOR
subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
subjects_infosource.iterables = ('subject_id', subjects_list)
# PARCELLATION ITERATOR
parcellation_infosource = Node(util.IdentityInterface(fields=['parcellation']), name='parcellation_infosource')
parcellation_infosource.iterables = ('parcellation', parcellations_list)
# BP FILTER ITERATOR
bp_filter_infosource = Node(util.IdentityInterface(fields=['bp_freqs']), name='bp_filter_infosource')
bp_filter_infosource.iterables = ('bp_freqs', bp_freq_list)
# EXTRACTION METHOD ITERATOR
extraction_method_infosource = Node(util.IdentityInterface(fields=['extraction_method']),
name='extraction_method_infosource')
extraction_method_infosource.iterables = ('extraction_method', extraction_methods_list)
def create_file_list_fct(subjects_list, base_path, parcellation, bp_freqs, extraction_method):
import os
file_list = []
for s in subjects_list:
file_list.append(os.path.join(base_path, s, 'metrics/con_mat/matrix',
'bp_%s.%s' % bp_freqs,
parcellation, extraction_method, 'matrix.pkl'))
return file_list
create_file_list = Node(util.Function(input_names=['subjects_list', 'base_path', 'parcellation', 'bp_freqs', 'extraction_method'],
output_names=['file_list'],
function=create_file_list_fct),
name='create_file_list')
create_file_list.inputs.subjects_list = subjects_list
create_file_list.inputs.base_path = preprocessed_data_dir
wf.connect(parcellation_infosource, 'parcellation', create_file_list, 'parcellation')
wf.connect(bp_filter_infosource, 'bp_freqs', create_file_list, 'bp_freqs')
wf.connect(extraction_method_infosource, 'extraction_method', create_file_list, 'extraction_method')
aggregate = Node(util.Function(input_names=['file_list'],
output_names=['merged_file'],
function=aggregate_data),
name='aggregate')
wf.connect(create_file_list, 'file_list', aggregate, 'file_list')
def plot_matrix_fct(in_file):
import pickle, os
import pylab as plt
import numpy as np
with open(in_file, 'rb') as f:
matrix_dict = pickle.load(f)
out_file_list = []
for m_type in matrix_dict.keys():
mean_matrix = np.mean(matrix_dict[m_type], axis=0)
fig = plt.imshow(mean_matrix, interpolation='nearest')
plt.title(in_file)
out_file = os.path.join(os.getcwd(), m_type+'.png')
out_file_list.append(out_file)
plt.savefig(out_file)
return out_file_list
plot_matrix = Node(util.Function(input_names=['in_file'],
output_names=['out_file_list'],
function=plot_matrix_fct),
name='plot_matrix')
wf.connect(aggregate, 'merged_file', plot_matrix, 'in_file')
wf.connect(plot_matrix, 'out_file_list', ds, 'mean_matrix.@plots')
#####################################
# RUN WF
#####################################
wf.write_graph(dotfilename=wf.name, graph2use='colored', format='pdf') # 'hierarchical')
wf.write_graph(dotfilename=wf.name, graph2use='orig', format='pdf')
wf.write_graph(dotfilename=wf.name, graph2use='flat', format='pdf')
if plugin_name == 'CondorDAGMan':
wf.run(plugin=plugin_name)
if plugin_name == 'MultiProc':
wf.run(plugin=plugin_name, plugin_args={'n_procs': use_n_procs})