

Python Workflow.config["execution"]["crashdump_dir"] usage examples

This article collects typical usage examples of the nipype.pipeline.engine.Workflow configuration entry config["execution"]["crashdump_dir"] in Python. If you are wondering what this setting does, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, nipype.pipeline.engine.Workflow.


Six code examples using Workflow.config["execution"]["crashdump_dir"] are shown below, sorted by popularity by default.
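Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: pointing nipype's crash dumps at a dedicated directory. The workflow name and paths are hypothetical and not taken from any example below.

import os
from nipype.pipeline.engine import Workflow

# Minimal sketch (hypothetical paths): when a node fails, nipype writes a
# crash pickle; crashdump_dir redirects those files away from the current
# working directory into a predictable location.
wf = Workflow(name="example_wf")
wf.base_dir = "/tmp/working_dir"
wf.config["execution"]["crashdump_dir"] = os.path.join(wf.base_dir, "crash_files")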

Example 1: create_conversion

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
def create_conversion(
    name, subject, scans, working_dir, out_dir, folder, xnat_server, xnat_user, xnat_pass, project_id, exp_id
):

    convert = Workflow(name=name)
    convert.base_dir = working_dir
    convert.config["execution"]["crashdump_dir"] = convert.base_dir + "/crash_files"

    # infosource to iterate over scans
    scan_infosource = Node(util.IdentityInterface(fields=["scan_key", "scan_val"]), name="scan_infosource")
    scan_infosource.iterables = [("scan_key", list(scans.keys())), ("scan_val", list(scans.values()))]
    scan_infosource.synchronize = True

    # xnat source
    xnatsource = Node(
        nio.XNATSource(
            infields=["project_id", "subject_id", "exp_id", "scan_id"],
            outfields=["dicom"],
            server=xnat_server,
            user=xnat_user,
            pwd=xnat_pass,
            cache_dir=working_dir,
        ),
        name="xnatsource",
    )

    xnatsource.inputs.query_template = (
        "/projects/%s/subjects/%s/experiments/%s/scans/%d/resources/DICOM/files"
    )
    xnatsource.inputs.query_template_args["dicom"] = [["project_id", "subject_id", "exp_id", "scan_id"]]
    xnatsource.inputs.project_id = project_id
    xnatsource.inputs.subject_id = subject
    xnatsource.inputs.exp_id = exp_id
    convert.connect([(scan_infosource, xnatsource, [("scan_val", "scan_id")])])

    # workflow to convert dicoms
    dcmconvert = create_dcmconvert_pipeline()
    convert.connect(
        [
            (scan_infosource, dcmconvert, [("scan_key", "inputnode.filename")]),
            (xnatsource, dcmconvert, [("dicom", "inputnode.dicoms")]),
        ]
    )

    # xnat sink
    sink = Node(nio.DataSink(base_directory=out_dir, parameterization=False), name="sink")

    convert.connect([(dcmconvert, sink, [("outputnode.nifti", folder)])])

    convert.run()
Author: JanisReinelt | Project: pipelines | Lines: 52 | Source: convert.py
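For orientation, an invocation of create_conversion might look like the following sketch; every argument value here is hypothetical, since the original project's calling code is not shown. Note that the function runs the workflow itself via convert.run().

# Hypothetical call to create_conversion; none of these values come from
# the original pipelines project.
create_conversion(
    name="convert_sub001",
    subject="sub001",
    scans={"t1": 2, "rest": 5},  # assumed mapping: scan label -> XNAT scan id
    working_dir="/tmp/working_dir",
    out_dir="/tmp/out",
    folder="nifti",
    xnat_server="https://xnat.example.org",
    xnat_user="user",
    xnat_pass="secret",
    project_id="PROJ01",
    exp_id="EXP01",
)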

Example 2: calc_local_metrics

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
def calc_local_metrics(
    preprocessed_data_dir,
    subject_id,
    parcellations_dict,
    bp_freq_list,
    fd_thresh,
    working_dir,
    ds_dir,
    use_n_procs,
    plugin_name,
):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    import utils as calc_metrics_utils

    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")

    wf = Workflow(name="LeiCA_LIFE_metrics")
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(
        logging=dict(workflow_level="DEBUG"),
        execution={"stop_on_first_crash": True, "remove_unnecessary_outputs": True, "job_finished_timeout": 15},
    )
    config.update_config(nipype_cfg)
    wf.config["execution"]["crashdump_dir"] = os.path.join(working_dir, "crash")

    ds = Node(nio.DataSink(base_directory=ds_dir), name="ds")
    ds.inputs.regexp_substitutions = [
        ("MNI_resampled_brain_mask_calc.nii.gz", "falff.nii.gz"),
        ("residual_filtered_3dT.nii.gz", "alff.nii.gz"),
        ("_parcellation_", ""),
        ("_bp_freqs_", "bp_"),
    ]

    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=["parcellation"]), name="parcellation_infosource")
    parcellation_infosource.iterables = ("parcellation", list(parcellations_dict.keys()))

    bp_filter_infosource = Node(util.IdentityInterface(fields=["bp_freqs"]), name="bp_filter_infosource")
    bp_filter_infosource.iterables = ("bp_freqs", bp_freq_list)

    selectfiles = Node(
        nio.SelectFiles(
            {
                "parcellation_time_series": "{subject_id}/con_mat/parcellated_time_series/bp_{bp_freqs}/{parcellation}/parcellation_time_series.npy"
            },
            base_directory=preprocessed_data_dir,
        ),
        name="selectfiles",
    )
    selectfiles.inputs.subject_id = subject_id
    wf.connect(parcellation_infosource, "parcellation", selectfiles, "parcellation")
    wf.connect(bp_filter_infosource, "bp_freqs", selectfiles, "bp_freqs")

    fd_file = Node(
        nio.SelectFiles({"fd_p": "{subject_id}/QC/FD_P_ts"}, base_directory=preprocessed_data_dir), name="fd_file"
    )
    fd_file.inputs.subject_id = subject_id

    ##############
    ## CON MATS
    ##############
    ##############
    ## extract ts
    ##############

    get_good_trs = Node(
        util.Function(
            input_names=["fd_file", "fd_thresh"],
            output_names=["good_trs", "fd_scrubbed_file"],
            function=calc_metrics_utils.get_good_trs,
        ),
        name="get_good_trs",
    )
    wf.connect(fd_file, "fd_p", get_good_trs, "fd_file")
    get_good_trs.inputs.fd_thresh = fd_thresh

    parcellated_ts_scrubbed = Node(
        util.Function(
            input_names=["parcellation_time_series_file", "good_trs"],
            output_names=["parcellation_time_series_scrubbed"],
            function=calc_metrics_utils.parcellation_time_series_scrubbing,
        ),
        name="parcellated_ts_scrubbed",
    )

    wf.connect(selectfiles, "parcellation_time_series", parcellated_ts_scrubbed, "parcellation_time_series_file")
    wf.connect(get_good_trs, "good_trs", parcellated_ts_scrubbed, "good_trs")

#.........remainder of this function omitted.........
Author: fliem | Project: LeiCA_LIFE | Lines: 103 | Source: calc_metrics_scrubbing.py
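The listing is truncated before the workflow is executed. Judging from the use_n_procs and plugin_name parameters, the omitted tail presumably ends in something like the following sketch (not the original code):

# Sketch of the omitted execution step, inferred from the function's
# use_n_procs and plugin_name parameters.
wf.run(plugin=plugin_name, plugin_args={"n_procs": use_n_procs})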

Example 3: Workflow

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
# sessions to loop over
sessions = ["rest1_1", "rest1_2", "rest2_1", "rest2_2"]

# directories
working_dir = "/scr/ilz3/myelinconnect/working_dir/mappings_fixhdr/"
data_dir = "/scr/ilz3/myelinconnect/"
out_dir = "/scr/ilz3/myelinconnect/transformations/"

# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type("NIFTI_GZ")


# main workflow
mappings = Workflow(name="mappings")
mappings.base_dir = working_dir
mappings.config["execution"]["crashdump_dir"] = mappings.base_dir + "/crash_files"

# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject"]), name="subject_infosource")
subject_infosource.iterables = [("subject", subjects_db)]

# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=["session"]), name="session_infosource")
session_infosource.iterables = [("session", sessions)]

# select files
templates = {
    "median": "resting/preprocessed/{subject}/{session}/realignment/corr_{subject}_{session}_roi_detrended_median_corrected.nii.gz",
    "median_mapping": "mappings/rest/fixed_hdr/corr_{subject}_{session}_*mapping_fixed.nii.gz",
    #'t1_mapping': 'mappings/t1/{subject}*T1_Images_merged_mapping.nii.gz',
    "t1_highres": "struct/t1/{subject}*T1_Images_merged.nii.gz",
Author: fliem | Project: myelinconnect | Lines: 33 | Source: mappings.py

Example 4: create_lemon_resting

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
def create_lemon_resting(
    subject,
    working_dir,
    data_dir,
    freesurfer_dir,
    out_dir,
    vol_to_remove,
    TR,
    epi_resolution,
    highpass,
    lowpass,
    echo_space,
    te_diff,
    pe_dir,
    standard_brain,
    standard_brain_resampled,
    standard_brain_mask,
    standard_brain_mask_resampled,
    fwhm_smoothing,
):
    # set fsl output type to nii.gz
    fsl.FSLCommand.set_default_output_type("NIFTI_GZ")
    # main workflow
    func_preproc = Workflow(name="lemon_resting")
    func_preproc.base_dir = working_dir
    func_preproc.config["execution"]["crashdump_dir"] = func_preproc.base_dir + "/crash_files"
    # select files
    templates = {
        "func": "func/EPI_t2.nii",
        "fmap_phase": "unwarp/B0_ph.nii",
        "fmap_mag": "unwarp/B0_mag.nii",
        "anat_head": "preprocessed/mod/anat/T1.nii.gz",  # either with mod or without
        "anat_brain": "preprocessed/mod/anat/brain.nii.gz",  # new version with brain_extraction from freesurfer  #T1_brain_brain.nii.gz',
        "brain_mask": "preprocessed/mod/anat/T1_brain_mask.nii.gz",  # T1_brain_brain_mask.nii.gz',
        "ants_affine": "preprocessed/mod/anat/transforms2mni/transform0GenericAffine.mat",
        "ants_warp": "preprocessed/mod/anat/transforms2mni/transform1Warp.nii.gz",
    }

    selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir), name="selectfiles")

    # node to remove first volumes
    remove_vol = Node(
        util.Function(input_names=["in_file", "t_min"], output_names=["out_file"], function=strip_rois_func),
        name="remove_vol",
    )
    remove_vol.inputs.t_min = vol_to_remove
    # workflow for motion correction
    moco = create_moco_pipeline()
    # workflow for fieldmap correction and coregistration
    fmap_coreg = create_fmap_coreg_pipeline()
    fmap_coreg.inputs.inputnode.fs_subjects_dir = freesurfer_dir
    fmap_coreg.inputs.inputnode.fs_subject_id = subject
    fmap_coreg.inputs.inputnode.echo_space = echo_space
    fmap_coreg.inputs.inputnode.te_diff = te_diff
    fmap_coreg.inputs.inputnode.pe_dir = pe_dir
    # workflow for applying transformations to timeseries
    transform_ts = create_transform_pipeline()
    transform_ts.inputs.inputnode.resolution = epi_resolution

    # workflow to convert signal into percent signal change
    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.tr = TR

    # workflow to transform timeseries to MNI
    ants_registration = create_ants_registration_pipeline()
    ants_registration.inputs.inputnode.ref = standard_brain_resampled

    # workflow to smooth
    smoothing = create_smoothing_pipeline()
    smoothing.inputs.inputnode.fwhm = fwhm_smoothing

    # workflow to correct slice timing
    slicetiming = create_slice_timing_pipeline()

    # visualize registration results
    visualize = create_visualize_pipeline()
    visualize.inputs.inputnode.mni_template = standard_brain_resampled

    # sink to store files
    sink = Node(
        nio.DataSink(
            parameterization=False,
            base_directory=out_dir,
            substitutions=[
                ("fmap_phase_fslprepared", "fieldmap"),
                ("fieldmap_fslprepared_fieldmap_unmasked_vsm", "shiftmap"),
                ("plot.rest_coregistered", "outlier_plot"),
                ("filter_motion_comp_norm_compcor_art_dmotion", "nuissance_matrix"),
                ("rest_realigned.nii.gz_abs.rms", "rest_realigned_abs.rms"),
                ("rest_realigned.nii.gz.par", "rest_realigned.par"),
                ("rest_realigned.nii.gz_rel.rms", "rest_realigned_rel.rms"),
                ("rest_realigned.nii.gz_abs_disp", "abs_displacement_plot"),
                ("rest_realigned.nii.gz_rel_disp", "rel_displacment_plot"),
                ("art.rest_coregistered_outliers", "outliers"),
                ("global_intensity.rest_coregistered", "global_intensity"),
                ("norm.rest_coregistered", "composite_norm"),
                ("stats.rest_coregistered", "stats"),
                ("rest_denoised_bandpassed_norm.nii.gz", "rest_preprocessed.nii.gz"),
                ("rest_denoised_bandpassed_norm_trans.nii.gz", "rest_mni.nii.gz"),
                ("rest2anat_masked_st_norm_trans_smooth.nii", "rest_mni_smoothed.nii"),
#.........remainder of this function omitted.........
Author: fBeyer89 | Project: LIFE_rs_ICA_preprocessing | Lines: 103 | Source: lemon_resting.py
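The substitutions passed to DataSink above are applied as plain string replacements on the destination path of every file the sink receives. A minimal, hypothetical illustration of the mechanism:

import nipype.interfaces.io as nio
from nipype.pipeline.engine import Node

# Minimal sketch (hypothetical paths): any sinked file whose destination
# path contains the first string has that part replaced by the second.
sink = Node(
    nio.DataSink(
        base_directory="/tmp/out",
        substitutions=[("rest_denoised_bandpassed_norm.nii.gz", "rest_preprocessed.nii.gz")],
    ),
    name="sink",
)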

Example 5: connectivity_matrix_wf

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
def connectivity_matrix_wf(
    subjects_list,
    preprocessed_data_dir,
    working_dir,
    ds_dir,
    parcellations_dict,
    extraction_methods_list,
    bp_freq_list,
    use_n_procs,
    plugin_name,
):
    """
    Uses nilearn to calculate connectivity matrices
    parcellations_list: list of dicts with keys: name, nii, is_probabilistic
    bp_freq_list: list of tuples of bp cutoff frequencies (hp, lp). e.g. [(), (0.01, 0.1)]. empty tuple-> no bp filter
    """

    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo

    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name="calc_con_mats")
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(
        logging=dict(workflow_level="DEBUG"),
        execution={"stop_on_first_crash": True, "remove_unnecessary_outputs": True, "job_finished_timeout": 120},
    )
    config.update_config(nipype_cfg)
    wf.config["execution"]["crashdump_dir"] = os.path.join(working_dir, "crash")

    ds = Node(nio.DataSink(), name="ds")
    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ("_parcellation_", ""),
        ("_bp_freqs_", "bp_"),
        ("_extraction_method_", ""),
        ("_subject_id_[A0-9]*/", ""),
    ]

    #####################################
    # SET ITERATORS
    #####################################
    # SUBJECTS ITERATOR
    subjects_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subjects_infosource")
    subjects_infosource.iterables = ("subject_id", subjects_list)

    def add_subject_id_to_ds_dir_fct(subject_id, ds_path):
        import os

        out_path = os.path.join(ds_path, subject_id)
        return out_path

    add_subject_id_to_ds_dir = Node(
        util.Function(
            input_names=["subject_id", "ds_path"], output_names=["out_path"], function=add_subject_id_to_ds_dir_fct
        ),
        name="add_subject_id_to_ds_dir",
    )
    wf.connect(subjects_infosource, "subject_id", add_subject_id_to_ds_dir, "subject_id")
    add_subject_id_to_ds_dir.inputs.ds_path = ds_dir
    wf.connect(add_subject_id_to_ds_dir, "out_path", ds, "base_directory")

    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=["parcellation"]), name="parcellation_infosource")
    parcellation_infosource.iterables = ("parcellation", list(parcellations_dict.keys()))

    # BP FILTER ITERATOR
    bp_filter_infosource = Node(util.IdentityInterface(fields=["bp_freqs"]), name="bp_filter_infosource")
    bp_filter_infosource.iterables = ("bp_freqs", bp_freq_list)

    # EXTRACTION METHOD ITERATOR
    extraction_method_infosource = Node(
        util.IdentityInterface(fields=["extraction_method"]), name="extraction_method_infosource"
    )
    extraction_method_infosource.iterables = ("extraction_method", extraction_methods_list)

    # GET SUBJECT SPECIFIC FUNCTIONAL DATA
    selectfiles_templates = {
        "preproc_epi_mni": "{subject_id}/rsfMRI_preprocessing/epis_MNI_3mm/01_denoised/TR_645/preprocessed_fullspectrum_MNI_3mm.nii.gz"
    }

    selectfiles = Node(nio.SelectFiles(selectfiles_templates, base_directory=preprocessed_data_dir), name="selectfiles")
    wf.connect(subjects_infosource, "subject_id", selectfiles, "subject_id")

    ##############
    ## extract ts
    ##############
    # returns TR in ms
    get_TR = Node(ImageInfo(), name="get_TR")
    wf.connect(selectfiles, "preproc_epi_mni", get_TR, "in_file")

    parcellated_ts = Node(
        util.Function(
#.........remainder of this function omitted.........
Author: Yaqiongxiao | Project: LeiCA | Lines: 103 | Source: calc_con_mats.py
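The docstring of connectivity_matrix_wf describes the expected shape of parcellations_dict and bp_freq_list. Hypothetical values satisfying those conventions could look like this (the parcellation name, path, and inner keys are assumptions, not taken from the LeiCA project):

# Hypothetical arguments matching the docstring's conventions.
parcellations_dict = {
    "craddock_200": {"nii": "/data/parcellations/craddock_200.nii.gz", "is_probabilistic": False},
}
bp_freq_list = [(), (0.01, 0.1)]  # empty tuple -> no band-pass filter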

Example 6: open

# Required import: from nipype.pipeline.engine import Workflow [as alias]
# Or: from nipype.pipeline.engine.Workflow import config["execution"]["crashdump_dir"] [as alias]
#                          name='applytransform_ts')
#
#
# applywarp_linear.connect([(inputnode, applytransform_ts, [('epi_moco_ts','input_image')]),
#                           (transformlist, applytransform_ts, [('transformlist', 'transformation_series')]),
#                           (inputnode, applytransform_ts, [('mni', 'reference_image')]),
#                           (applytransform_ts, outputnode, [('output_image','lin_ts_fullwarped')])
#                           ])


# set up workflow, in- and output
applywarp_fmap.base_dir = "/scr/kansas1/huntenburg/lemon_missing/working_dir/"
data_dir = "/scr/jessica2/Schaare/LEMON/"
out_dir = "/scr/jessica2/Schaare/LEMON/preprocessed/"
# applywarp_linear.config['execution']={'remove_unnecessary_outputs': 'False'}
applywarp_fmap.config["execution"]["crashdump_dir"] = applywarp_fmap.base_dir + "/crash_files"

# reading subjects from file
# subjects=['LEMON001']
subjects = []
with open("/scr/jessica2/Schaare/LEMON/missing_subjects.txt", "r") as f:
    for line in f:
        subjects.append(line.strip())


# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="infosource")
infosource.iterables = ("subject_id", subjects)

# select files
templates = {
Author: juhuntenburg | Project: nonlinear_coreg | Lines: 33 | Source: applywarp_fmap.py
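Once crashdump_dir is set, every failing node leaves a crash pickle (.pklz) in that directory. One way to inspect such a file programmatically is nipype's loadcrash helper; the file name below is hypothetical, and the assumed structure is a dict holding the failed node and its traceback.

from nipype.utils.filemanip import loadcrash

# Load a crash file from the configured crashdump_dir (hypothetical name).
crash = loadcrash("/tmp/working_dir/crash_files/crash-example-node0.pklz")
print("".join(crash["traceback"]))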


Note: The examples of nipype.pipeline.engine.Workflow.config["execution"]["crashdump_dir"] in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, consult the license of the corresponding project; do not republish without permission.