当前位置: 首页>>代码示例>>Python>>正文


Python Workflow.config["execution"]方法代码示例

本文整理汇总了Python中nipype.pipeline.engine.Workflow.config["execution"]方法的典型用法代码示例。如果您正苦于以下问题:Python Workflow.config["execution"]方法的具体用法?Python Workflow.config["execution"]怎么用?Python Workflow.config["execution"]使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在nipype.pipeline.engine.Workflow的用法示例。


在下文中一共展示了Workflow.config["execution"]方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: create

# 需要导入模块: from nipype.pipeline.engine import Workflow [as 别名]
# 或者: from nipype.pipeline.engine.Workflow import config["execution"] [as 别名]
    def create(self):  # , **kwargs):
        """Create the nodes and connections for the cross-validation workflow.

        Reads the subject CSV, splits the first column into train/test index
        sets, exposes them (plus the T1/T2/Label file columns) through an
        IdentityInterface node with iterables, and connects that node into a
        FusionLabelWorkflow.
        """
        # Preamble: run the CSV reader once, eagerly, so its columns are
        # available as plain attributes on csvOut.outputs below.
        csvReader = CSVReader()
        csvReader.inputs.in_file = self.csv_file.default_value
        csvReader.inputs.header = self.hasHeader.default_value
        csvOut = csvReader.run()

        print("=" * 80)
        print(csvOut.outputs.__dict__)
        print("=" * 80)

        iters = OrderedDict()
        # The first output column drives the cross-validation split.
        label = list(csvOut.outputs.__dict__.keys())[0]
        # getattr() replaces the original eval("csvOut.outputs.<label>"):
        # identical semantics without executing arbitrary strings as code.
        result = getattr(csvOut.outputs, label)
        iters["tests"], iters["trains"] = sample_crossvalidation_set(
            result, self.sample_size.default_value
        )
        # Main event: one workflow iteration per (trainindex, testindex) pair.
        out_fields = ["T1", "T2", "Label", "trainindex", "testindex"]
        inputsND = Node(
            interface=IdentityInterface(fields=out_fields),
            run_without_submitting=True,
            name="inputs",
        )
        inputsND.iterables = [
            ("trainindex", iters["trains"]),
            ("testindex", iters["tests"]),
        ]
        if not self.hasHeader.default_value:
            # Headerless CSV: columns are positional (column_0, column_1, ...).
            inputsND.inputs.T1 = csvOut.outputs.column_0
            inputsND.inputs.Label = csvOut.outputs.column_1
            inputsND.inputs.T2 = csvOut.outputs.column_2
        else:
            # Header present: columns are keyed by (lower-cased) header name.
            # TODO: original marked this branch as unfinished.
            inputsND.inputs.T1 = csvOut.outputs.__dict__["t1"]
            inputsND.inputs.Label = csvOut.outputs.__dict__["label"]
            inputsND.inputs.T2 = csvOut.outputs.__dict__["t2"]
        metaflow = Workflow(name="metaflow")
        metaflow.config["execution"] = {
            "plugin": "Linear",
            "stop_on_first_crash": "false",
            "stop_on_first_rerun": "false",
            # This stops at first attempt to rerun, before running, and before deleting previous results.
            "hash_method": "timestamp",
            "single_thread_matlab": "true",  # Multi-core 2011a  multi-core for matrix multiplication.
            "remove_unnecessary_outputs": "true",
            "use_relative_paths": "false",  # relative paths should be on, require hash update when changed.
            "remove_node_directories": "false",  # Experimental
            "local_hash_check": "false",
        }

        metaflow.add_nodes([inputsND])
        """import pdb; pdb.set_trace()"""
        fusionflow = FusionLabelWorkflow()
        # Fan the iterated indices and file lists into the fusion sub-workflow.
        self.connect(
            [
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainT1s.index"),
                        ("inputs.T1", "trainT1s.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.trainindex", "trainLabels.index"),
                        ("inputs.Label", "trainLabels.inlist"),
                    ],
                ),
                (
                    metaflow,
                    fusionflow,
                    [
                        ("inputs.testindex", "testT1s.index"),
                        ("inputs.T1", "testT1s.inlist"),
                    ],
                ),
            ]
        )
开发者ID:BRAINSia,项目名称:BRAINSTools,代码行数:86,代码来源:crossValidate.py

示例2: Node

# 需要导入模块: from nipype.pipeline.engine import Workflow [as 别名]
# 或者: from nipype.pipeline.engine.Workflow import config["execution"] [as 别名]
# https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
# Convert frequency cutoffs to FSL sigma values (in volumes) using
# sigma = 1 / (2 * TR * f) per the FSL mailing-list link above.
# NOTE(review): assumes TR = 1.4 s with cutoffs 0.1 Hz (lowpass) and
# 0.01 Hz (highpass) — confirm against the acquisition protocol.
lp = 1.0 / (2 * 1.4 * 0.1)
hp = 1.0 / (2 * 1.4 * 0.01)

# Band-pass filter the denoised time series with FSL's TemporalFilter.
bandpass_filter = Node(fsl.TemporalFilter(lowpass_sigma=lp, highpass_sigma=hp), name="bandpass_filter")
# Scheduler hint: request 30 GB of memory for this node's job.
bandpass_filter.plugin_args = {"initial_specs": "request_memory = 30000"}


# Wire the filter between the noise-removal node and the output node.
preproc.connect(remove_noise, "out_file", bandpass_filter, "in_file")
preproc.connect(bandpass_filter, "out_file", outputnode, "filtered_file")


###################################################################################################################################
# in and out
preproc.base_dir = "/scr/kansas1/huntenburg/"
# Keep all intermediate outputs (string "False" is what nipype expects here).
preproc.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/"
data_dir = "/scr/"
# Single subject for now; the commented list holds the full cohort.
subjects = ["LEMON006"]  # ,'LEMON001','LEMON087','LEMON030','LEMON044','LEMON071']


# infosource to iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subject_infosource")
subject_infosource.iterables = ("subject_id", subjects)


# infosource to iterate over coregistration methods
cor_method_infosource = Node(util.IdentityInterface(fields=["cor_method"]), name="cor_method_infosource")
cor_method_infosource.iterables = ("cor_method", ["lin_ts"])  # , 'lin_ts', 'nonlin_ts', 'fmap_ts', 'topup_ts'])

开发者ID:juhuntenburg,项目名称:nonlinear_coreg,代码行数:31,代码来源:preproc_conn.py

示例3: open

# 需要导入模块: from nipype.pipeline.engine import Workflow [as 别名]
# 或者: from nipype.pipeline.engine.Workflow import config["execution"] [as 别名]
    name="write_file",
)

# Route each coregistration method's similarity metric into the text writer,
# then expose the written file on the output node.
similarity.connect(
    [
        (lin_sim, write_txt, [("similarity", "lin_metrics")]),
        (nonlin_sim, write_txt, [("similarity", "nonlin_metrics")]),
        (fmap_sim, write_txt, [("similarity", "fmap_metrics")]),
        (topup_sim, write_txt, [("similarity", "topup_metrics")]),
        (write_txt, outputnode, [("txtfile", "textfile")]),
    ]
)

# in and out
similarity.base_dir = "/scr/kansas1/huntenburg/"
# Keep all intermediate outputs (string "False" is what nipype expects here).
similarity.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/jessica2/Schaare/LEMON/preprocessed/"
data_dir = "/scr/jessica2/Schaare/LEMON/preprocessed/"
# subjects=['LEMON001']
# Build the subject list from the FreeSurfer completion log.
# 'with' closes the file deterministically (original open() was never closed).
with open("/scr/jessica2/Schaare/LEMON/done_freesurfer.txt", "r") as f:
    subjects = [line.strip() for line in f]
# Exclude LEMON027; like the original, raises ValueError if it is absent.
subjects.remove("LEMON027")


# create inforsource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="infosource")
infosource.iterables = ("subject_id", subjects)

开发者ID:juhuntenburg,项目名称:nonlinear_coreg,代码行数:31,代码来源:similarity_individual.py


注:本文中的nipype.pipeline.engine.Workflow.config["execution"]方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。