This page collects typical usage examples of the Python method nipype.pipeline.engine.Workflow.config["execution"]. If you are unsure what Workflow.config["execution"] does or how to use it, the curated code examples below should help; you can also explore the containing class, nipype.pipeline.engine.Workflow, for more context.
Three code examples of Workflow.config["execution"] are shown below, ordered by popularity by default.
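Before the examples, here is a minimal, self-contained sketch of the pattern they all share: assigning a plain dict of execution options to a workflow's config["execution"]. The node, workflow name, and base_dir below are illustrative and do not come from the examples themselves:

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

# A trivial one-node workflow, just so there is something to configure.
passthrough = Node(IdentityInterface(fields=["x"]), name="passthrough")
passthrough.inputs.x = 1

wf = Workflow(name="demo", base_dir="/tmp/demo_wf")  # illustrative path
wf.add_nodes([passthrough])

# Execution options are given as strings; nipype parses the booleans.
wf.config["execution"] = {
    "hash_method": "timestamp",            # hash inputs by mtime, not content
    "remove_unnecessary_outputs": "true",  # discard outputs nothing consumes
    "stop_on_first_crash": "false",
}

wf.run(plugin="Linear")  # the scheduler is chosen at run time, not via config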
Example 1: create
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# or: from nipype.pipeline.engine.Workflow import config["execution"] [as alias]
def create(self):  # , **kwargs
    """Create the nodes and connections for the workflow."""
    # Preamble
    csvReader = CSVReader()
    csvReader.inputs.in_file = self.csv_file.default_value
    csvReader.inputs.header = self.hasHeader.default_value
    csvOut = csvReader.run()

    print("=" * 80)
    print(csvOut.outputs.__dict__)
    print("=" * 80)

    iters = OrderedDict()
    label = list(csvOut.outputs.__dict__.keys())[0]
    result = getattr(csvOut.outputs, label)  # clearer and safer than eval()
    iters["tests"], iters["trains"] = sample_crossvalidation_set(
        result, self.sample_size.default_value
    )

    # Main event
    out_fields = ["T1", "T2", "Label", "trainindex", "testindex"]
    inputsND = Node(
        interface=IdentityInterface(fields=out_fields),
        run_without_submitting=True,
        name="inputs",
    )
    inputsND.iterables = [
        ("trainindex", iters["trains"]),
        ("testindex", iters["tests"]),
    ]
    if not self.hasHeader.default_value:
        inputsND.inputs.T1 = csvOut.outputs.column_0
        inputsND.inputs.Label = csvOut.outputs.column_1
        inputsND.inputs.T2 = csvOut.outputs.column_2
    else:
        inputsND.inputs.T1 = csvOut.outputs.t1
        inputsND.inputs.Label = csvOut.outputs.label
        inputsND.inputs.T2 = csvOut.outputs.t2

    metaflow = Workflow(name="metaflow")
    metaflow.config["execution"] = {
        "plugin": "Linear",
        "stop_on_first_crash": "false",
        "stop_on_first_rerun": "false",
        # stop_on_first_rerun stops at the first attempt to rerun, before
        # running and before deleting previous results.
        "hash_method": "timestamp",
        "single_thread_matlab": "true",  # MATLAB 2011a+ multithreads matrix multiplication by default
        "remove_unnecessary_outputs": "true",
        "use_relative_paths": "false",  # relative paths should be on; changing this requires a hash update
        "remove_node_directories": "false",  # experimental
        "local_hash_check": "false",
    }
    metaflow.add_nodes([inputsND])
    # import pdb; pdb.set_trace()  # debugging hook, left disabled

    fusionflow = FusionLabelWorkflow()
    self.connect(
        [
            (
                metaflow,
                fusionflow,
                [
                    ("inputs.trainindex", "trainT1s.index"),
                    ("inputs.T1", "trainT1s.inlist"),
                ],
            ),
            (
                metaflow,
                fusionflow,
                [
                    ("inputs.trainindex", "trainLabels.index"),
                    ("inputs.Label", "trainLabels.inlist"),
                ],
            ),
            (
                metaflow,
                fusionflow,
                [
                    ("inputs.testindex", "testT1s.index"),
                    ("inputs.T1", "testT1s.inlist"),
                ],
            ),
        ]
    )
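sample_crossvalidation_set above is project-specific and not shown on this page. Judging only from the call site, it returns two parallel lists: test index sets and the matching train index sets. A hypothetical stand-in with that shape (the fold count, seeding, and sampling logic are invented for illustration):

import random

def sample_crossvalidation_set(items, sample_size, folds=3, seed=0):
    """Hypothetical stand-in: return (test_index_lists, train_index_lists)."""
    rng = random.Random(seed)
    indices = list(range(len(items)))
    tests, trains = [], []
    for _ in range(folds):
        test = sorted(rng.sample(indices, sample_size))
        tests.append(test)
        trains.append([i for i in indices if i not in test])
    return tests, trains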
Example 2: Node
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# or: from nipype.pipeline.engine.Workflow import config["execution"] [as alias]
# https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=ind1205&L=FSL&P=R57592&1=FSL&9=A&I=-3&J=on&d=No+Match%3BMatch%3BMatches&z=4
# Convert bandpass cutoffs to FSL sigmas in volumes (1.4 is presumably the
# TR in seconds): sigma = 1 / (2 * TR * cutoff_hz).
lp = 1.0 / (2 * 1.4 * 0.1)  # lowpass cutoff 0.1 Hz
hp = 1.0 / (2 * 1.4 * 0.01)  # highpass cutoff 0.01 Hz
bandpass_filter = Node(fsl.TemporalFilter(lowpass_sigma=lp, highpass_sigma=hp), name="bandpass_filter")
bandpass_filter.plugin_args = {"initial_specs": "request_memory = 30000"}
preproc.connect(remove_noise, "out_file", bandpass_filter, "in_file")
preproc.connect(bandpass_filter, "out_file", outputnode, "filtered_file")
###################################################################################################################################
# in and out
preproc.base_dir = "/scr/kansas1/huntenburg/"
preproc.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/"
data_dir = "/scr/"
subjects = ["LEMON006"] # ,'LEMON001','LEMON087','LEMON030','LEMON044','LEMON071']
# infosource to iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="subject_infosource")
subject_infosource.iterables = ("subject_id", subjects)
# infosource to iterate over coregistration methods
cor_method_infosource = Node(util.IdentityInterface(fields=["cor_method"]), name="cor_method_infosource")
cor_method_infosource.iterables = ("cor_method", ["lin_ts"]) # , 'lin_ts', 'nonlin_ts', 'fmap_ts', 'topup_ts'])
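The lp/hp values at the top of this example implement the conversion discussed in the linked FSL thread: a cutoff frequency in Hz becomes a filter sigma in volumes via sigma = 1 / (2 * TR * cutoff). A small helper makes the arithmetic explicit (assuming, as the numbers suggest, a TR of 1.4 s):

def hz_to_sigma_volumes(cutoff_hz, tr_s):
    """Convert a temporal cutoff in Hz to an FSL TemporalFilter sigma in volumes."""
    return 1.0 / (2.0 * tr_s * cutoff_hz)

lp = hz_to_sigma_volumes(0.1, 1.4)   # lowpass cutoff 0.1 Hz  -> ~3.57 volumes
hp = hz_to_sigma_volumes(0.01, 1.4)  # highpass cutoff 0.01 Hz -> ~35.71 volumes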
Example 3: open
# Required import: from nipype.pipeline.engine import Workflow [as alias]
# or: from nipype.pipeline.engine.Workflow import config["execution"] [as alias]
name="write_file",
)
similarity.connect(
    [
        (lin_sim, write_txt, [("similarity", "lin_metrics")]),
        (nonlin_sim, write_txt, [("similarity", "nonlin_metrics")]),
        (fmap_sim, write_txt, [("similarity", "fmap_metrics")]),
        (topup_sim, write_txt, [("similarity", "topup_metrics")]),
        (write_txt, outputnode, [("txtfile", "textfile")]),
    ]
)
# in and out
similarity.base_dir = "/scr/kansas1/huntenburg/"
similarity.config["execution"] = {"remove_unnecessary_outputs": "False"}
out_dir = "/scr/jessica2/Schaare/LEMON/preprocessed/"
data_dir = "/scr/jessica2/Schaare/LEMON/preprocessed/"
# subjects=['LEMON001']
subjects = []
with open("/scr/jessica2/Schaare/LEMON/done_freesurfer.txt") as f:
    for line in f:
        subjects.append(line.strip())
subjects.remove("LEMON027")
# create infosource to iterate over subjects
infosource = Node(util.IdentityInterface(fields=["subject_id"]), name="infosource")
infosource.iterables = ("subject_id", subjects)
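As in the previous example, the iterables on the IdentityInterface node drive the fan-out: when the workflow runs, nipype duplicates everything downstream of infosource once per listed value. A minimal illustration of the mechanism (subject IDs are made up):

from nipype import Node
from nipype.interfaces.utility import IdentityInterface

infosource = Node(IdentityInterface(fields=["subject_id"]), name="infosource")
infosource.iterables = ("subject_id", ["sub-01", "sub-02"])
# At run time each value yields an independent copy of the downstream
# subgraph, so two subjects produce two parallel branches.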