This article collects typical usage examples of the Python function nipype.config.update_config. If you have been struggling with questions such as: what exactly does update_config do? how do I call it? what does it look like in real code? then the hand-picked examples below should help.
A total of 15 code examples of update_config are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
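Before diving into the examples, here is a minimal sketch of the pattern almost all of them follow: build a nested dict of nipype settings, merge it into the global configuration with update_config, and propagate any logging changes. The option values and the work_dir path below are placeholders, not taken from any particular example.

import os
from nipype import config, logging

# Nested dict of nipype settings: top-level keys name config sections
# ('logging', 'execution'); the values are dicts of option/value pairs.
work_dir = '/tmp/my_workflow'  # placeholder path
nipype_cfg = dict(logging=dict(workflow_level='DEBUG',
                               log_directory=os.path.join(work_dir, 'logs'),
                               log_to_file=True),
                  execution={'stop_on_first_crash': True,
                             'crashdump_dir': os.path.join(work_dir, 'crash')})

# Merge the dict into nipype's global configuration ...
config.update_config(nipype_cfg)
# ... and propagate the new logging settings to nipype's loggers.
logging.update_logging(config)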
Example 1: ants_ct_wf
def ants_ct_wf(subjects_id,
preprocessed_data_dir,
working_dir,
ds_dir,
template_dir,
plugin_name):
import os
from nipype import config
from nipype.pipeline.engine import Node, Workflow, MapNode
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.interfaces.freesurfer.utils import ImageInfo
#####################################
# GENERAL SETTINGS
#####################################
wf = Workflow(name='ants_ct')
wf.base_dir = os.path.join(working_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
'remove_unnecessary_outputs': True,
'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')
ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
#####################################
# GET DATA
#####################################
# GET SUBJECT SPECIFIC STRUCTURAL DATA
in_data_templates = {
't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
}
in_data = Node(nio.SelectFiles(in_data_templates,
base_directory=preprocessed_data_dir),
name="in_data")
in_data.inputs.subject_id = subjects_id
# GET NKI ANTs templates
ants_templates_templates = {
'brain_template': 'NKI/T_template.nii.gz',
'brain_probability_mask': 'NKI/T_templateProbabilityMask.nii.gz',
'segmentation_priors': 'NKI/Priors/*.nii.gz',
't1_registration_template': 'NKI/T_template_BrainCerebellum.nii.gz'
}
ants_templates = Node(nio.SelectFiles(ants_templates_templates,
base_directory=template_dir),
name="ants_templates")
Example 2: _create_singleSession
def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
"""
create singleSession workflow on a single session
This is the main function to call when processing a data set with T1 & T2
data. ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
are the lists of images to be used in the auto-workup. atlas_fname_wpath is
the path and filename of the atlas to use.
"""
assert 'tissue_classify' in master_config['components'] or \
'auxlmk' in master_config['components'] or \
'denoise' in master_config['components'] or \
'landmark' in master_config['components'] or \
'segmentation' in master_config['components'] or \
'malf_2012_neuro' in master_config['components']
from nipype import config, logging
config.update_config(master_config) # Set universal pipeline options
logging.update_logging(config)
from workflows.baseline import generate_single_session_template_WF
project = dataDict['project']
subject = dataDict['subject']
session = dataDict['session']
blackListFileName = dataDict['T1s'][0] + '_noDenoise'
isBlackList = os.path.isfile(blackListFileName)
pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject, session)
onlyT1 = not (len(dataDict['T2s']) > 0)
if onlyT1:
print "T1 Only processing starts ..."
else:
print "Multimodal processing starts ..."
sessionWorkflow = generate_single_session_template_WF(project, subject, session, onlyT1, master_config,
phase=master_config['workflow_phase'],
interpMode=interpMode,
pipeline_name=pipeline_name,
doDenoise=(not isBlackList))
sessionWorkflow.base_dir = master_config['cachedir']
sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTs']
return sessionWorkflow
Example 3: execute_task
def execute_task(pckld_task, node_config, updatehash):
from socket import gethostname
from traceback import format_exc
from nipype import config, logging
traceback=None
result=None
try:
config.update_config(node_config)
logging.update_logging(config)
from cPickle import loads
task = loads(pckld_task)
result = task.run(updatehash=updatehash)
except:
traceback = format_exc()
result = task.result
return result, traceback, gethostname()
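The function above is the kind of helper a nipype execution plugin runs on a worker: it restores the pipeline configuration, unpickles the node, and runs it. A minimal sketch of how a caller might drive it is shown below; the throw-away Function node and the configuration dict are purely illustrative assumptions, not part of the original example.

from cPickle import dumps
from nipype import Node
from nipype.interfaces.utility import Function

def add(a, b):
    return a + b

# A small node so the sketch is self-contained (placeholder, not from the example).
node = Node(Function(input_names=['a', 'b'], output_names=['total'], function=add),
            name='add_node')
node.inputs.a = 1
node.inputs.b = 2

# Configuration to re-apply on the worker before the task runs.
node_cfg = {'logging': {'workflow_level': 'INFO'},
            'execution': {'crashdump_dir': '/tmp'}}

result, traceback, hostname = execute_task(dumps(node), node_cfg, updatehash=False)
if traceback is not None:
    print "task crashed on %s:\n%s" % (hostname, traceback)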
Example 4: learning_prepare_data_wf
def learning_prepare_data_wf(working_dir,
ds_dir,
template_dir,
df_file,
in_data_name_list,
data_lookup_dict,
use_n_procs,
plugin_name):
import os
from nipype import config
from nipype.pipeline.engine import Node, Workflow, MapNode, JoinNode
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.interfaces.freesurfer.utils import ImageInfo
from utils import aggregate_data, vectorize_data
from itertools import chain
# ensure in_data_name_list is list of lists
in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))
#####################################
# GENERAL SETTINGS
#####################################
wf = Workflow(name='learning_prepare_data_wf')
wf.base_dir = os.path.join(working_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
'remove_unnecessary_outputs': False,
'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')
ds = Node(nio.DataSink(), name='ds')
ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')
ds.inputs.regexp_substitutions = [
# ('subject_id_', ''),
('_parcellation_', ''),
('_bp_freqs_', 'bp_'),
('_extraction_method_', ''),
('_subject_id_[A0-9]*/', '')
]
ds_pdf = Node(nio.DataSink(), name='ds_pdf')
ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
ds_pdf.inputs.parameterization = False
# get atlas data
templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
'brain_mask_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm_brain_mask.nii.gz',
'brain_template_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm.nii.gz'
}
selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
base_directory=template_dir),
name="selectfiles_anat_templates")
#####################################
# SET ITERATORS
#####################################
# SUBJECTS ITERATOR
in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)
mulitmodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
name='mulitmodal_in_data_name_infosource')
mulitmodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)
subjects_selection_crit_dict = {}
subjects_selection_crit_dict['adult_healthy_F'] = ["df[df.sex == \'F\']", 'df[df.no_axis_1]', 'df[df.age >= 18]']
subjects_selection_crit_dict['adult_F'] = ["df[df.sex == \'F\']", 'df[df.age >= 18]']
subjects_selection_crit_dict['F'] = ["df[df.sex == \'F\']"]
subjects_selection_crit_dict['adult_healthy_M'] = ["df[df.sex == \'M\']", 'df[df.no_axis_1]', 'df[df.age >= 18]']
subjects_selection_crit_dict['adult_M'] = ["df[df.sex == \'M\']", 'df[df.age >= 18]']
subjects_selection_crit_dict['adult'] = ['df[df.age >= 18]']
# subjects_selection_crit_names_list = subjects_selection_crit_dict.keys()
subjects_selection_crit_names_list = ['adult_F']
subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
name='subject_selection_infosource')
subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)
def out_name_str_fct(selection_criterium, in_data_name):
return selection_criterium + '_' + in_data_name
out_name_str = Node(util.Function(input_names=['selection_criterium', 'in_data_name'],
output_names=['out_name_str'],
function=out_name_str_fct),
name='out_name_str')
wf.connect(in_data_name_infosource, 'in_data_name', out_name_str, 'in_data_name')
wf.connect(subject_selection_infosource, 'selection_criterium', out_name_str, 'selection_criterium')
def get_subjects_info_fct(df_file, subjects_selection_crit_dict, selection_criterium):
import pandas as pd
import os
#......... (part of the code omitted here) .........
Example 5: segmentation
def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
import os.path
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces import ants
from nipype.interfaces.utility import IdentityInterface, Function, Merge
# Set universal pipeline options
from nipype import config
config.update_config(master_config)
assert config.get('execution', 'plugin') == master_config['execution']['plugin']
from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
from WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
from utilities.distributed import modify_qsub_args
from SEMTools import BRAINSSnapShotWriter
baw200 = pe.Workflow(name=pipeline_name)
# HACK: print for debugging
for key, itme in master_config.items():
print "-" * 30
print key, ":", itme
print "-" * 30
#END HACK
inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
't2_average',
'template_t1',
'hncma-atlas',
'LMIatlasToSubject_tx',
'inputLabels',
'inputHeadLabels',
'posteriorImages',
'TissueClassifyatlasToSubjectInverseTransform',
'UpdatedPosteriorsList']),
run_without_submitting=True, name='inputspec')
# outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
# run_without_submitting=True, name='outputspec')
currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
input_names=['t1_image', 'brain_labels',
'clipped_file_name'],
output_names=['clipped_file']),
name=currentClipT1ImageWithBrainMaskName)
ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'
baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
('inputLabels', 'brain_labels')])])
currentAtlasToSubjectantsRegistration = 'AtlasToSubjectANTsRegistration_' + str(subjectid) + "_" + str(sessionid)
AtlasToSubjectantsRegistration = pe.Node(interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration)
AtlasToSubjectantsRegistration.inputs.dimension = 3
AtlasToSubjectantsRegistration.inputs.transforms = ["Affine", "SyN"]
AtlasToSubjectantsRegistration.inputs.transform_parameters = [[0.1], [0.15, 3.0, 0.0]]
AtlasToSubjectantsRegistration.inputs.metric = ['Mattes', 'CC']
AtlasToSubjectantsRegistration.inputs.sampling_strategy = ['Regular', None]
AtlasToSubjectantsRegistration.inputs.sampling_percentage = [1.0, 1.0]
AtlasToSubjectantsRegistration.inputs.metric_weight = [1.0, 1.0]
AtlasToSubjectantsRegistration.inputs.radius_or_number_of_bins = [32, 4]
AtlasToSubjectantsRegistration.inputs.number_of_iterations = [[1000, 1000, 1000], [10000, 500, 500, 200]]
AtlasToSubjectantsRegistration.inputs.convergence_threshold = [5e-7, 5e-7]
AtlasToSubjectantsRegistration.inputs.convergence_window_size = [25, 25]
AtlasToSubjectantsRegistration.inputs.use_histogram_matching = [True, True]
AtlasToSubjectantsRegistration.inputs.shrink_factors = [[4, 2, 1], [5, 4, 2, 1]]
AtlasToSubjectantsRegistration.inputs.smoothing_sigmas = [[4, 2, 0], [5, 4, 2, 0]]
AtlasToSubjectantsRegistration.inputs.sigma_units = ["vox","vox"]
AtlasToSubjectantsRegistration.inputs.use_estimate_learning_rate_once = [False, False]
AtlasToSubjectantsRegistration.inputs.write_composite_transform = True
AtlasToSubjectantsRegistration.inputs.collapse_output_transforms = True
AtlasToSubjectantsRegistration.inputs.output_transform_prefix = 'AtlasToSubject_'
AtlasToSubjectantsRegistration.inputs.winsorize_lower_quantile = 0.025
AtlasToSubjectantsRegistration.inputs.winsorize_upper_quantile = 0.975
AtlasToSubjectantsRegistration.inputs.collapse_linear_transforms_to_fixed_image_header = False
AtlasToSubjectantsRegistration.inputs.output_warped_image = 'atlas2subject.nii.gz'
AtlasToSubjectantsRegistration.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'
baw200.connect([(inputsSpec, AtlasToSubjectantsRegistration, [('LMIatlasToSubject_tx', 'initial_moving_transform'),
('t1_average', 'fixed_image'),
('template_t1', 'moving_image')])
])
myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
subjectid,
sessionid,
master_config['queue'],
master_config['long_q'],
t1Only=onlyT1)
MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(sessionid)
MergeStage2AverageImages = pe.Node(interface=Merge(2), run_without_submitting=True,
name=MergeStage2AverageImagesName)
baw200.connect([(inputsSpec, myLocalSegWF, [('t1_average', 'inputspec.T1Volume'),
('posteriorImages', "inputspec.posteriorDictionary"),
('inputLabels', 'inputspec.RegistrationROI'),]),
(inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
(AtlasToSubjectantsRegistration, myLocalSegWF, [('composite_transform',
'inputspec.atlasToSubjectTransform')])
#......... (part of the code omitted here) .........
Example 6: segmentation
def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
import os.path
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces import ants
from nipype.interfaces.utility import IdentityInterface, Function, Merge
# Set universal pipeline options
from nipype import config
config.update_config(master_config)
from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
from .WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
from utilities.distributed import modify_qsub_args
from nipype.interfaces.semtools import BRAINSSnapShotWriter
# CLUSTER_QUEUE=master_config['queue']
CLUSTER_QUEUE_LONG = master_config['long_q']
baw200 = pe.Workflow(name=pipeline_name)
# HACK: print for debugging
for key, itme in list(master_config.items()):
print(("-" * 30))
print((key, ":", itme))
print(("-" * 30))
# END HACK
inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
't2_average',
'template_t1',
'hncma_atlas',
'LMIatlasToSubject_tx',
'inputLabels',
'inputHeadLabels',
'posteriorImages',
'UpdatedPosteriorsList',
'atlasToSubjectRegistrationState',
'rho',
'phi',
'theta',
'l_caudate_ProbabilityMap',
'r_caudate_ProbabilityMap',
'l_hippocampus_ProbabilityMap',
'r_hippocampus_ProbabilityMap',
'l_putamen_ProbabilityMap',
'r_putamen_ProbabilityMap',
'l_thalamus_ProbabilityMap',
'r_thalamus_ProbabilityMap',
'l_accumben_ProbabilityMap',
'r_accumben_ProbabilityMap',
'l_globus_ProbabilityMap',
'r_globus_ProbabilityMap',
'trainModelFile_txtD0060NT0060_gz',
]),
run_without_submitting=True, name='inputspec')
# outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
# run_without_submitting=True, name='outputspec')
currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
input_names=['t1_image', 'brain_labels',
'clipped_file_name'],
output_names=['clipped_file']),
name=currentClipT1ImageWithBrainMaskName)
ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'
baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
('inputLabels', 'brain_labels')])])
currentA2SantsRegistrationPostABCSyN = 'A2SantsRegistrationPostABCSyN_' + str(subjectid) + "_" + str(sessionid)
## TODO: It would be great to update the BRAINSABC atlasToSubjectTransform at this point, but
## That requires more testing, and fixes to ANTS to properly collapse transforms.
## For now we are simply creating a dummy node to pass through
A2SantsRegistrationPostABCSyN = pe.Node(interface=ants.Registration(), name=currentA2SantsRegistrationPostABCSyN)
many_cpu_ANTsSyN_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 16),
'overwrite': True}
A2SantsRegistrationPostABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary
CommonANTsRegistrationSettings(
antsRegistrationNode=A2SantsRegistrationPostABCSyN,
registrationTypeDescription="A2SantsRegistrationPostABCSyN",
output_transform_prefix='AtlasToSubjectPostBABC_SyN',
output_warped_image='atlas2subjectPostBABC.nii.gz',
output_inverse_warped_image='subject2atlasPostBABC.nii.gz',
save_state='SavedInternalSyNStatePostBABC.h5',
invert_initial_moving_transform=False,
initial_moving_transform=None)
## TODO: Try multi-modal registration here
baw200.connect([(inputsSpec, A2SantsRegistrationPostABCSyN, [('atlasToSubjectRegistrationState', 'restore_state'),
('t1_average', 'fixed_image'),
('template_t1', 'moving_image')])
])
myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
subjectid,
sessionid,
master_config['queue'],
#......... (part of the code omitted here) .........
Example 7: Workflow
######################
# WF
######################
wd_dir = '/scr/kansas1/data/lsd-lemon/lemon_wd_meanDist_%s' % distype
ds_dir = '/scr/kansas1/data/lsd-lemon/lemon_results_meanDist_%s' % distype
wf = Workflow(name='distconnect_meanDist_%s' % distype)
wf.base_dir = os.path.join(wd_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
'remove_unnecessary_outputs': False,
'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(wd_dir, 'crash')
ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
######################
# GET DATA
######################
# SUBJECTS ITERATOR
subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
subjects_infosource.iterables = ('subject_id', subjects_list)
run_mean_dist = Node(util.Function(input_names=['sub'],
output_names=[],
Example 8: learning_predict_data_2samp_wf
def learning_predict_data_2samp_wf(working_dir,
ds_dir,
in_data_name_list,
subjects_selection_crit_dict,
subjects_selection_crit_names_list,
aggregated_subjects_dir,
target_list,
use_n_procs,
plugin_name,
confound_regression=[False, True],
run_cv=False,
n_jobs_cv=1,
run_tuning=False,
run_2sample_training=False,
aggregated_subjects_dir_nki=None,
subjects_selection_crit_dict_nki=None,
subjects_selection_crit_name_nki=None,
reverse_split=False,
random_state_nki=666,
run_learning_curve=False,
life_test_size=0.5):
import os
from nipype import config
from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from itertools import chain
from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
import pandas as pd
###############################################################################################################
# GENERAL SETTINGS
wf = Workflow(name='learning_predict_data_2samp_wf')
wf.base_dir = os.path.join(working_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
execution={'stop_on_first_crash': False,
'remove_unnecessary_outputs': False,
'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')
ds = Node(nio.DataSink(), name='ds')
ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')
ds.inputs.regexp_substitutions = [
# ('subject_id_', ''),
('_parcellation_', ''),
('_bp_freqs_', 'bp_'),
('_extraction_method_', ''),
('_subject_id_[A0-9]*/', '')
]
ds_pdf = Node(nio.DataSink(), name='ds_pdf')
ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
ds_pdf.inputs.parameterization = False
###############################################################################################################
# ensure in_data_name_list is list of lists
in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))
###############################################################################################################
# SET ITERATORS
in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)
multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
name='multimodal_in_data_name_infosource')
multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)
subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
name='subject_selection_infosource')
subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)
target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
target_infosource.iterables = ('target_name', target_list)
###############################################################################################################
# COMPILE LIFE DATA
###############################################################################################################
###############################################################################################################
# GET INFO AND SELECT FILES
df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
df = pd.read_pickle(df_all_subjects_pickle_file)
# build lookup dict for unimodal data
X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
#......... (part of the code omitted here) .........
Example 9: open
wf.config['execution'] = {'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(c.crashLogDirectory)}
log_dir = os.path.join(c.outputDirectory, 'logs', 'group_analysis', resource, 'model_%s' % (os.path.basename(model)))
try:
os.makedirs(log_dir)
except:
print "log_dir already exist"
# enable logging
from nipype import config
from nipype import logging
config.update_config({'logging': {'log_directory': log_dir,
'log_to_file': True}})
# Temporarily disable until solved
#logging.update_logging(config)
iflogger = logging.getLogger('interface')
group_sublist = open(subject_list, 'r')
#print >>diag, "> Opened subject list: ", subject_list
#print >>diag, ""
sublist_items = group_sublist.readlines()
input_subject_list = [line.rstrip('\n') for line in sublist_items \
Example 10: RunSubjectWorkflow
def RunSubjectWorkflow(args):
"""
.-----------.
--- | Session 1 | ---> /project/subjectA/session1/phase/
/ *-----------*
.-----------. /
| Subject A | <
*-----------* \
\ .-----------.
--- | Session 2 | ---> /project/subjectA/session2/phase/
*-----------*
**** Replaces WorkflowT1T2.py ****
"""
database, start_time, subject, master_config = args
assert 'baseline' in master_config['components'] or 'longitudinal' in master_config['components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
# HACK:
# To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
# using multiprocessing.map_async(), re-instantiate database
# database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
#
# END HACK
import time
from nipype import config, logging
config.update_config(master_config) # Set universal pipeline options
assert config.get('execution', 'plugin') == master_config['execution']['plugin']
# DEBUG
# config.enable_debug_mode()
# config.set('execution', 'stop_on_first_rerun', 'true')
# END DEBUG
logging.update_logging(config)
import nipype.pipeline.engine as pe
import nipype.interfaces.base as nbase
import nipype.interfaces.io as nio
from nipype.interfaces.utility import IdentityInterface, Function
import traits
from baw_exp import OpenSubjectDatabase
from SessionDB import SessionDB
from PipeLineFunctionHelpers import convertToList
from atlasNode import MakeAtlasNode
from utilities.misc import GenerateSubjectOutputPattern as outputPattern
from utilities.misc import GenerateWFName
while time.time() < start_time:
time.sleep(start_time - time.time() + 1)
print "Delaying start for {subject}".format(subject=subject)
print("===================== SUBJECT: {0} ===========================".format(subject))
subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
subjectWorkflow.base_dir = config.get('logging', 'log_directory')
# subjectWorkflow.config['execution']['plugin'] = 'Linear' # Hardcodeded in WorkupT1T2.py - why?
# DEBUG
# subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
# END DEBUG
atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')
sessionWorkflow = dict()
inputsSpec = dict()
sessions = database.getSessionsFromSubject(subject)
# print "These are the sessions: ", sessions
if 'baseline' in master_config['components']:
current_phase = 'baseline'
from baseline import create_baseline as create_wkfl
elif 'longitudinal' in master_config['components']:
current_phase = 'longitudinal'
from longitudinal import create_longitudial as create_wkfl
for session in sessions: # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
project = database.getProjFromSession(session)
pname = "{0}_{1}".format(session, current_phase) # Long node names make graphs a pain to read/print
# pname = GenerateWFName(project, subject, session, current_phase)
print "Building session pipeline for {0}".format(session)
inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])
sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
interpMode='Linear', pipeline_name=pname)
subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
('T2s', 'inputspec.T2s'),
('PDs', 'inputspec.PDs'),
('FLs', 'inputspec.FLs'),
('OTs', 'inputspec.OTHERs'),
]),
(atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
'inputspec.atlasLandmarkFilename'),
('template_weights_50Lmks_wts',
'inputspec.atlasWeightFilename'),
('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
])
if current_phase == 'baseline':
subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
#......... (part of the code omitted here) .........
Example 11: calc_local_metrics
def calc_local_metrics(brain_mask,
preprocessed_data_dir,
subject_id,
parcellations_dict,
bp_freq_list,
TR,
selectfiles_templates,
working_dir,
ds_dir,
use_n_procs,
plugin_name):
import os
from nipype import config
from nipype.pipeline.engine import Node, Workflow, MapNode
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
from nipype.interfaces.freesurfer.preprocess import MRIConvert
import CPAC.alff.alff as cpac_alff
import CPAC.reho.reho as cpac_reho
import CPAC.utils.utils as cpac_utils
import utils as calc_metrics_utils
from motion import calculate_FD_P, calculate_FD_J
#####################################
# GENERAL SETTINGS
#####################################
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
wf = Workflow(name='LeiCA_LIFE_metrics')
wf.base_dir = os.path.join(working_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
'remove_unnecessary_outputs': True,
'job_finished_timeout': 15})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')
ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
ds.inputs.regexp_substitutions = [('MNI_resampled_brain_mask_calc.nii.gz', 'falff.nii.gz'),
('residual_filtered_3dT.nii.gz', 'alff.nii.gz'),
('_parcellation_', ''),
('_bp_freqs_', 'bp_'),
]
#####################
# ITERATORS
#####################
# PARCELLATION ITERATOR
parcellation_infosource = Node(util.IdentityInterface(fields=['parcellation']), name='parcellation_infosource')
parcellation_infosource.iterables = ('parcellation', parcellations_dict.keys())
# BP FILTER ITERATOR
bp_filter_infosource = Node(util.IdentityInterface(fields=['bp_freqs']), name='bp_filter_infosource')
bp_filter_infosource.iterables = ('bp_freqs', bp_freq_list)
selectfiles = Node(nio.SelectFiles(selectfiles_templates,
base_directory=preprocessed_data_dir),
name='selectfiles')
selectfiles.inputs.subject_id = subject_id
# #####################
# # FIX TR IN HEADER
# #####################
# tr_msec = int(TR * 1000)
# tr_str = '-tr %s' % tr_msec
#
# fixed_tr_bp = Node(MRIConvert(out_type='niigz', args=tr_str), name='fixed_tr_bp')
# wf.connect(selectfiles, 'epi_MNI_bp', fixed_tr_bp, 'in_file')
#
# fixed_tr_fullspectrum = Node(MRIConvert(out_type='niigz', args=tr_str), name='fixed_tr_fullspectrum')
# wf.connect(selectfiles, 'epi_MNI_fullspectrum', fixed_tr_fullspectrum, 'in_file')
#####################
# calc FD
#####################
FD_P = Node(util.Function(input_names=['in_file'],
output_names=['FD_ts_file', 'mean_FD_file', 'max_FD_file'],
function=calculate_FD_P),
name='FD_P')
wf.connect(selectfiles, 'moco_parms_file', FD_P, 'in_file')
wf.connect(FD_P, 'FD_ts_file', ds, '[email protected]')
wf.connect(FD_P, 'mean_FD_file', ds, '[email protected]_FD')
wf.connect(FD_P, 'max_FD_file', ds, '[email protected]_FD')
FD_J = Node(util.Function(input_names=['in_file'],
output_names=['FD_ts_file', 'mean_FD_file', 'max_FD_file'],
function=calculate_FD_J),
name='FD_J')
wf.connect(selectfiles, 'jenkinson_file', FD_J, 'in_file')
wf.connect(FD_J, 'FD_ts_file', ds, '[email protected]_J')
wf.connect(FD_J, 'mean_FD_file', ds, '[email protected]_FD_J')
#......... (part of the code omitted here) .........
Example 12: init_mriqc
def init_mriqc(opts, retval):
"""Build the workflow enumerator"""
from bids.grabbids import BIDSLayout
from nipype import config as ncfg
from nipype.pipeline.engine import Workflow
from ..utils.bids import collect_bids_data
from ..workflows.core import build_workflow
retval['workflow'] = None
retval['plugin_settings'] = None
# Build settings dict
bids_dir = Path(opts.bids_dir).expanduser()
output_dir = Path(opts.output_dir).expanduser()
# Number of processes
n_procs = opts.n_procs or cpu_count()
settings = {
'bids_dir': bids_dir.resolve(),
'output_dir': output_dir.resolve(),
'work_dir': opts.work_dir.expanduser().resolve(),
'write_graph': opts.write_graph,
'n_procs': n_procs,
'testing': opts.testing,
'hmc_afni': opts.hmc_afni,
'hmc_fsl': opts.hmc_fsl,
'fft_spikes_detector': opts.fft_spikes_detector,
'ants_nthreads': opts.ants_nthreads,
'ants_float': opts.ants_float,
'verbose_reports': opts.verbose_reports or opts.testing,
'float32': opts.float32,
'ica': opts.ica,
'no_sub': opts.no_sub,
'email': opts.email,
'fd_thres': opts.fd_thres,
'webapi_url': opts.webapi_url,
'webapi_port': opts.webapi_port,
'upload_strict': opts.upload_strict,
}
if opts.hmc_afni:
settings['deoblique'] = opts.deoblique
settings['despike'] = opts.despike
settings['correct_slice_timing'] = opts.correct_slice_timing
if opts.start_idx:
settings['start_idx'] = opts.start_idx
if opts.stop_idx:
settings['stop_idx'] = opts.stop_idx
if opts.ants_settings:
settings['ants_settings'] = opts.ants_settings
if opts.dsname:
settings['dataset_name'] = opts.dsname
log_dir = settings['output_dir'] / 'logs'
# Create directories
log_dir.mkdir(parents=True, exist_ok=True)
settings['work_dir'].mkdir(parents=True, exist_ok=True)
# Set nipype config
ncfg.update_config({
'logging': {'log_directory': str(log_dir), 'log_to_file': True},
'execution': {
'crashdump_dir': str(log_dir), 'crashfile_format': 'txt',
'resource_monitor': opts.profile},
})
# Plugin configuration
plugin_settings = {}
if n_procs == 1:
plugin_settings['plugin'] = 'Linear'
if settings['ants_nthreads'] == 0:
settings['ants_nthreads'] = 1
else:
plugin_settings['plugin'] = 'MultiProc'
plugin_settings['plugin_args'] = {'n_procs': n_procs}
if opts.mem_gb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb
if settings['ants_nthreads'] == 0:
# always leave one extra thread for non ANTs work,
# don't use more than 8 threads - the speed up is minimal
settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
# Overwrite options if --use-plugin provided
if opts.use_plugin and opts.use_plugin.exists():
from yaml import load as loadyml
with opts.use_plugin.open() as pfile:
plugin_settings.update(loadyml(pfile))
# Process data types
modalities = opts.modalities
layout = BIDSLayout(str(settings['bids_dir']),
#......... (part of the code omitted here) .........
Example 13: process
def process(self):
# Process time
now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
# Initialization
if os.path.exists(os.path.join(self.base_directory,"LOG","pypeline.log")):
os.unlink(os.path.join(self.base_directory,"LOG","pypeline.log"))
config.update_config({'logging': {'log_directory': os.path.join(self.base_directory,"LOG"),
'log_to_file': True},
'execution': {'remove_unnecessary_outputs': False}
})
logging.update_logging(config)
iflogger = logging.getLogger('interface')
# Data import
datasource = pe.Node(interface=nio.DataGrabber(outfields = ['fMRI','T1','T2']), name='datasource')
datasource.inputs.base_directory = os.path.join(self.base_directory,'NIFTI')
datasource.inputs.template = '*'
datasource.inputs.raise_on_empty = False
datasource.inputs.field_template = dict(fMRI='fMRI.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')
datasource.inputs.sort_filelist=False
# Data sinker for output
sinker = pe.Node(nio.DataSink(), name="fMRI_sinker")
sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")
# Clear previous outputs
self.clear_stages_outputs()
# Create common_flow
common_flow = self.create_common_flow()
# Create fMRI flow
fMRI_flow = pe.Workflow(name='fMRI_pipeline')
fMRI_inputnode = pe.Node(interface=util.IdentityInterface(fields=["fMRI","T1","T2","subjects_dir","subject_id","wm_mask_file","roi_volumes","wm_eroded","brain_eroded","csf_eroded","parcellation_scheme","atlas_info"]),name="inputnode")
fMRI_outputnode = pe.Node(interface=util.IdentityInterface(fields=["connectivity_matrices"]),name="outputnode")
fMRI_flow.add_nodes([fMRI_inputnode,fMRI_outputnode])
if self.stages['Preprocessing'].enabled:
preproc_flow = self.create_stage_flow("Preprocessing")
fMRI_flow.connect([
(fMRI_inputnode,preproc_flow,[("fMRI","inputnode.functional")]),
])
if self.stages['Registration'].enabled:
reg_flow = self.create_stage_flow("Registration")
fMRI_flow.connect([
(fMRI_inputnode,reg_flow,[('T1','inputnode.T1')]),(fMRI_inputnode,reg_flow,[('T2','inputnode.T2')]),
(preproc_flow,reg_flow, [('outputnode.mean_vol','inputnode.target')]),
(fMRI_inputnode,reg_flow, [('wm_mask_file','inputnode.wm_mask'),('roi_volumes','inputnode.roi_volumes'),
('wm_eroded','inputnode.eroded_wm')])
])
if self.stages['Functional'].config.global_nuisance:
fMRI_flow.connect([
(fMRI_inputnode,reg_flow,[('brain_eroded','inputnode.eroded_brain')])
])
if self.stages['Functional'].config.csf:
fMRI_flow.connect([
(fMRI_inputnode,reg_flow,[('csf_eroded','inputnode.eroded_csf')])
])
if self.stages['Registration'].config.registration_mode == "BBregister (FS)":
fMRI_flow.connect([
(fMRI_inputnode,reg_flow, [('subjects_dir','inputnode.subjects_dir'),
('subject_id','inputnode.subject_id')]),
])
if self.stages['Functional'].enabled:
func_flow = self.create_stage_flow("Functional")
fMRI_flow.connect([
(preproc_flow,func_flow, [('outputnode.functional_preproc','inputnode.preproc_file')]),
(reg_flow,func_flow, [('outputnode.wm_mask_registered','inputnode.registered_wm'),('outputnode.roi_volumes_registered','inputnode.registered_roi_volumes'),
('outputnode.eroded_wm_registered','inputnode.eroded_wm'),('outputnode.eroded_csf_registered','inputnode.eroded_csf'),
('outputnode.eroded_brain_registered','inputnode.eroded_brain')])
])
if self.stages['Functional'].config.scrubbing or self.stages['Functional'].config.motion:
fMRI_flow.connect([
(preproc_flow,func_flow,[("outputnode.par_file","inputnode.motion_par_file")])
])
if self.stages['Connectome'].enabled:
con_flow = self.create_stage_flow("Connectome")
fMRI_flow.connect([
(fMRI_inputnode,con_flow, [('parcellation_scheme','inputnode.parcellation_scheme')]),
(func_flow,con_flow, [('outputnode.func_file','inputnode.func_file'),("outputnode.FD","inputnode.FD"),
("outputnode.DVARS","inputnode.DVARS")]),
(reg_flow,con_flow,[("outputnode.roi_volumes_registered","inputnode.roi_volumes_registered")]),
(con_flow,fMRI_outputnode,[("outputnode.connectivity_matrices","connectivity_matrices")])
])
if self.stages['Parcellation'].config.parcellation_scheme == "Custom":
fMRI_flow.connect([(fMRI_inputnode,con_flow, [('atlas_info','inputnode.atlas_info')])])
# Create NIPYPE flow
flow = pe.Workflow(name='NIPYPE', base_dir=os.path.join(self.base_directory))
flow.connect([
(datasource,common_flow,[("T1","inputnode.T1")]),
(datasource,fMRI_flow,[("fMRI","inputnode.fMRI"),("T1","inputnode.T1"),("T2","inputnode.T2")]),
#......... (part of the code omitted here) .........
Example 14: create_workflow
#......... (part of the code omitted here) .........
oasis_path : string
filepath to the oasis
Returns
-------
wf : nipype.pipeline.engine.Workflow instance
the workflow to be ran for preprocessing
'''
# Import packages
from act_interface import antsCorticalThickness
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces.utility import Function
from nipype import logging as np_logging
from nipype import config
import os
# Init variables
oasis_trt_20 = os.path.join(oasis_path,
'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30.nii')
# Setup nipype workflow
if not os.path.exists(wf_base_dir):
os.makedirs(wf_base_dir)
wf = pe.Workflow(name='thickness_workflow')
wf.base_dir = wf_base_dir
# Init log directory
log_dir = wf_base_dir
# Define antsCorticalThickness node
thickness = pe.Node(antsCorticalThickness(), name='thickness')
# Set antsCorticalThickness inputs
thickness.inputs.dimension = 3
thickness.inputs.segmentation_iterations = 1
thickness.inputs.segmentation_weight = 0.25
thickness.inputs.input_skull = input_anat #-a
thickness.inputs.template = oasis_path + 'T_template0.nii.gz' #-e
thickness.inputs.brain_prob_mask = oasis_path + \
'T_template0_BrainCerebellumProbabilityMask.nii.gz' #-m
thickness.inputs.brain_seg_priors = oasis_path + \
'Priors2/priors%d.nii.gz' #-p
thickness.inputs.intensity_template = oasis_path + \
'T_template0_BrainCerebellum.nii.gz' #-t
thickness.inputs.extraction_registration_mask = oasis_path + \
'T_template0_BrainCerebellumExtractionMask.nii.gz' #-f
thickness.inputs.out_prefix = 'OUTPUT_' #-o
thickness.inputs.keep_intermediate_files = 0 #-k
# Node to run ANTs 3dROIStats
ROIstats = pe.Node(util.Function(input_names=['mask','thickness_normd'],
output_names=['roi_stats_file'],
function=roi_func),
name='ROIstats')
wf.connect(thickness, 'cortical_thickness_normalized',
ROIstats, 'thickness_normd')
ROIstats.inputs.mask = oasis_trt_20
# Create datasink node
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = wf_base_dir
# Connect thickness outputs to datasink
wf.connect(thickness, 'brain_extraction_mask',
datasink, '[email protected]_extr_mask')
wf.connect(thickness, 'brain_segmentation',
datasink, '[email protected]_seg')
wf.connect(thickness, 'brain_segmentation_N4',
datasink, '[email protected]_seg_N4')
wf.connect(thickness, 'brain_segmentation_posteriors_1',
datasink, '[email protected]_seg_post_1')
wf.connect(thickness, 'brain_segmentation_posteriors_2',
datasink, '[email protected]_seg_post_2')
wf.connect(thickness, 'brain_segmentation_posteriors_3',
datasink, '[email protected]_seg_post_3')
wf.connect(thickness, 'brain_segmentation_posteriors_4',
datasink, '[email protected]_seg_post_4')
wf.connect(thickness, 'brain_segmentation_posteriors_5',
datasink, '[email protected]_seg_post_5')
wf.connect(thickness, 'brain_segmentation_posteriors_6',
datasink, '[email protected]_seg_post_6')
wf.connect(thickness, 'cortical_thickness',
datasink, '[email protected]_thickness')
wf.connect(thickness, 'cortical_thickness_normalized',
datasink,'[email protected]_thickness_normalized')
# Connect ROI stats output text file to datasink
wf.connect(ROIstats, 'roi_stats_file', datasink, '[email protected]')
# Setup crashfile directory and logging
wf.config['execution'] = {'hash_method': 'timestamp',
'crashdump_dir': '/home/ubuntu/crashes'}
config.update_config({'logging': {'log_directory': log_dir,
'log_to_file': True}})
np_logging.update_logging(config)
# Return the workflow
return wf
Example 15: main
def main():
"""Entry point"""
parser = ArgumentParser(description='MRI Quality Control',
formatter_class=RawTextHelpFormatter)
g_input = parser.add_argument_group('Inputs')
g_input.add_argument('-B', '--bids-root', action='store', default=os.getcwd())
g_input.add_argument('-i', '--input-folder', action='store')
g_input.add_argument('-S', '--subject-id', nargs='*', action='store')
g_input.add_argument('-s', '--session-id', action='store')
g_input.add_argument('-r', '--run-id', action='store')
g_input.add_argument('-d', '--data-type', action='store', nargs='*',
choices=['anat', 'func'], default=['anat', 'func'])
g_input.add_argument('-v', '--version', action='store_true', default=False,
help='Show current mriqc version')
g_input.add_argument('--nthreads', action='store', default=0,
type=int, help='number of threads')
g_input.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_input.add_argument('--test-run', action='store_true', default=False,
help='Do not run the workflow.')
g_input.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_input.add_argument('--save-memory', action='store_true', default=False,
help='Save as much memory as possible')
g_input.add_argument('--hmc-afni', action='store_true', default=False,
help='Use AFNI 3dvolreg for head motion correction (HMC) and '
'frame displacement (FD) estimation')
g_input.add_argument('--ants-settings', action='store',
help='path to JSON file with settings for ANTS')
g_outputs = parser.add_argument_group('Outputs')
g_outputs.add_argument('-o', '--output-dir', action='store')
g_outputs.add_argument('-w', '--work-dir', action='store', default=op.join(os.getcwd(), 'work'))
opts = parser.parse_args()
bids_root = op.abspath(opts.bids_root)
if opts.input_folder is not None:
warn('The --input-folder flag is deprecated, please use -B instead', DeprecationWarning)
if bids_root == os.getcwd():
bids_root = op.abspath(opts.input_folder)
if opts.version:
print('mriqc version ' + __version__)
exit(0)
settings = {'bids_root': bids_root,
'output_dir': os.getcwd(),
'write_graph': opts.write_graph,
'save_memory': opts.save_memory,
'hmc_afni': opts.hmc_afni,
'nthreads': opts.nthreads}
if opts.output_dir:
settings['output_dir'] = op.abspath(opts.output_dir)
if not op.exists(settings['output_dir']):
os.makedirs(settings['output_dir'])
settings['work_dir'] = op.abspath(opts.work_dir)
with LockFile(settings['work_dir']):
if not op.exists(settings['work_dir']):
os.makedirs(settings['work_dir'])
if opts.ants_settings:
settings['ants_settings'] = opts.ants_settings
log_dir = op.join(settings['work_dir'] + '_log')
if not op.exists(log_dir):
os.makedirs(log_dir)
# Set nipype config
ncfg.update_config({
'logging': {'log_directory': log_dir, 'log_to_file': True},
'execution': {'crashdump_dir': log_dir}
})
plugin_settings = {'plugin': 'Linear'}
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as pfile:
plugin_settings = loadyml(pfile)
else:
# Setup multiprocessing
if settings['nthreads'] == 0:
settings['nthreads'] = cpu_count()
if settings['nthreads'] > 1:
plugin_settings['plugin'] = 'MultiProc'
plugin_settings['plugin_args'] = {'n_procs': settings['nthreads']}
for dtype in opts.data_type:
ms_func = getattr(mwc, 'ms_' + dtype)
workflow = ms_func(subject_id=opts.subject_id, session_id=opts.session_id,
#......... (part of the code omitted here) .........