This page collects typical usage examples of the Python function nipype.config.enable_debug_mode. If you are wondering what enable_debug_mode does in practice, or how to call it in your own code, the curated examples below may help.
The following presents 15 code examples of the enable_debug_mode function, sorted by popularity by default.
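Before the examples, a note on what the call actually does: enable_debug_mode() flips nipype's execution options to their most conservative values and raises all log levels to DEBUG (Example 9 below asserts exactly which settings change). Here is a minimal sketch of the recurring pattern, assuming a standard nipype installation; the explicit logging.update_logging(config) call that most examples include was how older nipype releases propagated the new levels, while recent releases apply them inside enable_debug_mode() itself:

from nipype import config, logging

# Switch execution options (stop_on_first_crash, keep_inputs -> true;
# remove_unnecessary_outputs -> false) and all log levels to DEBUG.
config.enable_debug_mode()

# Re-apply the configured levels to nipype's loggers; redundant on recent
# nipype versions but harmless, and required on older ones.
logging.update_logging(config)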
Example 1: check_mask_coverage
def check_mask_coverage(epi,brainmask):
from os.path import abspath
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
from nilearn import plotting
from nipype.interfaces.nipy.preprocess import Trim
trim = Trim()
trim.inputs.in_file = epi
trim.inputs.end_index = 1
trim.inputs.out_file = 'epi_vol1.nii.gz'
trim.run()
epi_vol = abspath('epi_vol1.nii.gz')
maskcheck_filename='maskcheck.png'
display = plotting.plot_anat(epi_vol, display_mode='ortho',
draw_cross=False,
title = 'brainmask coverage')
display.add_contours(brainmask,levels=[.5], colors='r')
display.savefig(maskcheck_filename)
display.close()
maskcheck_file = abspath(maskcheck_filename)
return(maskcheck_file)
Example 2: sort_pes
def sort_pes(pes):
from nipype import config, logging
from nipype.interfaces.fsl import Merge
from os.path import abspath
config.enable_debug_mode()
logging.update_logging(config)
print(pes)
pe1s = []
pe0s = []
for file in pes:
if 'pe0' in file:
pe0s.append(file)
elif 'pe1' in file:
pe1s.append(file)
pe1s = sorted(pe1s)
pe0s = sorted(pe0s)
me = Merge()
merged_pes = []
    for i in range(len(pe1s)):
        num = pe1s[i][-12:-11]  # assumed: single-digit run number embedded in the pe1 filename
        me.inputs.in_files = [pe1s[i], pe0s[i]]  # pairing relies on pe0s/pe1s sorting identically
        me.inputs.dimension = 't'
        me.inputs.merged_file = 'merged_pes%s.nii.gz' % num
me.run()
file = abspath('merged_pes%s.nii.gz' % num)
merged_pes.append(file)
return(merged_pes)
Example 3: run
def run(args):
    """Get and process specific information"""
    # Assumes the fitz package's module-level imports (os, os.path as op,
    # sys, imp) and its helper functions (gather_project_info, install, ...).
project = gather_project_info()
exp = gather_experiment_info(args.experiment, args.model)
# Subject is always highest level of parameterization
subject_list = determine_subjects(args.subjects)
subj_source = make_subject_source(subject_list)
# Get the full correct name for the experiment
if args.experiment is None:
exp_name = project["default_exp"]
else:
exp_name = args.experiment
exp['exp_name'] = exp_name
exp['model_name'] = args.model if args.model else ''
# Set roots of output storage
project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
project['working_dir'] = op.join(project["working_dir"], exp_name,
exp['model_name'])
config.set("execution", "crashdump_dir", project["crash_dir"])
if args.verbose > 0:
config.set("logging", "filemanip_level", 'DEBUG')
config.enable_debug_mode()
logging.update_logging(config)
if not op.exists(project['analysis_dir']):
os.makedirs(project['analysis_dir'])
workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
'workflows')
if not op.isdir(workflows_dir):
missing_pipe = 'raise'
if missing_pipe == 'install':
install(args)
else:
raise IOError("Run `fitz install` to set up your pipeline of "
"workflows, %s does not exist." % workflows_dir)
sys.path.insert(0, workflows_dir)
for wf_name in args.workflows:
try:
mod = imp.find_module(wf_name)
wf_module = imp.load_module("wf", *mod)
except (IOError, ImportError):
print "Could not find any workflows matching %s" % wf_name
raise
params = update_params(wf_module, exp)
workflow = wf_module.workflow_manager(
project, params, args, subj_source)
# Run the pipeline
plugin, plugin_args = determine_engine(args)
workflow.write_graph(str(workflow)+'.dot', format='svg')
if not args.dontrun:
workflow.run(plugin, plugin_args)
Example 4: setup_environment
def setup_environment(argv):
print("Configuring environment...")
import os
import os.path
from BAW.utilities.configFileParser import resolveDataSinkOption, parseFile
from BAW.utilities.pathHandling import validatePath
from BAW.utilities import misc
from collections import OrderedDict # Need OrderedDict internally to ensure consistent ordering
environment, experiment, pipeline, cluster = parseFile(
argv["--ExperimentConfig"], argv["--pe"], argv["--workphase"])
pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)
if cluster is None:
print("Running on local")
# raise NotImplementedError("Running local has old code and has not been tested!")
# assert argv["--wfrun"] in argvWFRUN, \
# "wfrun options for clusters can only be given when the configuration file's CLUSTER option == True"
# os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
else:
load_modules(cluster['modules']) # Load modules if not already done ## MODS PATH
# print os.environ['LOADEDMODULES']
# if environment['virtualenv_dir'] is not None: # MODS PATH
# activate_this = validatePath(
# os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
# if os.path.exists( activate_this ) :
# exec(open(activate_this).read(), OrderedDict(__file__=activate_this))
utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utilities')
configure_env = validatePath(os.path.join(utilities_path, 'configure_env.py'), False, False)
# Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
environment['env']['PYTHONPATH'] = environment['env']['PYTHONPATH'] + ":" + os.path.dirname(__file__)
exec(open(configure_env).read(), OrderedDict(__file__=__file__,
append_os_path=environment['env']['PATH'],
append_sys_path=environment['env']['PYTHONPATH'])
) # MODS PATH
print(("@" * 80))
print((environment['env']['PYTHONPATH']))
print(("@" * 80))
print((environment['env']['PATH']))
print(("@" * 80))
from nipype import config
config.enable_debug_mode()
# config.enable_provenance()
from BAW.utilities.package_check import verify_packages
verify_packages()
if 'FREESURFER' in experiment['components']: # FREESURFER MODS
configure_FS = validatePath(os.path.join(utilities_path, 'utilities', 'configure_FS.py'), False, False)
exec(open(configure_FS).read(), OrderedDict(FS_VARS=misc.FS_VARS, env=environment['env']))
print("FREESURFER needs to check for sane environment here!") # TODO: raise warning, write method, what???
for key, value in list(environment['env'].items()):
if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
pass
else:
os.environ[key] = value # Do not use os.putenv (see Python documentation)
return environment, experiment, pipeline, cluster
Example 5: combine_masks
def combine_masks(mask1, mask2):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    # Return both masks as a list; the actual merge presumably happens in a
    # downstream fsl Merge node.
    vols = [mask1, mask2]
    return vols
Example 6: run_examples
def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True):
    # Assumes module-level imports: os, sys, shutil.rmtree, and multiprocessing.cpu_count.
    from nipype import config
    from nipype.interfaces.base import CommandLine
if plugin is None:
plugin = 'MultiProc'
print('running example: %s with plugin: %s' % (example, plugin))
config.enable_debug_mode()
config.enable_provenance()
CommandLine.set_default_terminal_output("stream")
plugin_args = {}
if plugin == 'MultiProc':
plugin_args['n_procs'] = int(
os.getenv('NIPYPE_NUMBER_OF_CPUS', cpu_count()))
__import__(example)
for pipeline in pipelines:
wf = getattr(sys.modules[example], pipeline)
wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
results_dir = os.path.join(wf.base_dir, wf.name)
if rm_base_dir and os.path.exists(results_dir):
rmtree(results_dir)
# Handle a logging directory
log_dir = os.path.join(os.getcwd(), 'logs', example)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
wf.config = {
'execution': {
'hash_method': 'timestamp',
'stop_on_first_rerun': 'true',
'write_provenance': 'true',
'poll_sleep_duration': 2
},
'logging': {
'log_directory': log_dir,
'log_to_file': True
}
}
try:
wf.inputs.inputnode.in_data = os.path.abspath(data_path)
except AttributeError:
pass # the workflow does not have inputnode.in_data
wf.run(plugin=plugin, plugin_args=plugin_args)
# run twice to check if nothing is rerunning
wf.run(plugin=plugin)
Example 7: convertafni
def convertafni(in_file):
from nipype.interfaces.afni.utils import AFNItoNIFTI
from os import path
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
cvt = AFNItoNIFTI()
cvt.inputs.in_file = in_file
cvt.inputs.out_file = 'func_filtered.nii.gz'
cvt.run()
out_file = path.abspath('func_filtered.nii.gz')
return(out_file)
Example 8: main
def main(argv=None):
import os
import sys
from nipype import config
config.enable_debug_mode()
#-------------------------------- argument parser
import argparse
argParser = argparse.ArgumentParser( description="""****************************
10-cross validation analysis
""")
# workup arguments
argWfGrp = argParser.add_argument_group( 'argWfGrp', """****************************
auto workflow arguments for cross validation
""")
argWfGrp.add_argument('--experimentalConfigurationFile',
help="""experimentalConfigurationFile
Configuration file name with FULL PATH""",
dest='experimentalConfigurationFile', required=True)
    argWfGrp.add_argument('--expDir', help="""expDir""",
                          dest='expDir', required=False, default=".")
    argWfGrp.add_argument('--baseDir', help="""baseDir""",
                          dest='baseDir', required=False, default=".")
    argWfGrp.add_argument('--runOption', help="""runOption [local/cluster]""",
                          dest='runOption', required=False, default="local")
    argWfGrp.add_argument('--PythonBinDir', help="""PythonBinDir [local/cluster]""",
                          dest='PythonBinDir', required=False, default="NA")
    argWfGrp.add_argument('--BRAINSToolsSrcDir', help="""BRAINSToolsSrcDir [local/cluster]""",
                          dest='BRAINSToolsSrcDir', required=False, default="NA")
    argWfGrp.add_argument('--BRAINSToolsBuildDir', help="""BRAINSToolsBuildDir [local/cluster]""",
                          dest='BRAINSToolsBuildDir', required=False, default="NA")
args = argParser.parse_args()
similarityComputeWorkflow(args.expDir,
args.baseDir,
args.experimentalConfigurationFile,
args.runOption,
args.PythonBinDir,
args.BRAINSToolsSrcDir,
args.BRAINSToolsBuildDir)
Example 9: test_debug_mode
def test_debug_mode():
from ... import logging
sofc_config = config.get('execution', 'stop_on_first_crash')
ruo_config = config.get('execution', 'remove_unnecessary_outputs')
ki_config = config.get('execution', 'keep_inputs')
wf_config = config.get('logging', 'workflow_level')
if_config = config.get('logging', 'interface_level')
ut_config = config.get('logging', 'utils_level')
wf_level = logging.getLogger('nipype.workflow').level
if_level = logging.getLogger('nipype.interface').level
ut_level = logging.getLogger('nipype.utils').level
config.enable_debug_mode()
# Check config is updated and logging levels, too
assert config.get('execution', 'stop_on_first_crash') == 'true'
assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
assert config.get('execution', 'keep_inputs') == 'true'
assert config.get('logging', 'workflow_level') == 'DEBUG'
assert config.get('logging', 'interface_level') == 'DEBUG'
assert config.get('logging', 'utils_level') == 'DEBUG'
assert logging.getLogger('nipype.workflow').level == 10
assert logging.getLogger('nipype.interface').level == 10
assert logging.getLogger('nipype.utils').level == 10
# Restore config and levels
config.set('execution', 'stop_on_first_crash', sofc_config)
config.set('execution', 'remove_unnecessary_outputs', ruo_config)
config.set('execution', 'keep_inputs', ki_config)
config.set('logging', 'workflow_level', wf_config)
config.set('logging', 'interface_level', if_config)
config.set('logging', 'utils_level', ut_config)
logging.update_logging(config)
assert config.get('execution', 'stop_on_first_crash') == sofc_config
assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
assert config.get('execution', 'keep_inputs') == ki_config
assert config.get('logging', 'workflow_level') == wf_config
assert config.get('logging', 'interface_level') == if_config
assert config.get('logging', 'utils_level') == ut_config
assert logging.getLogger('nipype.workflow').level == wf_level
assert logging.getLogger('nipype.interface').level == if_level
assert logging.getLogger('nipype.utils').level == ut_level
Example 10: combine_par
def combine_par(par_list):
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
from os.path import abspath
from numpy import vstack, savetxt, genfromtxt
motion = genfromtxt(par_list[0], dtype=float)
if len(par_list)>1:
for file in par_list[1:]:
            temp = genfromtxt(file, dtype=float)
motion=vstack((motion,temp))
filename = 'motion.par'
savetxt(filename, motion, delimiter=' ')
combined_par = abspath(filename)
return(combined_par)
Example 11: create_coreg_plot
def create_coreg_plot(epi,anat):
import os
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
from nilearn import plotting
coreg_filename='coregistration.png'
display = plotting.plot_anat(epi, display_mode='ortho',
draw_cross=False,
title = 'coregistration to anatomy')
display.add_edges(anat)
display.savefig(coreg_filename)
display.close()
coreg_file = os.path.abspath(coreg_filename)
return(coreg_file)
Example 12: run_examples
def run_examples(example, pipelines, plugin):
    # Assumes module-level imports: os, sys, and shutil.rmtree.
    print('running example: %s with plugin: %s' % (example, plugin))
    from nipype import config
    config.enable_debug_mode()
    from nipype.interfaces.base import CommandLine
    CommandLine.set_default_terminal_output("stream")
__import__(example)
for pipeline in pipelines:
wf = getattr(sys.modules[example], pipeline)
wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
if os.path.exists(wf.base_dir):
rmtree(wf.base_dir)
wf.config = {'execution': {'hash_method': 'timestamp', 'stop_on_first_rerun': 'true'}}
wf.run(plugin=plugin, plugin_args={'n_procs': 4})
# run twice to check if nothing is rerunning
wf.run(plugin=plugin)
Example 13: brightthresh
def brightthresh(func):
import nibabel as nib
    from numpy import median
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_fdata()  # float image data; replaces the deprecated get_data()
    brain_values = func_data[func_data > 0]  # nonzero voxel intensities, not their indices
    median_thresh = median(brain_values)
bright_thresh = 0.75 * median_thresh
return(bright_thresh)
Example 14: combine_fd
def combine_fd(fd_list):
from nipype import config, logging
config.enable_debug_mode()
logging.update_logging(config)
from os.path import abspath
from numpy import asarray, savetxt
motion = open(fd_list[0]).read().splitlines()
if len(fd_list)>1:
for file in fd_list[1:]:
temp = open(file).read().splitlines()
motion = motion+temp
motion = asarray(motion).astype(float)
filename = 'FD_full.txt'
savetxt(filename,motion)
combined_fd = abspath(filename)
return(combined_fd)
Example 15: main
def main(argv=None):
import os
import sys
from nipype import config
config.enable_debug_mode()
#-------------------------------- argument parser
import argparse
argParser = argparse.ArgumentParser( description="""****************************
similarity computation between two labels
""")
# workup arguments
argParser.add_argument('--labelMapFilename1',
help="""a filename that will be compared to. """,
dest='labelMapFilename1', required=False)
argParser.add_argument('--labelMapFilename2',
help="""a filename that will be compared to. """,
dest='labelMapFilename2', required=False)
argParser.add_argument('--outputCSVFilename',
help="""a filename that will store comparative results to. """,
dest='outputCSVFilename', required=False)
argParser.add_argument('--doUnitTest', action='store_true',
help="""Do unit test if given""",
dest='doUnitTest', required=False)
args = argParser.parse_args()
    action = False
    if args.doUnitTest:
        unitTest()
        action = True
    if args.labelMapFilename1 or args.labelMapFilename2:
        print(os.path.abspath(args.labelMapFilename1))
        print(os.path.abspath(args.labelMapFilename2))
        print(os.path.abspath(args.outputCSVFilename))
        computeSimilarity(os.path.abspath(args.labelMapFilename1),
                          os.path.abspath(args.labelMapFilename2),
                          os.path.abspath(args.outputCSVFilename))
        action = True
if not action:
print """ ***