This article collects typical usage examples of the Python method cylc.flow.LOG.exception. If you are unsure what LOG.exception does or how to call it, the curated code examples below may help; you can also read further about the module cylc.flow that provides it.
Four code examples of LOG.exception are shown below, sorted by popularity by default.
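Before the examples, note that in these snippets LOG is used exactly like a standard logging.Logger, so LOG.exception(exc) records the message at ERROR level together with the active traceback. Below is a minimal, self-contained sketch of that pattern using only the standard library; the logger name "demo" is illustrative, whereas cylc code obtains its logger with "from cylc.flow import LOG" instead.

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("demo")

try:
    1 / 0
except ZeroDivisionError as exc:
    # .exception() must be called from an exception handler; it logs the
    # message at ERROR level and appends the current traceback.
    LOG.exception(exc)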
Example 1: _receiver
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import exception [as alias]
def _receiver(self, message):
    """Wrap incoming messages and dispatch them to exposed methods.

    Args:
        message (dict): message contents
    """
    # determine the server method to call
    try:
        method = getattr(self, message['command'])
        args = message['args']
        args.update({'user': message['user']})
        if 'meta' in message:
            args['meta'] = message['meta']
    except KeyError:
        # malformed message
        return {'error': {
            'message': 'Request missing required field(s).'}}
    except AttributeError:
        # no exposed method by that name
        return {'error': {
            'message': 'No method by the name "%s"' % message['command']}}
    # generate response
    try:
        response = method(**args)
    except Exception as exc:
        # includes incorrect arguments (TypeError)
        LOG.exception(exc)  # note the error server side
        import traceback
        return {'error': {
            'message': str(exc), 'traceback': traceback.format_exc()}}
    return {'data': response}
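The example above combines getattr-based dispatch with LOG.exception: expected request problems (missing fields, unknown commands) become error responses without a logged traceback, while anything unexpected is logged server side and echoed back with traceback.format_exc(). The following stand-alone sketch reproduces just that pattern; the Receiver class and its ping method are hypothetical and not part of cylc.

import logging
import traceback

LOG = logging.getLogger("demo")


class Receiver:
    """Hypothetical stand-in for the server class in the example above."""

    def ping(self, user):
        return "pong for %s" % user

    def _receiver(self, message):
        # Determine the method to call; bad requests become error responses.
        try:
            method = getattr(self, message['command'])
            args = dict(message['args'], user=message['user'])
        except KeyError:
            return {'error': {
                'message': 'Request missing required field(s).'}}
        except AttributeError:
            return {'error': {
                'message': 'No method by the name "%s"' % message['command']}}
        # Anything unexpected is logged server side and echoed to the client.
        try:
            return {'data': method(**args)}
        except Exception as exc:  # includes incorrect arguments (TypeError)
            LOG.exception(exc)
            return {'error': {
                'message': str(exc), 'traceback': traceback.format_exc()}}


print(Receiver()._receiver(
    {'command': 'ping', 'args': {}, 'user': 'alice'}))  # {'data': 'pong for alice'}
print(Receiver()._receiver({'command': 'ping'}))        # missing-field error response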
Example 2: create_directory
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import exception [as alias]
def create_directory(dir_, name):
    """Create directory. Raise GlobalConfigError on error."""
    try:
        os.makedirs(dir_, exist_ok=True)
    except OSError as exc:
        LOG.exception(exc)
        raise GlobalConfigError(
            'Failed to create directory "' + name + '"')
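A hedged usage sketch of the same wrap-and-re-raise pattern: os.makedirs(..., exist_ok=True) still raises OSError when the path exists as a regular file or cannot be created, LOG.exception keeps the low-level traceback in the log, and the caller only sees a configuration-level error. DemoConfigError below is a hypothetical stand-in for GlobalConfigError, whose import path is not shown in the excerpt.

import logging
import os
import tempfile

logging.basicConfig()
LOG = logging.getLogger("demo")


class DemoConfigError(Exception):
    """Hypothetical stand-in for cylc's GlobalConfigError."""


def create_directory(dir_, name):
    """Create directory. Raise DemoConfigError on error."""
    try:
        os.makedirs(dir_, exist_ok=True)
    except OSError as exc:
        LOG.exception(exc)  # keep the low-level traceback in the log
        raise DemoConfigError('Failed to create directory "%s"' % name)


with tempfile.TemporaryDirectory() as tmp:
    blocker = os.path.join(tmp, 'blocker')
    open(blocker, 'w').close()  # a plain file where a directory is wanted
    try:
        create_directory(blocker, 'demo dir')
    except DemoConfigError as exc:
        print(exc)  # Failed to create directory "demo dir"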
Example 3: _manip_task_jobs_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import exception [as alias]
def _manip_task_jobs_callback(
        self, ctx, suite, itasks, summary_callback, more_callbacks=None):
    """Callback when submit/poll/kill tasks command exits."""
    if ctx.ret_code:
        LOG.error(ctx)
    else:
        LOG.debug(ctx)
    # A dict for easy reference of (CYCLE, NAME, SUBMIT_NUM) -> TaskProxy
    #
    # Note for "reload": A TaskProxy instance may be replaced on reload, so
    # the "itasks" list may not reference the TaskProxy objects that
    # replace the old ones. The .reload_successor attribute provides the
    # link(s) for us to get to the latest replacement.
    #
    # Note for "kill": It is possible for a job to trigger its trap and
    # report back to the suite before this logic is called. If so, the task
    # will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING, and
    # its output line will be ignored here.
    tasks = {}
    for itask in itasks:
        while itask.reload_successor is not None:
            itask = itask.reload_successor
        if itask.point is not None and itask.submit_num:
            submit_num = "%02d" % (itask.submit_num)
            tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
    handlers = [(self.batch_sys_mgr.OUT_PREFIX_SUMMARY, summary_callback)]
    if more_callbacks:
        for prefix, callback in more_callbacks.items():
            handlers.append((prefix, callback))
    out = ctx.out
    if not out:
        out = ""
    bad_tasks = dict(tasks)
    for line in out.splitlines(True):
        for prefix, callback in handlers:
            if line.startswith(prefix):
                line = line[len(prefix):].strip()
                try:
                    path = line.split("|", 2)[1]  # timestamp, path, status
                    point, name, submit_num = path.split(os.sep, 2)
                    if prefix == self.batch_sys_mgr.OUT_PREFIX_SUMMARY:
                        del bad_tasks[(point, name, submit_num)]
                    itask = tasks[(point, name, submit_num)]
                    callback(suite, itask, ctx, line)
                except (LookupError, ValueError, KeyError) as exc:
                    LOG.warning(
                        'Unhandled %s output: %s', ctx.cmd_key, line)
                    LOG.exception(exc)
    # Task jobs that are in the original command but did not get a status
    # in the output. Handle as failures.
    for key, itask in sorted(bad_tasks.items()):
        line = (
            "|".join([ctx.timestamp, os.sep.join(key), "1"]) + "\n")
        summary_callback(suite, itask, ctx, line)
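For readers following the parsing logic, each handled output line has the shape PREFIX + "timestamp|cycle/name/submit_num|status", and tasks with no matching summary line get a synthesised failure line with return code "1". The sketch below only illustrates that format; the prefix string and field values are invented, inferred from the code above rather than taken from cylc's actual output.

import os

OUT_PREFIX_SUMMARY = "[TASK JOB SUMMARY]"  # hypothetical prefix string
path = os.sep.join(["20240101T0000Z", "foo", "01"])  # cycle/name/submit_num
line = OUT_PREFIX_SUMMARY + "|".join(["2024-01-01T00:00Z", path, "0"]) + "\n"

if line.startswith(OUT_PREFIX_SUMMARY):
    body = line[len(OUT_PREFIX_SUMMARY):].strip()
    timestamp, task_path, status = body.split("|", 2)
    point, name, submit_num = task_path.split(os.sep, 2)
    print(point, name, submit_num, status)  # 20240101T0000Z foo 01 0

# Tasks missing from the output are reported as failures by building an
# equivalent line with return code "1", exactly as in the example above.
failure_line = (
    "|".join([timestamp, os.sep.join((point, name, submit_num)), "1"]) + "\n")
print(failure_line, end="")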
Example 4: _run_command_init
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import exception [as alias]
def _run_command_init(cls, ctx, callback=None, callback_args=None):
    """Prepare and launch shell command in ctx."""
    try:
        if ctx.cmd_kwargs.get('stdin_files'):
            if len(ctx.cmd_kwargs['stdin_files']) > 1:
                stdin_file = cls.get_temporary_file()
                for file_ in ctx.cmd_kwargs['stdin_files']:
                    if hasattr(file_, 'read'):
                        stdin_file.write(file_.read())
                    else:
                        stdin_file.write(open(file_, 'rb').read())
                stdin_file.seek(0)
            elif hasattr(ctx.cmd_kwargs['stdin_files'][0], 'read'):
                stdin_file = ctx.cmd_kwargs['stdin_files'][0]
            else:
                stdin_file = open(
                    ctx.cmd_kwargs['stdin_files'][0], 'rb')
        elif ctx.cmd_kwargs.get('stdin_str'):
            stdin_file = cls.get_temporary_file()
            stdin_file.write(ctx.cmd_kwargs.get('stdin_str').encode())
            stdin_file.seek(0)
        else:
            stdin_file = open(os.devnull)
        proc = procopen(
            ctx.cmd, stdin=stdin_file, stdoutpipe=True, stderrpipe=True,
            # Execute command as a process group leader,
            # so we can use "os.killpg" to kill the whole group.
            preexec_fn=os.setpgrp,
            env=ctx.cmd_kwargs.get('env'),
            usesh=ctx.cmd_kwargs.get('shell'))
        # calls to open a shell are aggregated in cylc_subproc.procopen()
        # with logging for what is calling it and the commands given
    except (IOError, OSError) as exc:
        if exc.filename is None:
            exc.filename = ctx.cmd[0]
        LOG.exception(exc)
        ctx.ret_code = 1
        ctx.err = str(exc)
        cls._run_command_exit(ctx, callback, callback_args)
        return None
    else:
        LOG.debug(ctx.cmd)
        return proc
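One detail worth noting in the example above is preexec_fn=os.setpgrp: the child is started as a process group leader so that the whole group can later be terminated with os.killpg. Here is a minimal, POSIX-only sketch of that idea using the standard library's subprocess.Popen rather than cylc's procopen wrapper; the "sleep 60" command is only a placeholder.

import os
import signal
import subprocess

proc = subprocess.Popen(
    ["sleep", "60"],
    preexec_fn=os.setpgrp,  # child becomes a process group leader
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE)

# Killing the group takes down the command and anything it spawned.
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
proc.wait()
print(proc.returncode)  # negative signal number, e.g. -15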