This article collects typical code examples of the Python method cylc.flow.LOG.debug. If you are wondering what LOG.debug does and how to use it, the curated examples below should help. You can also explore the containing module, cylc.flow, for more usage examples.
The following presents 15 code examples of the LOG.debug method, sorted by popularity by default. If you find an example useful, give it an upvote; your feedback helps the site recommend better Python code samples.
Example 1: stop
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def stop(self):
"""Finish serving the current request then stop the server."""
LOG.debug('stopping zmq server...')
self.queue.put('STOP')
self.thread.join() # wait for the listener to return
self.socket.close()
LOG.debug('...stopped')
Example 2: poll_task_jobs
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def poll_task_jobs(self, suite, itasks, poll_succ=True, msg=None):
"""Poll jobs of specified tasks.
Any job that is or was submitted or running can be polled, except for
retrying tasks - which would poll (correctly) as failed. Succeeded tasks
are only polled when poll_succ is True.
This method uses _poll_task_jobs_callback() and
_manip_task_jobs_callback() as helper/callback methods.
_poll_task_job_callback() executes one specific job.
"""
to_poll_tasks = []
pollable_statuses = set([
TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, TASK_STATUS_FAILED])
if poll_succ:
pollable_statuses.add(TASK_STATUS_SUCCEEDED)
for itask in itasks:
if itask.state.status in pollable_statuses:
to_poll_tasks.append(itask)
else:
LOG.debug("skipping %s: not pollable, "
"or skipping 'succeeded' tasks" % itask.identity)
if to_poll_tasks:
if msg is not None:
LOG.info(msg)
self._run_job_cmd(
self.JOBS_POLL, suite, to_poll_tasks,
self._poll_task_jobs_callback)
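The status filter in poll_task_jobs is easy to see in isolation. Here is a minimal standalone sketch (plain strings and hypothetical task identities, no cylc objects) of how the pollable-status check selects tasks:
pollable_statuses = {'submitted', 'running', 'failed'}
poll_succ = False  # with the method's default of True, 'succeeded' would also be added
if poll_succ:
    pollable_statuses.add('succeeded')

tasks = [('foo.1', 'running'), ('bar.1', 'succeeded'), ('baz.1', 'waiting')]
to_poll = [ident for ident, status in tasks if status in pollable_statuses]
print(to_poll)  # ['foo.1'] - the only pollable task in this example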
Example 3: addict
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def addict(cfig, key, val, parents, index):
"""Add a new [parents...]key=value pair to a nested dict."""
for p in parents:
# drop down the parent list
cfig = cfig[p]
if not isinstance(cfig, dict):
# an item of this name has already been encountered at this level
raise FileParseError(
'line %d: already encountered %s',
index, itemstr(parents, key, val))
if key in cfig:
# this item already exists
if (key == 'graph' and (
parents == ['scheduling', 'dependencies'] or
len(parents) == 3 and
parents[-3:-1] == ['scheduling', 'dependencies'])):
# append the new graph string to the existing one
LOG.debug('Merging graph strings under %s', itemstr(parents))
if not isinstance(cfig[key], list):
cfig[key] = [cfig[key]]
cfig[key].append(val)
else:
# otherwise override the existing item
LOG.debug(
'overriding %s old value: %s new value: %s',
itemstr(parents, key), cfig[key], val)
cfig[key] = val
else:
cfig[key] = val
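The interesting branch in addict is the graph-string merge: a repeated graph key under [scheduling][dependencies] is accumulated into a list rather than overwritten. A minimal standalone sketch of that rule, using plain dicts and no cylc imports:
cfig = {'scheduling': {'dependencies': {}}}
target = cfig['scheduling']['dependencies']
for graph_string in ('foo => bar', 'bar => baz'):
    if 'graph' in target:
        # same rule as addict(): promote the existing value to a list, then append
        if not isinstance(target['graph'], list):
            target['graph'] = [target['graph']]
        target['graph'].append(graph_string)
    else:
        target['graph'] = graph_string
print(cfig)
# {'scheduling': {'dependencies': {'graph': ['foo => bar', 'bar => baz']}}}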
Example 4: addsect
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def addsect(cfig, sname, parents):
"""Add a new section to a nested dict."""
for p in parents:
# drop down the parent list
cfig = cfig[p]
if sname in cfig:
# this doesn't warrant a warning unless contained items are repeated
LOG.debug(
'Section already encountered: %s', itemstr(parents + [sname]))
else:
cfig[sname] = OrderedDictWithDefaults()
Example 5: _manip_task_jobs_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _manip_task_jobs_callback(
self, ctx, suite, itasks, summary_callback, more_callbacks=None):
"""Callback when submit/poll/kill tasks command exits."""
if ctx.ret_code:
LOG.error(ctx)
else:
LOG.debug(ctx)
# A dict for easy reference of (CYCLE, NAME, SUBMIT_NUM) -> TaskProxy
#
# Note for "reload": A TaskProxy instance may be replaced on reload, so
# the "itasks" list may not reference the TaskProxy objects that
# replace the old ones. The .reload_successor attribute provides the
# link(s) for us to get to the latest replacement.
#
# Note for "kill": It is possible for a job to trigger its trap and
# report back to the suite before this logic is called. If so, the task
# will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING, and
# its output line will be ignored here.
tasks = {}
for itask in itasks:
while itask.reload_successor is not None:
itask = itask.reload_successor
if itask.point is not None and itask.submit_num:
submit_num = "%02d" % (itask.submit_num)
tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
handlers = [(self.batch_sys_mgr.OUT_PREFIX_SUMMARY, summary_callback)]
if more_callbacks:
for prefix, callback in more_callbacks.items():
handlers.append((prefix, callback))
out = ctx.out
if not out:
out = ""
bad_tasks = dict(tasks)
for line in out.splitlines(True):
for prefix, callback in handlers:
if line.startswith(prefix):
line = line[len(prefix):].strip()
try:
path = line.split("|", 2)[1] # timestamp, path, status
point, name, submit_num = path.split(os.sep, 2)
if prefix == self.batch_sys_mgr.OUT_PREFIX_SUMMARY:
del bad_tasks[(point, name, submit_num)]
itask = tasks[(point, name, submit_num)]
callback(suite, itask, ctx, line)
except (LookupError, ValueError, KeyError) as exc:
LOG.warning(
'Unhandled %s output: %s', ctx.cmd_key, line)
LOG.exception(exc)
# Task jobs that are in the original command but did not get a status
# in the output. Handle as failures.
for key, itask in sorted(bad_tasks.items()):
line = (
"|".join([ctx.timestamp, os.sep.join(key), "1"]) + "\n")
summary_callback(suite, itask, ctx, line)
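Each handled output line has the shape <prefix><timestamp>|<cycle>/<name>/<submit_num>|<status>. Below is a standalone sketch of how one such line is split; the prefix string is a made-up stand-in for batch_sys_mgr.OUT_PREFIX_SUMMARY, and POSIX os.sep ('/') is assumed:
import os

OUT_PREFIX_SUMMARY = '[TASK JOB SUMMARY]'  # hypothetical prefix, for illustration only
line = '[TASK JOB SUMMARY]2024-01-01T00:00:00Z|1/foo/01|succeeded\n'
if line.startswith(OUT_PREFIX_SUMMARY):
    payload = line[len(OUT_PREFIX_SUMMARY):].strip()
    path = payload.split('|', 2)[1]  # fields: timestamp, path, status
    point, name, submit_num = path.split(os.sep, 2)
    print(point, name, submit_num)  # -> 1 foo 01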
Example 6: _remote_host_select_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _remote_host_select_callback(self, proc_ctx, cmd_str):
"""Callback when host select command exits"""
self.ready = True
if proc_ctx.ret_code == 0 and proc_ctx.out:
# Good status
LOG.debug(proc_ctx)
self.remote_host_str_map[cmd_str] = proc_ctx.out.splitlines()[0]
else:
# Bad status
LOG.error(proc_ctx)
self.remote_host_str_map[cmd_str] = TaskRemoteMgmtError(
TaskRemoteMgmtError.MSG_SELECT, (cmd_str, None), cmd_str,
proc_ctx.ret_code, proc_ctx.out, proc_ctx.err)
Example 7: async_request
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
async def async_request(self, command, args=None, timeout=None):
"""Send an asynchronous request using asyncio.
Has the same arguments and return values as ``serial_request``.
"""
if timeout:
timeout = float(timeout)
timeout = (timeout * 1000 if timeout else None) or self.timeout
if not args:
args = {}
# get secret for this request
# assumes secret won't change during the request
try:
secret = self.secret()
except cylc.flow.suite_srv_files_mgr.SuiteServiceFileError:
raise ClientError('could not read suite passphrase')
# send message
msg = {'command': command, 'args': args}
msg.update(self.header)
LOG.debug('zmq:send %s' % msg)
message = encrypt(msg, secret)
self.socket.send_string(message)
# receive response
if self.poller.poll(timeout):
res = await self.socket.recv_string()
else:
if self.timeout_handler:
self.timeout_handler()
raise ClientTimeout('Timeout waiting for server response.')
try:
response = decrypt(res, secret)
LOG.debug('zmq:recv %s' % response)
except jose.exceptions.JWTError:
raise ClientError(
'Could not decrypt response. Has the passphrase changed?')
try:
return response['data']
except KeyError:
error = response['error']
raise ClientError(error['message'], error.get('traceback'))
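The timeout handling at the top of async_request converts seconds to the milliseconds expected by zmq.Poller.poll(), falling back to the client's default when no usable value is given. A tiny sketch of just that conversion, with an assumed default of 5000 ms:
DEFAULT_TIMEOUT_MS = 5000  # assumed client default, in milliseconds

def normalise(timeout):
    if timeout:
        timeout = float(timeout)
    return (timeout * 1000 if timeout else None) or DEFAULT_TIMEOUT_MS

print(normalise(2))     # 2000.0
print(normalise(None))  # 5000
print(normalise(0))     # 5000 - a falsy timeout also falls back to the default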
Example 8: _run_command_init
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _run_command_init(cls, ctx, callback=None, callback_args=None):
"""Prepare and launch shell command in ctx."""
try:
if ctx.cmd_kwargs.get('stdin_files'):
if len(ctx.cmd_kwargs['stdin_files']) > 1:
stdin_file = cls.get_temporary_file()
for file_ in ctx.cmd_kwargs['stdin_files']:
if hasattr(file_, 'read'):
stdin_file.write(file_.read())
else:
stdin_file.write(open(file_, 'rb').read())
stdin_file.seek(0)
elif hasattr(ctx.cmd_kwargs['stdin_files'][0], 'read'):
stdin_file = ctx.cmd_kwargs['stdin_files'][0]
else:
stdin_file = open(
ctx.cmd_kwargs['stdin_files'][0], 'rb')
elif ctx.cmd_kwargs.get('stdin_str'):
stdin_file = cls.get_temporary_file()
stdin_file.write(ctx.cmd_kwargs.get('stdin_str').encode())
stdin_file.seek(0)
else:
stdin_file = open(os.devnull)
proc = procopen(
ctx.cmd, stdin=stdin_file, stdoutpipe=True, stderrpipe=True,
# Execute command as a process group leader,
# so we can use "os.killpg" to kill the whole group.
preexec_fn=os.setpgrp,
env=ctx.cmd_kwargs.get('env'),
usesh=ctx.cmd_kwargs.get('shell'))
# calls to open a shell are aggregated in cylc_subproc.procopen()
# with logging for what is calling it and the commands given
except (IOError, OSError) as exc:
if exc.filename is None:
exc.filename = ctx.cmd[0]
LOG.exception(exc)
ctx.ret_code = 1
ctx.err = str(exc)
cls._run_command_exit(ctx, callback, callback_args)
return None
else:
LOG.debug(ctx.cmd)
return proc
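The preexec_fn=os.setpgrp argument is what later allows os.killpg to terminate the command together with any children it spawned. A stdlib-only sketch of the same pattern (POSIX only), using subprocess directly instead of cylc's procopen wrapper:
import os
import signal
import subprocess

proc = subprocess.Popen(
    ['sleep', '60'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    preexec_fn=os.setpgrp)  # the child becomes its own process group leader
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)  # signal the whole group at once
proc.wait()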
Example 9: _prep_submit_task_job_error
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _prep_submit_task_job_error(self, suite, itask, dry_run, action, exc):
"""Helper for self._prep_submit_task_job. On error."""
LOG.debug("submit_num %s" % itask.submit_num)
LOG.debug(traceback.format_exc())
LOG.error(exc)
log_task_job_activity(
SubProcContext(self.JOBS_SUBMIT, action, err=exc, ret_code=1),
suite, itask.point, itask.tdef.name, submit_num=itask.submit_num)
if not dry_run:
# Persist
self.suite_db_mgr.put_insert_task_jobs(itask, {
'is_manual_submit': itask.is_manual_submit,
'try_num': itask.get_try_num(),
'time_submit': get_current_time_string(),
'batch_sys_name': itask.summary.get('batch_sys_name'),
})
itask.is_manual_submit = False
self.task_events_mgr.process_message(
itask, CRITICAL, self.task_events_mgr.EVENT_SUBMIT_FAILED)
Example 10: _dump_item
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _dump_item(path, item, value):
"""Dump "value" to a file called "item" in the directory "path".
1. File permission should already be user-read-write-only on
creation by mkstemp.
2. The combination of os.fsync and os.rename should guarantee
that we don't end up with an incomplete file.
"""
os.makedirs(path, exist_ok=True)
from tempfile import NamedTemporaryFile
handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
try:
handle.write(value.encode())
except AttributeError:
handle.write(value)
os.fsync(handle.fileno())
handle.close()
fname = os.path.join(path, item)
os.rename(handle.name, fname)
LOG.debug('Generated %s', fname)
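The write-then-fsync-then-rename sequence is what keeps readers from ever seeing a half-written file. A stdlib-only sketch of the same pattern, with example path and content values:
import os
from tempfile import NamedTemporaryFile

path, item, value = '/tmp/demo-dir', 'contact', 'HOST=localhost\n'  # example values
os.makedirs(path, exist_ok=True)
handle = NamedTemporaryFile(prefix=item, dir=path, delete=False)
handle.write(value.encode())
os.fsync(handle.fileno())  # flush to disk before exposing the file
handle.close()
os.rename(handle.name, os.path.join(path, item))  # atomic within one filesystem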
Example 11: load
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def load(self):
"""Load or reload configuration from files."""
self.sparse.clear()
self.dense.clear()
LOG.debug("Loading site/user global config files")
conf_path_str = os.getenv("CYLC_CONF_PATH")
if conf_path_str is None:
# CYLC_CONF_PATH not defined, use default locations.
for conf_dir_1, conf_dir_2, conf_type in [
(self.SITE_CONF_DIR, self.SITE_CONF_DIR_OLD,
upgrader.SITE_CONFIG),
(self.USER_CONF_DIR_1, self.USER_CONF_DIR_2,
upgrader.USER_CONFIG)]:
fname1 = os.path.join(conf_dir_1, self.CONF_BASE)
fname2 = os.path.join(conf_dir_2, self.CONF_BASE)
if os.access(fname1, os.F_OK | os.R_OK):
fname = fname1
elif os.access(fname2, os.F_OK | os.R_OK):
fname = fname2
else:
continue
try:
self.loadcfg(fname, conf_type)
except ParsecError as exc:
if conf_type == upgrader.SITE_CONFIG:
# Warn on bad site file (users can't fix it).
LOG.warning(
'ignoring bad %s %s:\n%s', conf_type, fname, exc)
else:
# Abort on bad user file (users can fix it).
LOG.error('bad %s %s', conf_type, fname)
raise
break
elif conf_path_str:
# CYLC_CONF_PATH defined with a value
for path in conf_path_str.split(os.pathsep):
fname = os.path.join(path, self.CONF_BASE)
if os.access(fname, os.F_OK | os.R_OK):
self.loadcfg(fname, upgrader.USER_CONFIG)
# (OK if no global.rc is found, just use system defaults).
self.transform()
Example 12: _remote_init_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _remote_init_callback(self, proc_ctx, host, owner, tmphandle):
"""Callback when "cylc remote-init" exits"""
self.ready = True
try:
tmphandle.close()
except OSError: # E.g. ignore bad unlink, etc
pass
if proc_ctx.ret_code == 0:
for status in (REMOTE_INIT_DONE, REMOTE_INIT_NOT_REQUIRED):
if status in proc_ctx.out:
# Good status
LOG.debug(proc_ctx)
self.remote_init_map[(host, owner)] = status
return
# Bad status
LOG.error(TaskRemoteMgmtError(
TaskRemoteMgmtError.MSG_INIT,
(host, owner), ' '.join(quote(item) for item in proc_ctx.cmd),
proc_ctx.ret_code, proc_ctx.out, proc_ctx.err))
LOG.error(proc_ctx)
self.remote_init_map[(host, owner)] = REMOTE_INIT_FAILED
Example 13: _rank_good_hosts
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _rank_good_hosts(self, all_host_stats):
"""Rank, by specified method, 'good' hosts to return the most suitable.
Take a dictionary of hosts considered 'good' with the corresponding
metric data, and rank them via the method specified in the global
configuration, returning the lowest-ranked (taken as best) host.
"""
# Convert all dict values from full metrics structures to single
# metric data values corresponding to the rank method to rank with.
hosts_with_vals_to_rank = dict(
(host, metric[self.rank_method])
for host, metric in all_host_stats.items())
LOG.debug(
"INFO: host %s values extracted are: %s",
self.rank_method,
"\n".join(" %s: %s" % item
for item in hosts_with_vals_to_rank.items()))
# Sort new dict by value to return ascending-value ordered host list.
sort_asc_hosts = sorted(
hosts_with_vals_to_rank, key=hosts_with_vals_to_rank.get)
base_msg = ("good (metric-returning) hosts were ranked in the "
"following order, from most to least suitable: %s")
if self.rank_method in ("memory", "disk-space:" + self.USE_DISK_PATH):
# Want 'most free' i.e. highest => reverse asc. list for ranking.
LOG.debug(base_msg, ', '.join(sort_asc_hosts[::-1]))
return sort_asc_hosts[-1]
else: # Only a load average method remains here; 'random' was handled earlier.
# Want lowest => ranking given by asc. list.
LOG.debug(base_msg, ', '.join(sort_asc_hosts))
return sort_asc_hosts[0]
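The ranking itself is just a sort on one extracted metric per host; the two branches pick opposite ends of the sorted list depending on whether a higher or lower value is better. A standalone sketch with made-up load averages:
hosts_with_vals_to_rank = {'hostA': 0.5, 'hostB': 0.1, 'hostC': 0.9}  # e.g. load averages
sort_asc_hosts = sorted(hosts_with_vals_to_rank, key=hosts_with_vals_to_rank.get)
print(sort_asc_hosts)      # ['hostB', 'hostA', 'hostC']
print(sort_asc_hosts[0])   # 'hostB' - lowest value wins for a load-average method
print(sort_asc_hosts[-1])  # 'hostC' - highest value wins for 'memory' or 'disk-space'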
Example 14: _listener
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def _listener(self):
"""The server main loop, listen for and serve requests."""
while True:
# process any commands passed to the listener by its parent process
if self.queue.qsize():
command = self.queue.get()
if command == 'STOP':
break
else:
raise ValueError('Unknown command "%s"' % command)
try:
# wait RECV_TIMEOUT for a message
msg = self.socket.recv_string()
except zmq.error.Again:
# timeout, continue with the loop, this allows the listener
# thread to stop
continue
# attempt to decode the message, authenticating the user in the
# process
try:
message = self.decode(msg, self.secret())
except Exception as exc: # purposefully catch generic exception
# failed to decode message, possibly resulting from failed
# authentication
import traceback
return {'error': {
'message': str(exc), 'traceback': traceback.format_exc()}}
else:
# success case - serve the request
LOG.debug('zmq:recv %s', message)
res = self._receiver(message)
response = self.encode(res, self.secret())
LOG.debug('zmq:send %s', res)
# send back the response
self.socket.send_string(response)
sleep(0) # yield control to other threads
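The queue-driven shutdown ties this listener to stop() in Example 1: the parent thread posts 'STOP' and then joins. A stdlib-only sketch of that handshake, with a short sleep standing in for the blocking socket receive:
import queue
import threading
import time

q = queue.Queue()

def listener():
    while True:
        if q.qsize():
            if q.get() == 'STOP':
                break
        time.sleep(0.1)  # stands in for socket.recv_string() with its RECV_TIMEOUT

thread = threading.Thread(target=listener)
thread.start()
q.put('STOP')
thread.join()  # wait for the listener to return, as in Example 1's stop()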
Example 15: create_cylc_run_tree
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import debug [as alias]
def create_cylc_run_tree(self, suite):
"""Create all top-level cylc-run output dirs on the suite host."""
cfg = self.get()
item = 'suite run directory'
idir = self.get_derived_host_item(suite, item)
LOG.debug('creating %s: %s', item, idir)
if cfg['enable run directory housekeeping']:
self.roll_directory(
idir, item, cfg['run directory rolling archive length'])
for item in [
'suite log directory',
'suite job log directory',
'suite config log directory',
'suite work directory',
'suite share directory']:
idir = self.get_derived_host_item(suite, item)
LOG.debug('creating %s: %s', item, idir)
self.create_directory(idir, item)
item = 'temporary directory'
value = cfg[item]
if value:
self.create_directory(value, item)