本文整理汇总了Python中cylc.LOG.warning方法的典型用法代码示例。如果您正苦于以下问题:Python LOG.warning方法的具体用法?Python LOG.warning怎么用?Python LOG.warning使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所属对象cylc.LOG(模块级日志记录器)
的用法示例。
在下文中一共展示了LOG.warning方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _job_cmd_out_callback
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def _job_cmd_out_callback(suite, itask, cmd_ctx, line):
    """Callback on job command STDOUT/STDERR.

    Prefix each output line with "(user@host) " identifying where the
    command ran, append it to the task's job activity log, and emit it
    to the suite log as a warning.

    Args:
        suite (str): suite name.
        itask: task proxy the output belongs to.
        cmd_ctx: command context; its cmd_kwargs may carry "host"/"user".
        line (str): one line of command STDOUT/STDERR.
    """
    # Build the "(owner@host) " prefix from whichever of host/user are set.
    # (Fixed: the %-format strings here were corrupted by an email
    # obfuscator; "%(user)[" is an invalid conversion and raises ValueError.)
    if cmd_ctx.cmd_kwargs.get("host") and cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("host"):
        owner_at_host = "(%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@localhost) " % cmd_ctx.cmd_kwargs
    else:
        owner_at_host = ""
    # Lines are expected as "timestamp|stream|content"; drop the middle
    # field.  A malformed line is passed through unchanged.
    try:
        timestamp, _, content = line.split("|")
    except ValueError:
        pass
    else:
        line = "%s %s" % (timestamp, content)
    job_activity_log = get_task_job_activity_log(
        suite, itask.point, itask.tdef.name)
    try:
        with open(job_activity_log, "ab") as handle:
            if not line.endswith("\n"):
                line += "\n"
            handle.write((owner_at_host + line).encode())
    except IOError as exc:
        # Best effort only: report the write failure, then still mirror
        # the line to the suite log below.
        LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
    LOG.warning("[%s] -%s%s", itask, owner_at_host, line)
示例2: check_job_time
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def check_job_time(self, itask, now):
    """Check/handle job timeout and poll timer.

    Args:
        itask: task proxy to check.
        now (float): current time (seconds since epoch).

    Returns:
        True if the task should be polled now, else the result of
        self.check_poll_time (bool).
    """
    can_poll = self.check_poll_time(itask, now)
    if itask.timeout is None or now <= itask.timeout:
        return can_poll
    # Timeout reached for task, emit event and reset itask.timeout
    # (Fixed: time_ref/event were previously unbound for any status other
    # than running/submitted, causing a NameError below.)
    time_ref = None
    event = None
    if itask.state.status == TASK_STATUS_RUNNING:
        time_ref = itask.summary['started_time']
        event = 'execution timeout'
    elif itask.state.status == TASK_STATUS_SUBMITTED:
        time_ref = itask.summary['submitted_time']
        event = 'submission timeout'
    if event is None:
        # Task is in neither state; no timeout event applies.
        return can_poll
    msg = event
    try:
        msg += ' after %s' % intvl_as_str(itask.timeout - time_ref)
    except (TypeError, ValueError):
        # Badness in time_ref?
        pass
    itask.timeout = None  # emit event only once
    LOG.warning('[%s] -%s', itask, msg)
    self.setup_event_handlers(itask, event, msg)
    return True
示例3: remote_tidy
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def remote_tidy(self):
    """Remove suite contact files from initialised remotes.
    Call "cylc remote-tidy".
    This method is called on suite shutdown, so we want nothing to hang.
    Timeout any incomplete commands after 10 seconds.
    Also remove UUID file on suite host ".service/uuid".
    """
    # Remove UUID file
    uuid_fname = os.path.join(
        self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
        FILE_BASE_UUID)
    try:
        os.unlink(uuid_fname)
    except OSError:
        # Best effort: file may already be gone.
        pass
    # Issue all SSH commands in parallel
    procs = {}
    for (host, owner), init_with_contact in self.remote_init_map.items():
        if init_with_contact != REMOTE_INIT_DONE:
            continue
        # "timeout 10" guards against an unresponsive remote.
        cmd = ['timeout', '10', 'cylc', 'remote-tidy']
        if is_remote_host(host):
            cmd.append('--host=%s' % host)
        if is_remote_user(owner):
            cmd.append('--user=%s' % owner)
        if cylc.flags.debug:
            cmd.append('--debug')
        cmd.append(os.path.join(glbl_cfg().get_derived_host_item(
            self.suite, 'suite run directory', host, owner)))
        procs[(host, owner)] = (
            cmd,
            Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=open(os.devnull)))
    # Wait for commands to complete for a max of 10 seconds
    timeout = time() + 10.0
    while procs and time() < timeout:
        for (host, owner), (cmd, proc) in procs.copy().items():
            if proc.poll() is None:
                continue
            del procs[(host, owner)]
            out, err = (f.decode() for f in proc.communicate())
            if proc.wait():
                LOG.warning(TaskRemoteMgmtError(
                    TaskRemoteMgmtError.MSG_TIDY,
                    (host, owner), ' '.join(quote(item) for item in cmd),
                    proc.returncode, out, err))
    # Terminate any remaining commands
    for (host, owner), (cmd, proc) in procs.items():
        try:
            proc.terminate()
        except OSError:
            pass
        # Fixed: decode here too, for consistency with the loop above
        # (otherwise raw bytes end up in the error message).
        out, err = (f.decode() for f in proc.communicate())
        if proc.wait():
            LOG.warning(TaskRemoteMgmtError(
                TaskRemoteMgmtError.MSG_TIDY,
                (host, owner), ' '.join(quote(item) for item in cmd),
                proc.returncode, out, err))
示例4: recover_pub_from_pri
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def recover_pub_from_pri(self):
    """Recover public database from private database.

    If the public database has hit its retry limit, rebuild it from the
    private one and reset the retry counter.
    """
    pub = self.pub_dao
    if pub.n_tries < pub.MAX_TRIES:
        # Not enough failures yet; nothing to recover.
        return
    self.copy_pri_to_pub()
    LOG.warning(
        "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
            "pub_db_name": pub.db_file_name,
            "pri_db_name": self.pri_dao.db_file_name})
    pub.n_tries = 0
示例5: _manip_task_jobs_callback
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def _manip_task_jobs_callback(
        self, ctx, suite, itasks, summary_callback, more_callbacks=None):
    """Callback when submit/poll/kill tasks command exits.

    Parse the command output line by line, dispatching each recognised
    prefixed line to the matching callback; tasks with no summary line
    in the output are handled as failures via summary_callback.

    Args:
        ctx: command context (carries ret_code, out, timestamp, cmd_key).
        suite (str): suite name.
        itasks (list): task proxies the command was issued for.
        summary_callback (callable): handler for summary output lines.
        more_callbacks (dict): optional extra {prefix: callback} handlers.
    """
    if ctx.ret_code:
        LOG.error(ctx)
    else:
        LOG.debug(ctx)
    # A dict for easy reference of (CYCLE, NAME, SUBMIT_NUM) -> TaskProxy
    #
    # Note for "reload": A TaskProxy instance may be replaced on reload, so
    # the "itasks" list may not reference the TaskProxy objects that
    # replace the old ones. The .reload_successor attribute provides the
    # link(s) for us to get to the latest replacement.
    #
    # Note for "kill": It is possible for a job to trigger its trap and
    # report back to the suite before this logic is called. If so, the task
    # will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING, and
    # its output line will be ignored here.
    tasks = {}
    for itask in itasks:
        while itask.reload_successor is not None:
            itask = itask.reload_successor
        if itask.point is not None and itask.submit_num:
            submit_num = "%02d" % (itask.submit_num)
            tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
    handlers = [(self.batch_sys_mgr.OUT_PREFIX_SUMMARY, summary_callback)]
    if more_callbacks:
        for prefix, callback in more_callbacks.items():
            handlers.append((prefix, callback))
    out = ctx.out
    if not out:
        out = ""
    bad_tasks = dict(tasks)
    for line in out.splitlines(True):
        for prefix, callback in handlers:
            if line.startswith(prefix):
                line = line[len(prefix):].strip()
                try:
                    path = line.split("|", 2)[1]  # timestamp, path, status
                    point, name, submit_num = path.split(os.sep, 2)
                    if prefix == self.batch_sys_mgr.OUT_PREFIX_SUMMARY:
                        del bad_tasks[(point, name, submit_num)]
                    itask = tasks[(point, name, submit_num)]
                    callback(suite, itask, ctx, line)
                # KeyError is a subclass of LookupError, so listing it
                # separately was redundant.
                except (LookupError, ValueError) as exc:
                    LOG.warning(
                        'Unhandled %s output: %s', ctx.cmd_key, line)
                    LOG.exception(exc)
    # Task jobs that are in the original command but did not get a status
    # in the output. Handle as failures.
    for key, itask in sorted(bad_tasks.items()):
        line = (
            "|".join([ctx.timestamp, os.sep.join(key), "1"]) + "\n")
        summary_callback(suite, itask, ctx, line)
示例6: execute_queued_items
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def execute_queued_items(self):
    """Execute queued items for each table.

    Runs all queued DELETE, INSERT and UPDATE statements in a single
    transaction.  On success the queues are cleared; on sqlite3.Error
    the private database re-raises, while the public database logs,
    rolls back and leaves the queues intact for a later retry.  The
    connection is closed in all cases.
    """
    try:
        for table in self.tables.values():
            # DELETE statements may have varying number of WHERE args so we
            # can only executemany for each identical template statement.
            for stmt, stmt_args_list in table.delete_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
            # INSERT statements are uniform for each table, so all INSERT
            # statements can be executed using a single "executemany" call.
            if table.insert_queue:
                self._execute_stmt(
                    table.get_insert_stmt(), table.insert_queue)
            # UPDATE statements can have varying number of SET and WHERE
            # args so we can only executemany for each identical template
            # statement.
            for stmt, stmt_args_list in table.update_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
        # Connection should only be opened if we have executed something.
        if self.conn is None:
            return
        self.conn.commit()
    except sqlite3.Error:
        # Private database: fail hard.  Public database: count the
        # failed attempt and roll back; queues stay populated for retry.
        if not self.is_public:
            raise
        self.n_tries += 1
        LOG.warning(
            "%(file)s: write attempt (%(attempt)d) did not complete\n" % {
                "file": self.db_file_name, "attempt": self.n_tries})
        if self.conn is not None:
            try:
                self.conn.rollback()
            except sqlite3.Error:
                pass
        return
    else:
        # Clear the queues
        for table in self.tables.values():
            table.delete_queues.clear()
            del table.insert_queue[:]  # list.clear avail from Python 3.3
            table.update_queues.clear()
        # Report public database retry recovery if necessary
        if self.n_tries:
            LOG.warning(
                "%(file)s: recovered after (%(attempt)d) attempt(s)\n" % {
                    "file": self.db_file_name, "attempt": self.n_tries})
            self.n_tries = 0
    finally:
        # Note: This is not strictly necessary. However, if the suite run
        # directory is removed, a forced reconnection to the private
        # database will ensure that the suite dies.
        self.close()
示例7: _process_message_started
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def _process_message_started(self, itask, event_time):
    """Helper for process_message, handle a started message.

    Move the task to the running state, record the start time, fire
    'started' event handlers, reset job timers and persist the run time
    to the suite database.
    """
    self.pflag = True
    if itask.job_vacated:
        # A previously vacated job has come back to life.
        itask.job_vacated = False
        LOG.warning("[%s] -Vacated job restarted", itask)
    if itask.state.reset_state(TASK_STATUS_RUNNING):
        self.setup_event_handlers(itask, 'started', 'job started')
    itask.set_summary_time('started', event_time)
    self._reset_job_timers(itask)
    db_update = {"time_run": itask.summary['started_time_string']}
    self.suite_db_mgr.put_update_task_jobs(itask, db_update)
    # submission was successful so reset submission try number
    timers = itask.try_timers
    if TASK_STATUS_SUBMIT_RETRYING in timers:
        timers[TASK_STATUS_SUBMIT_RETRYING].num = 0
示例8: kill_task_jobs
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def kill_task_jobs(self, suite, itasks):
    """Kill jobs of active tasks, and hold the tasks.
    If items is specified, kill active tasks matching given IDs.
    """
    to_kill_tasks = []
    for itask in itasks:
        # Only tasks in an active state can be killed; warn and skip
        # the rest.
        if itask.state.status not in TASK_STATUSES_ACTIVE:
            LOG.warning('skipping %s: task not killable' % itask.identity)
            continue
        itask.state.set_held()
        to_kill_tasks.append(itask)
    self._run_job_cmd(
        self.JOBS_KILL, suite, to_kill_tasks,
        self._kill_task_jobs_callback)
示例9: load
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def load(self):
    """Load or reload configuration from files.

    If CYLC_CONF_PATH is unset, search the default site then user
    config directories (preferring the newer location of each pair).
    If CYLC_CONF_PATH is set, load from each path in it.  A bad site
    file is warned about and skipped; a bad user file aborts.
    """
    self.sparse.clear()
    self.dense.clear()
    LOG.debug("Loading site/user global config files")
    conf_path_str = os.getenv("CYLC_CONF_PATH")
    if conf_path_str is None:
        # CYLC_CONF_PATH not defined, use default locations.
        for conf_dir_1, conf_dir_2, conf_type in [
                (self.SITE_CONF_DIR, self.SITE_CONF_DIR_OLD,
                 upgrader.SITE_CONFIG),
                (self.USER_CONF_DIR_1, self.USER_CONF_DIR_2,
                 upgrader.USER_CONFIG)]:
            # Prefer the first (newer) directory of each pair; fall
            # back to the second; skip if neither file is readable.
            fname1 = os.path.join(conf_dir_1, self.CONF_BASE)
            fname2 = os.path.join(conf_dir_2, self.CONF_BASE)
            if os.access(fname1, os.F_OK | os.R_OK):
                fname = fname1
            elif os.access(fname2, os.F_OK | os.R_OK):
                fname = fname2
            else:
                continue
            try:
                self.loadcfg(fname, conf_type)
            except ParsecError as exc:
                if conf_type == upgrader.SITE_CONFIG:
                    # Warn on bad site file (users can't fix it).
                    LOG.warning(
                        'ignoring bad %s %s:\n%s', conf_type, fname, exc)
                else:
                    # Abort on bad user file (users can fix it).
                    LOG.error('bad %s %s', conf_type, fname)
                    raise
            # Only the first readable file of each pair is loaded.
            break
    elif conf_path_str:
        # CYLC_CONF_PATH defined with a value
        for path in conf_path_str.split(os.pathsep):
            fname = os.path.join(path, self.CONF_BASE)
            if os.access(fname, os.F_OK | os.R_OK):
                self.loadcfg(fname, upgrader.USER_CONFIG)
    # (OK if no global.rc is found, just use system defaults).
    self.transform()
示例10: _execute_stmt
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def _execute_stmt(self, stmt, stmt_args_list):
    """Helper for "self.execute_queued_items".

    Execute one statement template against all queued argument tuples.
    Any sqlite3.Error is re-raised; for the public database a warning
    detailing the statement and its arguments is logged first (plus a
    traceback in debug mode).
    """
    try:
        self.connect()
        self.conn.executemany(stmt, stmt_args_list)
    except sqlite3.Error:
        if not self.is_public:
            raise
        if cylc.flags.debug:
            traceback.print_exc()
        err_log = (
            "cannot execute database statement:\n"
            "file=%(file)s:\nstmt=%(stmt)s"
        ) % {"file": self.db_file_name, "stmt": stmt}
        # Append every argument tuple so the failure is reproducible.
        arg_lines = [
            "\nstmt_args[%(i)d]=%(stmt_args)s" % {
                "i": i, "stmt_args": stmt_args}
            for i, stmt_args in enumerate(stmt_args_list)]
        LOG.warning(err_log + "".join(arg_lines))
        raise
示例11: process_message
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def process_message(
self, itask, severity, message, event_time=None, flag='',
submit_num=None):
"""Parse an incoming task message and update task state.
Incoming, e.g. "succeeded at <TIME>", may be from task job or polling.
It is possible for my current state to be inconsistent with an incoming
message (whether normal or polled) e.g. due to a late poll result, or a
network outage, or manual state reset. To handle this, if a message
would take the task state backward, issue a poll to confirm instead of
changing state - then always believe the next message. Note that the
next message might not be the result of this confirmation poll, in the
unlikely event that a job emits a succession of messages very quickly,
but this is the best we can do without somehow uniquely associating
each poll with its result message.
Arguments:
itask (cylc.task_proxy.TaskProxy):
The task proxy object relevant for the message.
severity (str or int):
Message severity, should be a recognised logging level.
message (str):
Message content.
event_time (str):
Event time stamp. Expect ISO8601 date time string.
If not specified, use current time.
flag (str):
If specified, can be INCOMING_FLAG to indicate an incoming
message, POLLED_FLAG to indicate a message resulted from a
poll. Otherwise, the message is assumed to be generated by the
logic in the suite server program.
submit_num (int):
The submit number of the task relevant for the message.
If not specified, use latest submit number.
Return:
None: in normal circumstances.
True: if polling is required to confirm a reversal of status.
"""
# Log incoming messages
if event_time is None:
event_time = get_current_time_string()
if submit_num is None:
submit_num = itask.submit_num
if flag == self.INCOMING_FLAG and submit_num != itask.submit_num:
flag = self.IGNORED_INCOMING_FLAG
LOG.log(
self.LEVELS.get(severity, INFO),
r'[%s] -(current:%s)%s %s at %s',
itask, itask.state.status, flag, message, event_time)
if flag == self.IGNORED_INCOMING_FLAG:
LOG.warning(
'[%s] -submit-num=%02d: ignore message from job(%02d)',
itask, itask.submit_num, submit_num)
return
# always update the suite state summary for latest message
if flag == self.POLLED_FLAG:
itask.set_summary_message('%s %s' % (message, self.POLLED_FLAG))
else:
itask.set_summary_message(message)
# Satisfy my output, if possible, and record the result.
completed_trigger = itask.state.outputs.set_msg_trg_completion(
message=message, is_completed=True)
if message == TASK_OUTPUT_STARTED:
if (flag == self.INCOMING_FLAG
and itask.state.is_gt(TASK_STATUS_RUNNING)):
return True
self._process_message_started(itask, event_time)
elif message == TASK_OUTPUT_SUCCEEDED:
self._process_message_succeeded(itask, event_time)
elif message == TASK_OUTPUT_FAILED:
if (flag == self.INCOMING_FLAG
and itask.state.is_gt(TASK_STATUS_FAILED)):
return True
self._process_message_failed(itask, event_time, self.JOB_FAILED)
elif message == self.EVENT_SUBMIT_FAILED:
if (flag == self.INCOMING_FLAG
and itask.state.is_gt(TASK_STATUS_SUBMIT_FAILED)):
return True
self._process_message_submit_failed(itask, event_time)
elif message == TASK_OUTPUT_SUBMITTED:
if (flag == self.INCOMING_FLAG
and itask.state.is_gt(TASK_STATUS_SUBMITTED)):
return True
self._process_message_submitted(itask, event_time)
elif message.startswith(FAIL_MESSAGE_PREFIX):
# Task received signal.
if (flag == self.INCOMING_FLAG
and itask.state.is_gt(TASK_STATUS_FAILED)):
return True
signal = message[len(FAIL_MESSAGE_PREFIX):]
self._db_events_insert(itask, "signaled", signal)
self.suite_db_mgr.put_update_task_jobs(
itask, {"run_signal": signal})
self._process_message_failed(itask, event_time, self.JOB_FAILED)
#.........这里部分代码省略.........
示例12: process_events
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def process_events(self, schd_ctx):
    """Process task events that were created by "setup_event_handlers".
    schd_ctx is an instance of "Scheduler" in "cylc.scheduler".

    Walk the event timers: drop timers that have exhausted their
    retries, run custom handlers individually via the process pool, and
    group built-in handlers (mail, job-log retrieval) into batches.
    """
    ctx_groups = {}
    now = time()
    # Iterate over a copy so entries can be deleted during the loop.
    for id_key, timer in self.event_timers.copy().items():
        key1, point, name, submit_num = id_key
        if timer.is_waiting:
            continue
        # Set timer if timeout is None.
        if not timer.is_timeout_set():
            if timer.next() is None:
                # No retries left: give up on this event handler.
                LOG.warning("%s/%s/%02d %s failed" % (
                    point, name, submit_num, key1))
                del self.event_timers[id_key]
                continue
        # Report retries and delayed 1st try
        tmpl = None
        if timer.num > 1:
            tmpl = "%s/%s/%02d %s failed, retrying in %s"
        elif timer.delay:
            tmpl = "%s/%s/%02d %s will run after %s"
        if tmpl:
            LOG.debug(tmpl % (
                point, name, submit_num, key1,
                timer.delay_timeout_as_str()))
        # Ready to run?
        if not timer.is_delay_done() or (
            # Avoid flooding user's mail box with mail notification.
            # Group together as many notifications as possible within a
            # given interval.
            timer.ctx.ctx_type == self.HANDLER_MAIL and
            not schd_ctx.stop_mode and
            self.next_mail_time is not None and
            self.next_mail_time > now
        ):
            continue
        timer.set_waiting()
        if timer.ctx.ctx_type == self.HANDLER_CUSTOM:
            # Run custom event handlers on their own
            self.proc_pool.put_command(
                SubProcContext(
                    (key1, submit_num),
                    timer.ctx.cmd, env=os.environ, shell=True,
                ),
                self._custom_handler_callback, [schd_ctx, id_key])
        else:
            # Group together built-in event handlers, where possible
            if timer.ctx not in ctx_groups:
                ctx_groups[timer.ctx] = []
            ctx_groups[timer.ctx].append(id_key)
    next_mail_time = now + self.mail_interval
    for ctx, id_keys in ctx_groups.items():
        if ctx.ctx_type == self.HANDLER_MAIL:
            # Set next_mail_time if any mail sent
            self.next_mail_time = next_mail_time
            self._process_event_email(schd_ctx, ctx, id_keys)
        elif ctx.ctx_type == self.HANDLER_JOB_LOGS_RETRIEVE:
            self._process_job_logs_retrieval(schd_ctx, ctx, id_keys)
示例13: register
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import warning [as 别名]
def register(self, reg=None, source=None, redirect=False):
    """Register a suite, or renew its registration.
    Create suite service directory and symlink to suite source location.
    Args:
        reg (str): suite name, default basename($PWD).
        source (str): directory location of suite.rc file, default $PWD.
        redirect (bool): allow reuse of existing name and run directory.
    Return:
        The registered suite name (which may be computed here).
    Raise:
        SuiteServiceFileError:
            No suite.rc file found in source location.
            Illegal name (can look like a relative path, but not absolute).
            Another suite already has this name (unless --redirect).
    """
    if reg is None:
        reg = os.path.basename(os.getcwd())
    if os.path.isabs(reg):
        raise SuiteServiceFileError(
            "suite name cannot be an absolute path: %s" % reg)
    if source is not None:
        # Accept a path to the suite.rc itself; use its directory.
        if os.path.basename(source) == self.FILE_BASE_SUITE_RC:
            source = os.path.dirname(source)
    else:
        source = os.getcwd()
    # suite.rc must exist so we can detect accidentally reversed args.
    source = os.path.abspath(source)
    if not os.path.isfile(os.path.join(source, self.FILE_BASE_SUITE_RC)):
        raise SuiteServiceFileError("no suite.rc in %s" % source)
    # Create service dir if necessary.
    srv_d = self.get_suite_srv_dir(reg)
    os.makedirs(srv_d, exist_ok=True)
    # See if suite already has a source or not
    try:
        orig_source = os.readlink(
            os.path.join(srv_d, self.FILE_BASE_SOURCE))
    except OSError:
        # No existing source symlink: first registration of this name.
        orig_source = None
    else:
        # Normalise a relative symlink target against the service dir.
        if not os.path.isabs(orig_source):
            orig_source = os.path.normpath(
                os.path.join(srv_d, orig_source))
    if orig_source is not None and source != orig_source:
        if not redirect:
            raise SuiteServiceFileError(
                "the name '%s' already points to %s.\nUse "
                "--redirect to re-use an existing name and run "
                "directory." % (reg, orig_source))
        LOG.warning(
            "the name '%(reg)s' points to %(old)s.\nIt will now"
            " be redirected to %(new)s.\nFiles in the existing %(reg)s run"
            " directory will be overwritten.\n",
            {'reg': reg, 'old': orig_source, 'new': source})
        # Remove symlink to the original suite.
        os.unlink(os.path.join(srv_d, self.FILE_BASE_SOURCE))
    # Create symlink to the suite, if it doesn't already exist.
    if orig_source is None or source != orig_source:
        target = os.path.join(srv_d, self.FILE_BASE_SOURCE)
        if (os.path.abspath(source) ==
                os.path.abspath(os.path.dirname(srv_d))):
            # If source happens to be the run directory,
            # create .service/source -> ..
            source_str = ".."
        else:
            source_str = source
        os.symlink(source_str, target)
    print('REGISTERED %s -> %s' % (reg, source))
    return reg