This article collects typical usage examples of the Python method cylc.flow.LOG.warning. If you are unsure exactly how to use LOG.warning, or want to see what it looks like in practice, the curated examples below may help. You can also explore further usage examples of cylc.flow.LOG, the object this method belongs to.
The following shows 12 code examples of the LOG.warning method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
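All of the examples below share the same import and call pattern. As a minimal orientation sketch (assuming cylc-flow is installed; LOG is cylc's module-level logger, so it behaves like a standard logging.Logger):

# Minimal usage sketch. Assumes cylc-flow is installed; the message strings
# here are made up for illustration only.
from cylc.flow import LOG

LOG.warning("something looks wrong: %s", "detail")              # lazy %-formatting
LOG.warning("%s: write failed\n%s" % ("job.log", "disk full"))  # pre-formatted string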
Example 1: _job_cmd_out_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _job_cmd_out_callback(suite, itask, cmd_ctx, line):
    """Callback on job command STDOUT/STDERR."""
    if cmd_ctx.cmd_kwargs.get("host") and cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("host"):
        owner_at_host = "(%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@localhost) " % cmd_ctx.cmd_kwargs
    else:
        owner_at_host = ""
    try:
        timestamp, _, content = line.split("|")
    except ValueError:
        pass
    else:
        line = "%s %s" % (timestamp, content)
    job_activity_log = get_task_job_activity_log(
        suite, itask.point, itask.tdef.name)
    try:
        with open(job_activity_log, "ab") as handle:
            if not line.endswith("\n"):
                line += "\n"
            handle.write((owner_at_host + line).encode())
    except IOError as exc:
        LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
    LOG.warning("[%s] -%s%s", itask, owner_at_host, line)
Example 2: remote_tidy
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def remote_tidy(self):
    """Remove suite contact files from initialised remotes.

    Call "cylc remote-tidy".
    This method is called on suite shutdown, so we want nothing to hang.
    Timeout any incomplete commands after 10 seconds.
    Also remove UUID file on suite host ".service/uuid".
    """
    # Remove UUID file
    uuid_fname = os.path.join(
        self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
        FILE_BASE_UUID)
    try:
        os.unlink(uuid_fname)
    except OSError:
        pass
    # Issue all SSH commands in parallel
    procs = {}
    for (host, owner), init_with_contact in self.remote_init_map.items():
        if init_with_contact != REMOTE_INIT_DONE:
            continue
        cmd = ['timeout', '10', 'cylc', 'remote-tidy']
        if is_remote_host(host):
            cmd.append('--host=%s' % host)
        if is_remote_user(owner):
            cmd.append('--user=%s' % owner)
        if cylc.flow.flags.debug:
            cmd.append('--debug')
        cmd.append(os.path.join(glbl_cfg().get_derived_host_item(
            self.suite, 'suite run directory', host, owner)))
        procs[(host, owner)] = (
            cmd,
            Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=open(os.devnull)))
    # Wait for commands to complete for a max of 10 seconds
    timeout = time() + 10.0
    while procs and time() < timeout:
        for (host, owner), (cmd, proc) in procs.copy().items():
            if proc.poll() is None:
                continue
            del procs[(host, owner)]
            out, err = (f.decode() for f in proc.communicate())
            if proc.wait():
                LOG.warning(TaskRemoteMgmtError(
                    TaskRemoteMgmtError.MSG_TIDY,
                    (host, owner), ' '.join(quote(item) for item in cmd),
                    proc.returncode, out, err))
    # Terminate any remaining commands
    for (host, owner), (cmd, proc) in procs.items():
        try:
            proc.terminate()
        except OSError:
            pass
        out, err = proc.communicate()
        if proc.wait():
            LOG.warning(TaskRemoteMgmtError(
                TaskRemoteMgmtError.MSG_TIDY,
                (host, owner), ' '.join(quote(item) for item in cmd),
                proc.returncode, out, err))
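The pattern here — start every command with Popen, then poll them all in a loop until a deadline — can be sketched on its own. The commands below are placeholders (GNU coreutils 'timeout', 'true' and 'false' on a Unix-like system), not real 'cylc remote-tidy' invocations:

# Standalone sketch of the parallel-Popen-then-poll pattern used above.
# The commands are hypothetical placeholders, assuming GNU coreutils.
import os
from subprocess import Popen, PIPE
from time import sleep, time

cmds = {"a": ["timeout", "10", "true"], "b": ["timeout", "10", "false"]}
procs = {
    key: Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=open(os.devnull))
    for key, cmd in cmds.items()}
deadline = time() + 10.0
while procs and time() < deadline:
    for key, proc in procs.copy().items():
        if proc.poll() is None:
            continue  # still running
        del procs[key]
        out, err = (f.decode() for f in proc.communicate())
        if proc.wait():
            print("command %r failed: rc=%d" % (key, proc.returncode))
    sleep(0.1)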
Example 3: recover_pub_from_pri
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def recover_pub_from_pri(self):
    """Recover public database from private database."""
    if self.pub_dao.n_tries >= self.pub_dao.MAX_TRIES:
        self.copy_pri_to_pub()
        LOG.warning(
            "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
                "pub_db_name": self.pub_dao.db_file_name,
                "pri_db_name": self.pri_dao.db_file_name})
        self.pub_dao.n_tries = 0
Example 4: _manip_task_jobs_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _manip_task_jobs_callback(
        self, ctx, suite, itasks, summary_callback, more_callbacks=None):
    """Callback when submit/poll/kill tasks command exits."""
    if ctx.ret_code:
        LOG.error(ctx)
    else:
        LOG.debug(ctx)
    # A dict for easy reference of (CYCLE, NAME, SUBMIT_NUM) -> TaskProxy
    #
    # Note for "reload": A TaskProxy instance may be replaced on reload, so
    # the "itasks" list may not reference the TaskProxy objects that
    # replace the old ones. The .reload_successor attribute provides the
    # link(s) for us to get to the latest replacement.
    #
    # Note for "kill": It is possible for a job to trigger its trap and
    # report back to the suite before this logic is called. If so, the task
    # will no longer be TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING, and
    # its output line will be ignored here.
    tasks = {}
    for itask in itasks:
        while itask.reload_successor is not None:
            itask = itask.reload_successor
        if itask.point is not None and itask.submit_num:
            submit_num = "%02d" % (itask.submit_num)
            tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
    handlers = [(self.batch_sys_mgr.OUT_PREFIX_SUMMARY, summary_callback)]
    if more_callbacks:
        for prefix, callback in more_callbacks.items():
            handlers.append((prefix, callback))
    out = ctx.out
    if not out:
        out = ""
    bad_tasks = dict(tasks)
    for line in out.splitlines(True):
        for prefix, callback in handlers:
            if line.startswith(prefix):
                line = line[len(prefix):].strip()
                try:
                    path = line.split("|", 2)[1]  # timestamp, path, status
                    point, name, submit_num = path.split(os.sep, 2)
                    if prefix == self.batch_sys_mgr.OUT_PREFIX_SUMMARY:
                        del bad_tasks[(point, name, submit_num)]
                    itask = tasks[(point, name, submit_num)]
                    callback(suite, itask, ctx, line)
                except (LookupError, ValueError, KeyError) as exc:
                    LOG.warning(
                        'Unhandled %s output: %s', ctx.cmd_key, line)
                    LOG.exception(exc)
    # Task jobs that are in the original command but did not get a status
    # in the output. Handle as failures.
    for key, itask in sorted(bad_tasks.items()):
        line = (
            "|".join([ctx.timestamp, os.sep.join(key), "1"]) + "\n")
        summary_callback(suite, itask, ctx, line)
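Each handled output line is expected to look like "timestamp|cycle/name/submit_num|status". A quick illustration of the path parsing, with a fabricated line (assuming a POSIX system where os.sep is "/"):

# Illustration of the output-line parsing above; the line is fabricated.
import os

line = "2019-06-01T12:00:00Z|20190601T00Z/foo/01|0"
path = line.split("|", 2)[1]                 # -> "20190601T00Z/foo/01"
point, name, submit_num = path.split(os.sep, 2)
print(point, name, submit_num)               # -> 20190601T00Z foo 01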
Example 5: execute_queued_items
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def execute_queued_items(self):
    """Execute queued items for each table."""
    try:
        for table in self.tables.values():
            # DELETE statements may have varying number of WHERE args so we
            # can only executemany for each identical template statement.
            for stmt, stmt_args_list in table.delete_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
            # INSERT statements are uniform for each table, so all INSERT
            # statements can be executed using a single "executemany" call.
            if table.insert_queue:
                self._execute_stmt(
                    table.get_insert_stmt(), table.insert_queue)
            # UPDATE statements can have varying number of SET and WHERE
            # args so we can only executemany for each identical template
            # statement.
            for stmt, stmt_args_list in table.update_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
        # Connection should only be opened if we have executed something.
        if self.conn is None:
            return
        self.conn.commit()
    except sqlite3.Error:
        if not self.is_public:
            raise
        self.n_tries += 1
        LOG.warning(
            "%(file)s: write attempt (%(attempt)d) did not complete\n" % {
                "file": self.db_file_name, "attempt": self.n_tries})
        if self.conn is not None:
            try:
                self.conn.rollback()
            except sqlite3.Error:
                pass
        return
    else:
        # Clear the queues
        for table in self.tables.values():
            table.delete_queues.clear()
            del table.insert_queue[:]  # list.clear avail from Python 3.3
            table.update_queues.clear()
        # Report public database retry recovery if necessary
        if self.n_tries:
            LOG.warning(
                "%(file)s: recovered after (%(attempt)d) attempt(s)\n" % {
                    "file": self.db_file_name, "attempt": self.n_tries})
            self.n_tries = 0
    finally:
        # Note: This is not strictly necessary. However, if the suite run
        # directory is removed, a forced reconnection to the private
        # database will ensure that the suite dies.
        self.close()
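The queues exist so that many rows can be written with a single executemany call per statement template. A self-contained sqlite3 sketch of that batching idea, using an in-memory database and a made-up table rather than cylc's real schema:

# Self-contained sketch of executemany batching; table and rows are made up.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE task_events (name TEXT, event TEXT)")
insert_queue = [("foo", "submitted"), ("foo", "started"), ("bar", "failed")]
try:
    conn.executemany("INSERT INTO task_events VALUES (?, ?)", insert_queue)
    conn.commit()
except sqlite3.Error:
    conn.rollback()  # leave the database unchanged on failure
print(conn.execute("SELECT COUNT(*) FROM task_events").fetchone()[0])  # -> 3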
Example 6: kill_task_jobs
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def kill_task_jobs(self, suite, itasks):
    """Kill jobs of active tasks, and hold the tasks.

    If items is specified, kill active tasks matching given IDs.
    """
    to_kill_tasks = []
    for itask in itasks:
        if itask.state.status in TASK_STATUSES_ACTIVE:
            itask.state.set_held()
            to_kill_tasks.append(itask)
        else:
            LOG.warning('skipping %s: task not killable' % itask.identity)
    self._run_job_cmd(
        self.JOBS_KILL, suite, to_kill_tasks,
        self._kill_task_jobs_callback)
Example 7: _get_host_metrics
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _get_host_metrics(self):
    """Run "cylc get-host-metrics" commands on hosts.

    Return (dict): {host: host-metrics-dict, ...}
    """
    host_stats = {}
    # Run "cylc get-host-metrics" commands on hosts
    host_proc_map = {}
    cmd = [self.CMD_BASE] + sorted(self._get_host_metrics_opts())
    # Start up commands on hosts
    for host in self.hosts:
        if is_remote_host(host):
            host_proc_map[host] = remote_cylc_cmd(
                cmd, stdin=None, host=host, capture_process=True)
        elif 'localhost' in host_proc_map:
            continue  # Don't duplicate localhost
        else:
            # 1st instance of localhost
            host_proc_map['localhost'] = run_cmd(
                ['cylc'] + cmd, capture_process=True)
    # Collect results from commands
    while host_proc_map:
        for host, proc in list(host_proc_map.copy().items()):
            if proc.poll() is None:
                continue
            del host_proc_map[host]
            out, err = (f.decode() for f in proc.communicate())
            if proc.wait():
                # Command failed in verbose/debug mode
                LOG.warning(
                    "can't get host metric from '%s'" +
                    "%s  # returncode=%d, err=%s\n",
                    host, ' '.join((quote(item) for item in cmd)),
                    proc.returncode, err)
            else:
                # Command OK
                # Users may have profile scripts that write to STDOUT.
                # Drop all output lines until the first character of a
                # line is '{'. Hopefully this is enough to find us the
                # first line that denotes the beginning of the expected
                # JSON data structure.
                out = ''.join(dropwhile(
                    lambda s: not s.startswith('{'), out.splitlines(True)))
                host_stats[host] = json.loads(out)
        sleep(0.01)
    return host_stats
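The dropwhile trick discards any login or profile-script chatter printed before the JSON payload. In isolation, with fabricated command output:

# Isolated sketch of the dropwhile/json trick above; the output is fabricated.
import json
from itertools import dropwhile

out = 'Welcome message from a login script\n{"memory": 32768,\n "load:1": 0.5}\n'
out = ''.join(dropwhile(
    lambda s: not s.startswith('{'), out.splitlines(True)))
print(json.loads(out))  # -> {'memory': 32768, 'load:1': 0.5}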
Example 8: load
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def load(self):
    """Load or reload configuration from files."""
    self.sparse.clear()
    self.dense.clear()
    LOG.debug("Loading site/user global config files")
    conf_path_str = os.getenv("CYLC_CONF_PATH")
    if conf_path_str is None:
        # CYLC_CONF_PATH not defined, use default locations.
        for conf_dir_1, conf_dir_2, conf_type in [
                (self.SITE_CONF_DIR, self.SITE_CONF_DIR_OLD,
                 upgrader.SITE_CONFIG),
                (self.USER_CONF_DIR_1, self.USER_CONF_DIR_2,
                 upgrader.USER_CONFIG)]:
            fname1 = os.path.join(conf_dir_1, self.CONF_BASE)
            fname2 = os.path.join(conf_dir_2, self.CONF_BASE)
            if os.access(fname1, os.F_OK | os.R_OK):
                fname = fname1
            elif os.access(fname2, os.F_OK | os.R_OK):
                fname = fname2
            else:
                continue
            try:
                self.loadcfg(fname, conf_type)
            except ParsecError as exc:
                if conf_type == upgrader.SITE_CONFIG:
                    # Warn on bad site file (users can't fix it).
                    LOG.warning(
                        'ignoring bad %s %s:\n%s', conf_type, fname, exc)
                else:
                    # Abort on bad user file (users can fix it).
                    LOG.error('bad %s %s', conf_type, fname)
                    raise
            break
    elif conf_path_str:
        # CYLC_CONF_PATH defined with a value
        for path in conf_path_str.split(os.pathsep):
            fname = os.path.join(path, self.CONF_BASE)
            if os.access(fname, os.F_OK | os.R_OK):
                self.loadcfg(fname, upgrader.USER_CONFIG)
    # (OK if no global.rc is found, just use system defaults).
    self.transform()
Example 9: _execute_stmt
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _execute_stmt(self, stmt, stmt_args_list):
    """Helper for "self.execute_queued_items".

    Execute a statement. If this is the public database, return True on
    success and False on failure. If this is the private database, return
    True on success, and raise on failure.
    """
    try:
        self.connect()
        self.conn.executemany(stmt, stmt_args_list)
    except sqlite3.Error:
        if not self.is_public:
            raise
        if cylc.flow.flags.debug:
            traceback.print_exc()
        err_log = (
            "cannot execute database statement:\n"
            "file=%(file)s:\nstmt=%(stmt)s"
        ) % {"file": self.db_file_name, "stmt": stmt}
        for i, stmt_args in enumerate(stmt_args_list):
            err_log += ("\nstmt_args[%(i)d]=%(stmt_args)s" % {
                "i": i, "stmt_args": stmt_args})
        LOG.warning(err_log)
        raise
Example 10: _remove_bad_hosts
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _remove_bad_hosts(self, mock_host_stats=None):
    """Return dictionary of 'good' hosts with their metric stats.

    Run 'get-host-metrics' on each run host in parallel & store extracted
    stats for hosts, else an empty JSON structure. Filter out 'bad' hosts
    whereby either metric data cannot be accessed from the command or at
    least one metric value does not pass a specified threshold.
    """
    if mock_host_stats:  # Create fake data for unittest purposes (only).
        host_stats = dict(mock_host_stats)  # Prevent mutable object issues
    else:
        if not self.hosts:
            return {}
        host_stats = self._get_host_metrics()
    # Analyse get-host-metrics results
    for host, data in list(dict(host_stats).items()):
        if not data:
            # No results for host (command failed) -> skip.
            host_stats.pop(host)
            continue
        for measure, cutoff in self.parsed_thresholds.items():
            datum = data[measure]
            # Cutoff is a minimum or maximum depending on measure context.
            if ((datum > cutoff and measure.startswith("load")) or
                    (datum < cutoff and (
                        measure == "memory" or
                        measure.startswith("disk-space")))):
                # Alert user that threshold has not been met.
                LOG.warning(
                    "host '%s' did not pass %s threshold " +
                    "(%s %s threshold %s)\n",
                    host, measure, datum,
                    ">" if measure.startswith("load") else "<", cutoff)
                host_stats.pop(host)
                break
    return host_stats
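The threshold test treats "load" metrics as maxima and "memory"/"disk-space" metrics as minima. A toy version of the same check, with made-up metrics and cutoffs:

# Toy version of the threshold test above; metrics and cutoffs are made up.
data = {"load:5": 1.2, "memory": 16000, "disk-space:/": 500000}
parsed_thresholds = {"load:5": 2.0, "memory": 8000}
for measure, cutoff in parsed_thresholds.items():
    datum = data[measure]
    # Load must stay below its cutoff; memory/disk-space must stay above.
    if ((datum > cutoff and measure.startswith("load")) or
            (datum < cutoff and (
                measure == "memory" or measure.startswith("disk-space")))):
        print("threshold not met for %s" % measure)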
Example 11: register
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def register(self, reg=None, source=None, redirect=False):
    """Register a suite, or renew its registration.

    Create suite service directory and symlink to suite source location.

    Args:
        reg (str): suite name, default basename($PWD).
        source (str): directory location of suite.rc file, default $PWD.
        redirect (bool): allow reuse of existing name and run directory.

    Return:
        The registered suite name (which may be computed here).

    Raise:
        SuiteServiceFileError:
            No suite.rc file found in source location.
            Illegal name (can look like a relative path, but not absolute).
            Another suite already has this name (unless --redirect).
    """
    if reg is None:
        reg = os.path.basename(os.getcwd())
    if os.path.isabs(reg):
        raise SuiteServiceFileError(
            "suite name cannot be an absolute path: %s" % reg)
    if source is not None:
        if os.path.basename(source) == self.FILE_BASE_SUITE_RC:
            source = os.path.dirname(source)
    else:
        source = os.getcwd()
    # suite.rc must exist so we can detect accidentally reversed args.
    source = os.path.abspath(source)
    if not os.path.isfile(os.path.join(source, self.FILE_BASE_SUITE_RC)):
        raise SuiteServiceFileError("no suite.rc in %s" % source)
    # Create service dir if necessary.
    srv_d = self.get_suite_srv_dir(reg)
    os.makedirs(srv_d, exist_ok=True)
    # See if suite already has a source or not
    try:
        orig_source = os.readlink(
            os.path.join(srv_d, self.FILE_BASE_SOURCE))
    except OSError:
        orig_source = None
    else:
        if not os.path.isabs(orig_source):
            orig_source = os.path.normpath(
                os.path.join(srv_d, orig_source))
    if orig_source is not None and source != orig_source:
        if not redirect:
            raise SuiteServiceFileError(
                "the name '%s' already points to %s.\nUse "
                "--redirect to re-use an existing name and run "
                "directory." % (reg, orig_source))
        LOG.warning(
            "the name '%(reg)s' points to %(old)s.\nIt will now"
            " be redirected to %(new)s.\nFiles in the existing %(reg)s run"
            " directory will be overwritten.\n",
            {'reg': reg, 'old': orig_source, 'new': source})
        # Remove symlink to the original suite.
        os.unlink(os.path.join(srv_d, self.FILE_BASE_SOURCE))
    # Create symlink to the suite, if it doesn't already exist.
    if orig_source is None or source != orig_source:
        target = os.path.join(srv_d, self.FILE_BASE_SOURCE)
        if (os.path.abspath(source) ==
                os.path.abspath(os.path.dirname(srv_d))):
            # If source happens to be the run directory,
            # create .service/source -> ..
            source_str = ".."
        else:
            source_str = source
        os.symlink(source_str, target)
    print('REGISTERED %s -> %s' % (reg, source))
    return reg
Example 12: _run_event_mail_callback
# Required import: from cylc.flow import LOG [as alias]
# Or: from cylc.flow.LOG import warning [as alias]
def _run_event_mail_callback(proc_ctx):
    """Callback the mail command for notification of a suite event."""
    if proc_ctx.ret_code:
        LOG.warning(str(proc_ctx))
    else:
        LOG.info(str(proc_ctx))