本文整理汇总了Python中cylc.flow.LOG类的典型用法代码示例。如果您正苦于以下问题:Python LOG类的具体用法?Python LOG怎么用?Python LOG使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LOG类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _job_cmd_out_callback
def _job_cmd_out_callback(suite, itask, cmd_ctx, line):
    """Callback on job command STDOUT/STDERR.

    Append one line of job command output to the task's job activity log,
    prefixed with an "(owner@host)" tag when the job ran remotely, and echo
    it to the suite log.

    Args:
        suite (str): suite name.
        itask: task proxy whose job produced the output.
        cmd_ctx: command context; ``cmd_kwargs`` may carry "user"/"host".
        line (str): one line of output, normally "timestamp|id|content".
    """
    # Build the remote-ownership prefix.
    # FIX: the format strings were garbled ("%(user)[email protected]...");
    # restored to the valid %-mapping forms "%(user)s@%(host)s" etc.
    if cmd_ctx.cmd_kwargs.get("host") and cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("host"):
        owner_at_host = "(%(host)s) " % cmd_ctx.cmd_kwargs
    elif cmd_ctx.cmd_kwargs.get("user"):
        owner_at_host = "(%(user)s@localhost) " % cmd_ctx.cmd_kwargs
    else:
        owner_at_host = ""
    try:
        timestamp, _, content = line.split("|")
    except ValueError:
        # Not in "timestamp|id|content" form; keep the line verbatim.
        pass
    else:
        line = "%s %s" % (timestamp, content)
    job_activity_log = get_task_job_activity_log(
        suite, itask.point, itask.tdef.name)
    try:
        with open(job_activity_log, "ab") as handle:
            if not line.endswith("\n"):
                line += "\n"
            handle.write((owner_at_host + line).encode())
    except IOError as exc:
        # Activity log may be unwritable (e.g. run dir removed); warn only.
        LOG.warning("%s: write failed\n%s" % (job_activity_log, exc))
    LOG.warning("[%s] -%s%s", itask, owner_at_host, line)
示例2: addict
def addict(cfig, key, val, parents, index):
    """Add a new [parents...]key=value pair to a nested dict."""
    # Walk down to the section that should contain the item.
    for parent in parents:
        cfig = cfig[parent]
    if not isinstance(cfig, dict):
        # An item of this name was already set at this level as a value.
        raise FileParseError(
            'line %d: already encountered %s',
            index, itemstr(parents, key, val))
    if key not in cfig:
        # First occurrence: simply record it.
        cfig[key] = val
        return
    # The item already exists; graph strings under [scheduling]
    # [[dependencies]] (optionally one recurrence section deeper) merge
    # rather than override.
    is_mergeable_graph = key == 'graph' and (
        parents == ['scheduling', 'dependencies']
        or (len(parents) == 3
            and parents[-3:-1] == ['scheduling', 'dependencies']))
    if is_mergeable_graph:
        # Append the new graph string to the existing one(s).
        LOG.debug('Merging graph strings under %s', itemstr(parents))
        if not isinstance(cfig[key], list):
            cfig[key] = [cfig[key]]
        cfig[key].append(val)
    else:
        # Otherwise the later value wins.
        LOG.debug(
            'overriding %s old value: %s new value: %s',
            itemstr(parents, key), cfig[key], val)
        cfig[key] = val
示例3: _kill_task_job_callback
def _kill_task_job_callback(self, suite, itask, cmd_ctx, line):
    """Helper for _kill_task_jobs_callback, on one task job."""
    ctx = SubProcContext(self.JOBS_KILL, None)
    ctx.out = line
    try:
        ctx.timestamp, _, ctx.ret_code = line.split("|", 2)
    except ValueError:
        # Output not in "timestamp|id|status" form: treat as failure.
        ctx.ret_code = 1
        ctx.cmd = cmd_ctx.cmd  # print original command on failure
    else:
        ctx.ret_code = int(ctx.ret_code)
        if ctx.ret_code:
            ctx.cmd = cmd_ctx.cmd  # print original command on failure
    log_task_job_activity(ctx, suite, itask.point, itask.tdef.name)
    if ctx.ret_code:
        # Non-zero exit status: the kill command itself failed.
        log_lvl, log_msg = WARNING, 'kill failed'
        itask.state.kill_failed = True
    elif itask.state.status == TASK_STATUS_SUBMITTED:
        log_lvl, log_msg = INFO, 'killed'
        self.task_events_mgr.process_message(
            itask, CRITICAL, self.task_events_mgr.EVENT_SUBMIT_FAILED,
            ctx.timestamp)
    elif itask.state.status == TASK_STATUS_RUNNING:
        log_lvl, log_msg = INFO, 'killed'
        self.task_events_mgr.process_message(
            itask, CRITICAL, TASK_OUTPUT_FAILED)
    else:
        # Task moved on since the kill was issued; nothing to report.
        log_lvl = DEBUG
        log_msg = (
            'ignoring job kill result, unexpected task state: %s' %
            itask.state.status)
    itask.set_summary_message(log_msg)
    LOG.log(log_lvl, "[%s] -job(%02d) %s" % (
        itask.identity, itask.submit_num, log_msg))
示例4: stop
def stop(self):
    """Finish serving the current request then stop the server."""
    LOG.debug('stopping zmq server...')
    # Post the sentinel so the listener loop exits after its current
    # request, then block until that thread has actually returned.
    self.queue.put('STOP')
    self.thread.join()
    self.socket.close()
    LOG.debug('...stopped')
示例5: _receiver
def _receiver(self, message):
"""Wrap incoming messages and dispatch them to exposed methods.
Args:
message (dict): message contents
"""
# determine the server method to call
try:
method = getattr(self, message['command'])
args = message['args']
args.update({'user': message['user']})
if 'meta' in message:
args['meta'] = message['meta']
except KeyError:
# malformed message
return {'error': {
'message': 'Request missing required field(s).'}}
except AttributeError:
# no exposed method by that name
return {'error': {
'message': 'No method by the name "%s"' % message['command']}}
# generate response
try:
response = method(**args)
except Exception as exc:
# includes incorrect arguments (TypeError)
LOG.exception(exc) # note the error server side
import traceback
return {'error': {
'message': str(exc), 'traceback': traceback.format_exc()}}
return {'data': response}
示例6: list_suites
def list_suites(self, regfilter=None):
    """Return a filtered list of valid suite registrations.

    Args:
        regfilter (str): optional regular expression; only registrations
            it matches (via search) are returned.

    Raises:
        ValueError: if regfilter is not a valid regular expression.
    """
    # Compile the optional filter up front so a bad pattern fails fast.
    rec_regfilter = None
    if regfilter:
        try:
            rec_regfilter = re.compile(regfilter)
        except re.error as exc:
            raise ValueError("%s: %s" % (regfilter, exc))
    run_d = glbl_cfg().get_host_item('run directory')
    results = []
    for dirpath, dnames, _ in os.walk(run_d, followlinks=True):
        # Always descend for top directory, but don't descend further
        # once a directory has a .service/ sub-directory.
        if dirpath != run_d and self.DIR_BASE_SRV in dnames:
            dnames[:] = []
        reg = os.path.relpath(dirpath, run_d)
        path = os.path.join(dirpath, self.DIR_BASE_SRV)
        # Keep only suites with a .service source item...
        if not self._locate_item(self.FILE_BASE_SOURCE, path):
            continue
        # ...that also match the filter, when one was given.
        if rec_regfilter and not rec_regfilter.search(reg):
            continue
        try:
            results.append([
                reg,
                self.get_suite_source_dir(reg),
                self.get_suite_title(reg)])
        except (IOError, SuiteServiceFileError) as exc:
            # Skip suites whose service files cannot be read.
            LOG.error('%s: %s', reg, exc)
    return results
示例7: poll_task_jobs
def poll_task_jobs(self, suite, itasks, poll_succ=True, msg=None):
    """Poll jobs of specified tasks.

    Any job that is or was submitted or running can be polled, except for
    retrying tasks - which would poll (correctly) as failed. And don't poll
    succeeded tasks by default.

    This method uses _poll_task_jobs_callback() and
    _manip_task_jobs_callback() as help/callback methods.

    _poll_task_job_callback() executes one specific job.
    """
    pollable_statuses = {
        TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, TASK_STATUS_FAILED}
    if poll_succ:
        pollable_statuses.add(TASK_STATUS_SUCCEEDED)
    # Partition the tasks: only pollable ones go to the job command.
    to_poll_tasks = []
    for itask in itasks:
        if itask.state.status in pollable_statuses:
            to_poll_tasks.append(itask)
        else:
            LOG.debug("skipping %s: not pollable, "
                      "or skipping 'succeeded' tasks" % itask.identity)
    if not to_poll_tasks:
        return
    if msg is not None:
        LOG.info(msg)
    self._run_job_cmd(
        self.JOBS_POLL, suite, to_poll_tasks,
        self._poll_task_jobs_callback)
示例8: remote_tidy
def remote_tidy(self):
    """Remove suite contact files from initialised remotes.

    Call "cylc remote-tidy".
    This method is called on suite shutdown, so we want nothing to hang.
    Timeout any incomplete commands after 10 seconds.

    Also remove UUID file on suite host ".service/uuid".
    """
    # Remove UUID file (best effort - ignore a missing/unremovable file).
    uuid_fname = os.path.join(
        self.suite_srv_files_mgr.get_suite_srv_dir(self.suite),
        FILE_BASE_UUID)
    try:
        os.unlink(uuid_fname)
    except OSError:
        pass
    # Issue all SSH commands in parallel
    procs = {}
    for (host, owner), init_with_contact in self.remote_init_map.items():
        # Only tidy remotes that were fully initialised with contact files.
        if init_with_contact != REMOTE_INIT_DONE:
            continue
        # "timeout 10" ensures the command itself cannot hang shutdown.
        cmd = ['timeout', '10', 'cylc', 'remote-tidy']
        if is_remote_host(host):
            cmd.append('--host=%s' % host)
        if is_remote_user(owner):
            cmd.append('--user=%s' % owner)
        if cylc.flow.flags.debug:
            cmd.append('--debug')
        cmd.append(os.path.join(glbl_cfg().get_derived_host_item(
            self.suite, 'suite run directory', host, owner)))
        procs[(host, owner)] = (
            cmd,
            Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=open(os.devnull)))
    # Wait for commands to complete for a max of 10 seconds
    timeout = time() + 10.0
    while procs and time() < timeout:
        # Iterate over a copy so finished entries can be deleted in-loop.
        for (host, owner), (cmd, proc) in procs.copy().items():
            if proc.poll() is None:
                # Still running; check again on the next pass.
                continue
            del procs[(host, owner)]
            out, err = (f.decode() for f in proc.communicate())
            if proc.wait():
                # Non-zero exit: report but do not abort shutdown.
                LOG.warning(TaskRemoteMgmtError(
                    TaskRemoteMgmtError.MSG_TIDY,
                    (host, owner), ' '.join(quote(item) for item in cmd),
                    proc.returncode, out, err))
    # Terminate any remaining commands
    for (host, owner), (cmd, proc) in procs.items():
        try:
            proc.terminate()
        except OSError:
            # Process may have exited between poll and terminate.
            pass
        out, err = proc.communicate()
        if proc.wait():
            LOG.warning(TaskRemoteMgmtError(
                TaskRemoteMgmtError.MSG_TIDY,
                (host, owner), ' '.join(quote(item) for item in cmd),
                proc.returncode, out, err))
示例9: create_directory
def create_directory(dir_, name):
    """Create a directory (with any missing parents), if not already there.

    Args:
        dir_ (str): path of the directory to create.
        name (str): human-readable label for the error message.

    Raises:
        GlobalConfigError: if the directory cannot be created.
    """
    try:
        os.makedirs(dir_, exist_ok=True)
    except OSError as exc:
        LOG.exception(exc)
        # Chain the OSError so the root cause survives in the traceback.
        raise GlobalConfigError(
            'Failed to create directory "' + name + '"') from exc
示例10: recover_pub_from_pri
def recover_pub_from_pri(self):
    """Recover public database from private database."""
    # Only rebuild once the public DB has exhausted its retry budget.
    if self.pub_dao.n_tries < self.pub_dao.MAX_TRIES:
        return
    self.copy_pri_to_pub()
    LOG.warning(
        "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
            "pub_db_name": self.pub_dao.db_file_name,
            "pri_db_name": self.pri_dao.db_file_name})
    # Reset the failure counter after a successful recovery.
    self.pub_dao.n_tries = 0
示例11: clear_broadcast
def clear_broadcast(
        self, point_strings=None, namespaces=None, cancel_settings=None):
    """Clear broadcasts globally, or for listed namespaces and/or points.

    Return a tuple (modified_settings, bad_options), where:

    * modified_settings is similar to the return value of the "put" method,
      but for removed broadcasts.
    * bad_options is a dict in the form:
      {"point_strings": ["20020202", ..."], ...}
      The dict is only populated if there are options not associated with
      previous broadcasts. The keys can be:

      * point_strings: a list of bad point strings.
      * namespaces: a list of bad namespaces.
      * cancel: a list of tuples. Each tuple contains the keys of a bad
        setting.
    """
    # If cancel_settings defined, only clear specific broadcasts
    cancel_keys_list = self._settings_to_keys_list(cancel_settings)
    # Clear broadcasts
    modified_settings = []
    with self.lock:
        for point_string, point_string_settings in self.broadcasts.items():
            # Skip points not selected (empty selection means "all").
            if point_strings and point_string not in point_strings:
                continue
            for namespace, namespace_settings in (
                    point_string_settings.items()):
                # Skip namespaces not selected (empty means "all").
                if namespaces and namespace not in namespaces:
                    continue
                # Depth-first walk of the nested settings dict, tracking
                # the key path from the namespace root.
                stuff_stack = [([], namespace_settings)]
                while stuff_stack:
                    keys, stuff = stuff_stack.pop()
                    for key, value in stuff.items():
                        if isinstance(value, dict):
                            stuff_stack.append((keys + [key], value))
                        elif (not cancel_keys_list or
                                keys + [key] in cancel_keys_list):
                            # Clear the leaf in place, then rebuild the
                            # nested {k1: {k2: ...}} shape for reporting.
                            stuff[key] = None
                            setting = {key: value}
                            for rkey in reversed(keys):
                                setting = {rkey: setting}
                            modified_settings.append(
                                (point_string, namespace, setting))
        # Prune any empty branches
        bad_options = self._get_bad_options(
            self._prune(), point_strings, namespaces, cancel_keys_list)
    # Log the broadcast
    self.suite_db_mgr.put_broadcast(modified_settings, is_cancel=True)
    LOG.info(
        get_broadcast_change_report(modified_settings, is_cancel=True))
    if bad_options:
        LOG.error(get_broadcast_bad_options_report(bad_options))
    return modified_settings, bad_options
示例12: addsect
def addsect(cfig, sname, parents):
    """Add a new section to a nested dict."""
    # Walk down to the parent level that should contain the section.
    for parent in parents:
        cfig = cfig[parent]
    if sname not in cfig:
        cfig[sname] = OrderedDictWithDefaults()
    else:
        # A repeated section heading is legal; it only warrants a warning
        # if contained items are repeated, so just note it at debug level.
        LOG.debug(
            'Section already encountered: %s', itemstr(parents + [sname]))
示例13: execute_queued_items
def execute_queued_items(self):
    """Execute queued items for each table.

    Run each table's queued DELETE, INSERT and UPDATE statements, then
    commit and clear the queues. Errors against a public database are
    tolerated (rolled back and counted for retry); errors against a
    private database are re-raised.
    """
    try:
        for table in self.tables.values():
            # DELETE statements may have varying number of WHERE args so we
            # can only executemany for each identical template statement.
            for stmt, stmt_args_list in table.delete_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
            # INSERT statements are uniform for each table, so all INSERT
            # statements can be executed using a single "executemany" call.
            if table.insert_queue:
                self._execute_stmt(
                    table.get_insert_stmt(), table.insert_queue)
            # UPDATE statements can have varying number of SET and WHERE
            # args so we can only executemany for each identical template
            # statement.
            for stmt, stmt_args_list in table.update_queues.items():
                self._execute_stmt(stmt, stmt_args_list)
        # Connection should only be opened if we have executed something.
        if self.conn is None:
            return
        self.conn.commit()
    except sqlite3.Error:
        # Private database failures are fatal.
        if not self.is_public:
            raise
        # Public database: keep the queues for retry on the next call.
        self.n_tries += 1
        LOG.warning(
            "%(file)s: write attempt (%(attempt)d) did not complete\n" % {
                "file": self.db_file_name, "attempt": self.n_tries})
        if self.conn is not None:
            try:
                self.conn.rollback()
            except sqlite3.Error:
                # Rollback failure is non-fatal here; connection is
                # closed in the "finally" clause below anyway.
                pass
        return
    else:
        # Clear the queues
        for table in self.tables.values():
            table.delete_queues.clear()
            del table.insert_queue[:]  # list.clear avail from Python 3.3
            table.update_queues.clear()
        # Report public database retry recovery if necessary
        if self.n_tries:
            LOG.warning(
                "%(file)s: recovered after (%(attempt)d) attempt(s)\n" % {
                    "file": self.db_file_name, "attempt": self.n_tries})
            self.n_tries = 0
    finally:
        # Note: This is not strictly necessary. However, if the suite run
        # directory is removed, a forced reconnection to the private
        # database will ensure that the suite dies.
        self.close()
示例14: _run_event_custom_handlers
def _run_event_custom_handlers(self, config, ctx):
    """Helper for "run_event_handlers", custom event handlers.

    Args:
        config: suite configuration object (provides events config and
            suite [meta] items for template substitution).
        ctx: suite event context with event, reason, suite and uuid_str.

    Raises:
        SuiteEventError: if a handler template is bad and the relevant
            "abort if ... handler fails" setting is on.
    """
    # Look for event handlers
    # 1. Handlers for specific event
    # 2. General handlers
    handlers = self.get_events_conf(config, '%s handler' % ctx.event)
    if not handlers and (
            ctx.event in
            self.get_events_conf(config, 'handler events', [])):
        handlers = self.get_events_conf(config, 'handlers')
    if not handlers:
        return
    for i, handler in enumerate(handlers):
        cmd_key = ('%s-%02d' % (self.SUITE_EVENT_HANDLER, i), ctx.event)
        # Handler command may be a string for substitution
        abort_on_error = self.get_events_conf(
            config, 'abort if %s handler fails' % ctx.event)
        try:
            # Values are shell-quoted as the command runs via a shell.
            handler_data = {
                'event': quote(ctx.event),
                'message': quote(ctx.reason),
                'suite': quote(ctx.suite),
                'suite_uuid': quote(str(ctx.uuid_str)),
            }
            if config.cfg['meta']:
                for key, value in config.cfg['meta'].items():
                    if key == "URL":
                        # Keep legacy "suite_url" alias alongside "URL".
                        handler_data["suite_url"] = quote(value)
                    handler_data[key] = quote(value)
            cmd = handler % (handler_data)
        except KeyError as exc:
            # Template referenced an unknown %(name)s key.
            message = "%s bad template: %s" % (cmd_key, exc)
            LOG.error(message)
            if abort_on_error:
                raise SuiteEventError(message)
            continue
        if cmd == handler:
            # Nothing substituted, assume classic interface
            cmd = "%s '%s' '%s' '%s'" % (
                handler, ctx.event, ctx.suite, ctx.reason)
        proc_ctx = SubProcContext(
            cmd_key, cmd, env=dict(os.environ), shell=True)
        if abort_on_error or self.proc_pool.closed:
            # Run command in foreground if abort on failure is set or if
            # process pool is closed
            self.proc_pool.run_command(proc_ctx)
            self._run_event_handlers_callback(
                proc_ctx, abort_on_error=abort_on_error)
        else:
            # Run command using process pool otherwise
            self.proc_pool.put_command(
                proc_ctx, self._run_event_handlers_callback)
示例15: _remote_host_select_callback
def _remote_host_select_callback(self, proc_ctx, cmd_str):
    """Callback when host select command exits"""
    self.ready = True
    if proc_ctx.ret_code != 0 or not proc_ctx.out:
        # Bad status: record the failure so callers can report it later.
        LOG.error(proc_ctx)
        self.remote_host_str_map[cmd_str] = TaskRemoteMgmtError(
            TaskRemoteMgmtError.MSG_SELECT, (cmd_str, None), cmd_str,
            proc_ctx.ret_code, proc_ctx.out, proc_ctx.err)
        return
    # Good status: the first line of output is the selected host.
    LOG.debug(proc_ctx)
    self.remote_host_str_map[cmd_str] = proc_ctx.out.splitlines()[0]