This article collects typical usage examples of the Python method cylc.LOG.info. If you are unsure how to use LOG.info, what it does, or want worked examples, the curated method examples here may help. You can also explore the enclosing class, cylc.LOG,
for further usage.
Fifteen code examples of the LOG.info method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _process_message_submit_failed
# Required import: from cylc import LOG
# Alternatively: from cylc.LOG import info
def _process_message_submit_failed(self, itask, event_time):
    """Helper for process_message, handle a submit-failed message.

    Records the submit failure in the runtime DB, then either lines up a
    submission retry (if a submit-retry timer has attempts left) or marks
    the task as definitively submit-failed and fires its event handlers.
    """
    LOG.error('[%s] -%s', itask, self.EVENT_SUBMIT_FAILED)
    if event_time is None:
        event_time = get_current_time_string()
    # Persist the failed submission; submit_status 1 == failed.
    self.suite_db_mgr.put_update_task_jobs(itask, {
        "time_submit_exit": event_time,
        "submit_status": 1,
    })
    # No batch system job ID exists for a failed submission.
    itask.summary['submit_method_id'] = None
    self.pflag = True
    if (TASK_STATUS_SUBMIT_RETRYING not in itask.try_timers or
            itask.try_timers[TASK_STATUS_SUBMIT_RETRYING].next() is None):
        # No submission retry lined up: definitive failure.
        # See github #476.
        if itask.state.reset_state(TASK_STATUS_SUBMIT_FAILED):
            self.setup_event_handlers(
                itask, self.EVENT_SUBMIT_FAILED,
                'job %s' % self.EVENT_SUBMIT_FAILED)
    else:
        # There is a submission retry lined up.
        timer = itask.try_timers[TASK_STATUS_SUBMIT_RETRYING]
        delay_msg = "submit-retrying in %s" % timer.delay_timeout_as_str()
        msg = "%s, %s" % (self.EVENT_SUBMIT_FAILED, delay_msg)
        LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg)
        itask.set_summary_message(msg)
        if itask.state.reset_state(TASK_STATUS_SUBMIT_RETRYING):
            self.setup_event_handlers(
                itask, self.EVENT_SUBMIT_RETRY,
                "job %s, %s" % (self.EVENT_SUBMIT_FAILED, delay_msg))
    self._reset_job_timers(itask)
示例2: _process_message_failed
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def _process_message_failed(self, itask, event_time, message):
    """Helper for process_message, handle a failed message.

    Records the failed run in the runtime DB, then either lines up an
    execution retry (if a retry timer has attempts left) or marks the
    task as definitively failed and fires its event handlers.
    """
    if event_time is None:
        event_time = get_current_time_string()
    itask.set_summary_time('finished', event_time)
    # Persist the failed run; run_status 1 == failed.
    self.suite_db_mgr.put_update_task_jobs(itask, {
        "run_status": 1,
        "time_run_exit": event_time,
    })
    if (TASK_STATUS_RETRYING not in itask.try_timers or
            itask.try_timers[TASK_STATUS_RETRYING].next() is None):
        # No retry lined up: definitive failure.
        self.pflag = True
        if itask.state.reset_state(TASK_STATUS_FAILED):
            self.setup_event_handlers(itask, "failed", message)
        LOG.critical(
            "[%s] -job(%02d) %s", itask, itask.submit_num, "failed")
    else:
        # There is a retry lined up
        delay_msg = "retrying in %s" % (
            itask.try_timers[TASK_STATUS_RETRYING].delay_timeout_as_str())
        msg = "failed, %s" % (delay_msg)
        LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg)
        itask.set_summary_message(msg)
        if itask.state.reset_state(TASK_STATUS_RETRYING):
            self.setup_event_handlers(
                itask, "retry", "%s, %s" % (self.JOB_FAILED, delay_msg))
    self._reset_job_timers(itask)
示例3: _process_message_succeeded
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def _process_message_succeeded(self, itask, event_time):
    """Helper for process_message, handle a succeeded message.

    Records the successful run in the runtime DB, updates the task's
    mean elapsed time, warns about any incomplete outputs, and resets
    the task state to succeeded.
    """
    self.pflag = True
    itask.set_summary_time('finished', event_time)
    self.suite_db_mgr.put_update_task_jobs(itask, {
        "run_status": 0,
        "time_run_exit": event_time,
    })
    # Update mean elapsed time only on task succeeded.
    started = itask.summary['started_time']
    if started is not None:
        itask.tdef.elapsed_times.append(
            itask.summary['finished_time'] - started)
    if not itask.state.outputs.all_completed():
        # Failure-type outputs are expected to be incomplete on success.
        ignore = (TASK_OUTPUT_EXPIRED,
                  TASK_OUTPUT_SUBMIT_FAILED,
                  TASK_OUTPUT_FAILED)
        msg = "".join(
            "\n  " + output
            for output in itask.state.outputs.get_not_completed()
            if output not in ignore)
        if msg:
            LOG.info(
                "[%s] -Succeeded with outputs not completed: %s",
                itask, msg)
    if itask.state.reset_state(TASK_STATUS_SUCCEEDED):
        self.setup_event_handlers(itask, "succeeded", "job succeeded")
    self._reset_job_timers(itask)
示例4: poll_task_jobs
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def poll_task_jobs(self, suite, itasks, poll_succ=True, msg=None):
    """Poll jobs of specified tasks.

    Any job that is or was submitted or running can be polled, except for
    retrying tasks - which would poll (correctly) as failed. And don't poll
    succeeded tasks by default.

    Args:
        suite: suite name.
        itasks: task proxies to consider for polling.
        poll_succ: if True, also poll tasks in the succeeded state.
        msg: optional message logged before polling starts.

    This method uses _poll_task_jobs_callback() and
    _manip_task_jobs_callback() as help/callback methods.

    _poll_task_job_callback() executes one specific job.
    """
    # Set literal instead of set([...]); membership tests are O(1).
    pollable_statuses = {
        TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, TASK_STATUS_FAILED}
    if poll_succ:
        pollable_statuses.add(TASK_STATUS_SUCCEEDED)
    to_poll_tasks = []
    for itask in itasks:
        if itask.state.status in pollable_statuses:
            to_poll_tasks.append(itask)
        else:
            # Lazy %-args: only formatted when DEBUG is enabled
            # (the original formatted eagerly with the % operator).
            LOG.debug("skipping %s: not pollable, "
                      "or skipping 'succeeded' tasks", itask.identity)
    if to_poll_tasks:
        if msg is not None:
            LOG.info(msg)
        self._run_job_cmd(
            self.JOBS_POLL, suite, to_poll_tasks,
            self._poll_task_jobs_callback)
示例5: clear_broadcast
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def clear_broadcast(
        self, point_strings=None, namespaces=None, cancel_settings=None):
    """Clear broadcasts globally, or for listed namespaces and/or points.

    Return a tuple (modified_settings, bad_options), where:
    * modified_settings is similar to the return value of the "put" method,
      but for removed broadcasts.
    * bad_options is a dict in the form:
      {"point_strings": ["20020202", ..."], ...}
      The dict is only populated if there are options not associated with
      previous broadcasts. The keys can be:
      * point_strings: a list of bad point strings.
      * namespaces: a list of bad namespaces.
      * cancel: a list of tuples. Each tuple contains the keys of a bad
        setting.
    """
    # If cancel_settings defined, only clear specific broadcasts
    cancel_keys_list = self._settings_to_keys_list(cancel_settings)
    # Clear broadcasts
    modified_settings = []
    with self.lock:
        for point_string, point_string_settings in self.broadcasts.items():
            if point_strings and point_string not in point_strings:
                continue
            for namespace, namespace_settings in (
                    point_string_settings.items()):
                if namespaces and namespace not in namespaces:
                    continue
                # Iterative depth-first walk of the nested settings dict;
                # each stack entry is (key path so far, sub-dict).
                stuff_stack = [([], namespace_settings)]
                while stuff_stack:
                    keys, stuff = stuff_stack.pop()
                    for key, value in stuff.items():
                        if isinstance(value, dict):
                            stuff_stack.append((keys + [key], value))
                        elif (not cancel_keys_list or
                                keys + [key] in cancel_keys_list):
                            # Clearing in-place; only existing keys are
                            # reassigned, so iterating .items() is safe.
                            stuff[key] = None
                            # Rebuild the nested single-setting dict for
                            # the modified-settings report.
                            setting = {key: value}
                            for rkey in reversed(keys):
                                setting = {rkey: setting}
                            modified_settings.append(
                                (point_string, namespace, setting))
        # Prune any empty branches
        bad_options = self._get_bad_options(
            self._prune(), point_strings, namespaces, cancel_keys_list)
    # Log the broadcast
    self.suite_db_mgr.put_broadcast(modified_settings, is_cancel=True)
    LOG.info(
        get_broadcast_change_report(modified_settings, is_cancel=True))
    if bad_options:
        LOG.error(get_broadcast_bad_options_report(bad_options))
    return (modified_settings, bad_options)
示例6: satisfy_xclock
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def satisfy_xclock(self, itask):
    """Attempt to satisfy itask's clock trigger, if it has one.

    No-op if the trigger is already satisfied; otherwise checks the wall
    clock and, on success, records the satisfied trigger on the task and
    in this manager's satisfied-clock list.
    """
    label, sig, ctx, satisfied = self._get_xclock(itask)
    if satisfied:
        return
    if wall_clock(*ctx.func_args, **ctx.func_kwargs):
        # (Removed dead local re-assignment `satisfied = True`; the
        # variable was never read again after this point.)
        itask.state.xclock = (label, True)
        self.sat_xclock.append(sig)
        # Lazy %-args instead of eager % formatting.
        LOG.info('clock xtrigger satisfied: %s = %s', label, str(ctx))
示例7: test_value_error_raises_system_exit
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def test_value_error_raises_system_exit(self, mocked_glbl_cfg):
    """Test that a ValueError when writing to a log stream won't result
    in multiple exceptions (what could lead to infinite loop in some
    occasions. Instead, it **must** raise a SystemExit."""
    with tempfile.NamedTemporaryFile() as tf:
        # mock objects used when creating the file handler
        mocked = mock.MagicMock()
        mocked_glbl_cfg.return_value = mocked
        mocked.get_derived_host_item.return_value = tf.name
        mocked.get.return_value = 100
        file_handler = TimestampRotatingFileHandler("suiteA", False)
        # next line is important as pytest can have a "Bad file descriptor"
        # due to a FileHandler with default "a" (pytest tries to r/w).
        file_handler.mode = "a+"
        # enable the logger
        LOG.setLevel(logging.INFO)
        LOG.addHandler(file_handler)
        # Disable raising uncaught exceptions in logging, due to file
        # handler using stdin.fileno. See the following links for more.
        # https://github.com/pytest-dev/pytest/issues/2276 &
        # https://github.com/pytest-dev/pytest/issues/1585
        logging.raiseExceptions = False
        # first message will initialize the stream and the handler
        LOG.info("What could go")
        # here we change the stream of the handler
        old_stream = file_handler.stream
        file_handler.stream = mock.MagicMock()
        file_handler.stream.seek = mock.MagicMock()
        # make the mocked stream raise ValueError on seek, so that emit()
        # hits the error path under test
        file_handler.stream.seek.side_effect = ValueError
        try:
            # next call will call the emit method and use the mocked stream
            LOG.info("wrong?!")
            # BUG FIX: this message previously named "SystemError", but
            # the exception under test (and caught below) is SystemExit.
            self.fail("Exception SystemExit was not raised")
        except SystemExit:
            pass
        finally:
            # clean up
            file_handler.stream = old_stream
            file_handler.close()
            LOG.removeHandler(file_handler)
            logging.raiseExceptions = True
示例8: _authorise
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def _authorise(self, *args, user='?', meta=None, **kwargs):
    """Check the client's privilege level before running the command.

    Denies (and raises) if the user's privilege level is below the
    required level; otherwise logs the client command and delegates.

    NOTE(review): `fcn` and `req_priv_level` are free names, presumably
    bound by an enclosing decorator factory -- confirm against full file.
    """
    if not meta:
        meta = {}
    host = meta.get('host', '?')
    prog = meta.get('prog', '?')
    usr_priv_level = self._get_priv_level(user)
    if usr_priv_level < req_priv_level:
        # BUG FIX: format string was scrambled to "%[email protected]..."
        # by e-mail obfuscation; restored "%s@%s" to match the 5 args.
        # Also: Logger.warn is deprecated, use Logger.warning.
        LOG.warning(
            "[client-connect] DENIED (privilege '%s' < '%s') %s@%s:%s",
            usr_priv_level, req_priv_level, user, host, prog)
        raise Exception('Authorisation failure')
    # Same scrambling fix here: restored "%s@%s" (4 args, 4 placeholders).
    LOG.info(
        '[client-command] %s %s@%s:%s', fcn.__name__, user, host, prog)
    return fcn(self, *args, **kwargs)
示例9: put_broadcast
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def put_broadcast(
        self, point_strings=None, namespaces=None, settings=None):
    """Add new broadcast settings (server side interface).

    Return a tuple (modified_settings, bad_options) where:
      modified_settings is list of modified settings in the form:
        [("20200202", "foo", {"script": "true"}, ...]
      bad_options is as described in the docstring for self.clear().
    """
    modified_settings = []
    bad_point_strings = []
    bad_namespaces = []
    with self.lock:
        for setting in settings:
            for point_string in point_strings:
                # Standardise the point and check its validity.
                bad_point = False
                try:
                    point_string = standardise_point_string(point_string)
                except PointParsingError:
                    # '*' means "all points" and is exempt from parsing.
                    if point_string != '*':
                        bad_point_strings.append(point_string)
                        bad_point = True
                if not bad_point and point_string not in self.broadcasts:
                    self.broadcasts[point_string] = {}
                for namespace in namespaces:
                    if namespace not in self.linearized_ancestors:
                        # Unknown namespace: record and skip.
                        bad_namespaces.append(namespace)
                    elif not bad_point:
                        if namespace not in self.broadcasts[point_string]:
                            self.broadcasts[point_string][namespace] = {}
                        # Merge the setting into the stored broadcasts.
                        self._addict(
                            self.broadcasts[point_string][namespace],
                            setting)
                        modified_settings.append(
                            (point_string, namespace, setting))
    # Log the broadcast
    self.suite_db_mgr.put_broadcast(modified_settings)
    LOG.info(get_broadcast_change_report(modified_settings))
    bad_options = {}
    if bad_point_strings:
        bad_options["point_strings"] = bad_point_strings
    if bad_namespaces:
        bad_options["namespaces"] = bad_namespaces
    return modified_settings, bad_options
示例10: check_task_jobs
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def check_task_jobs(self, suite, task_pool):
    """Check submission and execution timeout and polling timers.

    Poll tasks that have timed out and/or have reached next polling time.
    """
    now = time()
    poll_tasks = set()
    for itask in task_pool.get_tasks():
        # Guard clause: skip tasks whose job timers have not fired.
        if not self.task_events_mgr.check_job_time(itask, now):
            continue
        poll_tasks.add(itask)
        if itask.poll_timer.delay is not None:
            LOG.info(
                '[%s] -poll now, (next in %s)',
                itask, itask.poll_timer.delay_timeout_as_str())
    if poll_tasks:
        self.poll_task_jobs(suite, poll_tasks)
示例11: log_task_job_activity
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def log_task_job_activity(ctx, suite, point, name, submit_num=None):
    """Log an activity for a task job.

    Appends the string form of ctx to the task's job activity log file;
    falls back to the suite log if the job directory does not exist.
    Also mirrors command results to the suite log (error on non-zero
    return code, debug otherwise).
    """
    ctx_str = str(ctx)
    if not ctx_str:
        return
    if isinstance(ctx.cmd_key, tuple):  # An event handler
        submit_num = ctx.cmd_key[-1]
    job_activity_log = get_task_job_activity_log(
        suite, point, name, submit_num)
    try:
        with open(job_activity_log, "ab") as handle:
            handle.write((ctx_str + '\n').encode())
    except IOError as exc:
        # This happens when there is no job directory, e.g. if job host
        # selection command causes a submission failure, there will be no
        # job directory. In this case, just send the information to the
        # suite log.
        LOG.exception(exc)
        LOG.info(ctx_str)
    if ctx.cmd and ctx.ret_code:
        LOG.error(ctx_str)
    elif ctx.cmd:
        LOG.debug(ctx_str)
示例12: _process_message_submitted
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def _process_message_submitted(self, itask, event_time):
    """Helper for process_message, handle a submit-succeeded message.

    Records the successful submission in the runtime DB; in simulation
    mode immediately fakes job start, otherwise transitions the task to
    the submitted state and fires its event handlers.
    """
    try:
        LOG.info(
            '[%s] -job[%02d] submitted to %s:%s[%s]',
            itask,
            itask.summary['submit_num'],
            itask.summary['host'],
            itask.summary['batch_sys_name'],
            itask.summary['submit_method_id'])
    except KeyError:
        # Summary not fully populated yet; skip the log line.
        pass
    # Persist the submission; submit_status 0 == success.
    self.suite_db_mgr.put_update_task_jobs(itask, {
        "time_submit_exit": event_time,
        "submit_status": 0,
        "batch_sys_job_id": itask.summary.get('submit_method_id')})
    if itask.tdef.run_mode == 'simulation':
        # Simulate job execution at this point.
        itask.set_summary_time('submitted', event_time)
        itask.set_summary_time('started', event_time)
        itask.state.reset_state(TASK_STATUS_RUNNING)
        itask.state.outputs.set_completion(TASK_OUTPUT_STARTED, True)
        return
    itask.set_summary_time('submitted', event_time)
    # Unset started and finished times in case of resubmission.
    itask.set_summary_time('started')
    itask.set_summary_time('finished')
    itask.set_summary_message(TASK_OUTPUT_SUBMITTED)
    self.pflag = True
    if itask.state.status == TASK_STATUS_READY:
        # The job started message can (rarely) come in before the submit
        # command returns - in which case do not go back to 'submitted'.
        if itask.state.reset_state(TASK_STATUS_SUBMITTED):
            self.setup_event_handlers(
                itask, TASK_OUTPUT_SUBMITTED, 'job submitted')
        self._reset_job_timers(itask)
示例13: load_db_broadcast_states
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def load_db_broadcast_states(self, row_idx, row):
    """Load broadcast variables from runtime DB broadcast states row.

    Each row is (point, namespace, key, value); the key may contain
    "[section]" components which are expanded into nested dicts.
    """
    if row_idx == 0:
        # Announce once, on the first row of the load.
        LOG.info("LOADING broadcast states")
    point, namespace, key, value = row
    sections = []
    cur_key = key
    if "]" in cur_key:
        # Split "[sec1][sec2]item" into sections + the final item key.
        sections = self.REC_SECTION.findall(cur_key)
        cur_key = cur_key.rsplit(r"]", 1)[-1]
    with self.lock:
        self.broadcasts.setdefault(point, {})
        self.broadcasts[point].setdefault(namespace, {})
        dict_ = self.broadcasts[point][namespace]
        # Descend/create the nested section dicts, then set the leaf.
        for section in sections:
            dict_.setdefault(section, {})
            dict_ = dict_[section]
        dict_[cur_key] = value
    LOG.info(CHANGE_FMT.strip() % {
        "change": CHANGE_PREFIX_SET,
        "point": point,
        "namespace": namespace,
        "key": key,
        "value": value})
示例14: load_xtrigger_for_restart
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def load_xtrigger_for_restart(self, row_idx, row):
    """Load satisfied xtrigger results from suite DB.

    Each row is (signature, JSON-encoded results).
    """
    if row_idx == 0:
        LOG.info("LOADING satisfied xtriggers")
    signature, results_json = row
    self.sat_xtrig[signature] = json.loads(results_json)
示例15: _reset_job_timers
# 需要导入模块: from cylc import LOG [as 别名]
# 或者: from cylc.LOG import info [as 别名]
def _reset_job_timers(self, itask):
    """Set up poll timer and timeout for task.

    Active tasks get a poll timer (possibly adjusted to an execution
    time limit) and a timeout reference; inactive tasks have both
    cleared.
    """
    if itask.state.status not in TASK_STATUSES_ACTIVE:
        # Reset, task not active
        itask.timeout = None
        itask.poll_timer = None
        return
    ctx = (itask.submit_num, itask.state.status)
    if itask.poll_timer and itask.poll_timer.ctx == ctx:
        # Timers already set up for this submit/status combination.
        return
    # Set poll timer
    # Set timeout
    timeref = None  # reference time, submitted or started time
    timeout = None  # timeout in setting
    if itask.state.status == TASK_STATUS_RUNNING:
        timeref = itask.summary['started_time']
        timeout_key = 'execution timeout'
        timeout = self._get_events_conf(itask, timeout_key)
        delays = list(self.get_host_conf(
            itask, 'execution polling intervals', skey='job',
            default=[900]))  # Default 15 minute intervals
        if itask.summary[self.KEY_EXECUTE_TIME_LIMIT]:
            time_limit = itask.summary[self.KEY_EXECUTE_TIME_LIMIT]
            try:
                host_conf = self.get_host_conf(itask, 'batch systems')
                batch_sys_conf = host_conf[itask.summary['batch_sys_name']]
            except (TypeError, KeyError):
                batch_sys_conf = {}
            time_limit_delays = batch_sys_conf.get(
                'execution time limit polling intervals', [60, 120, 420])
            # Timeout derived from the limit plus the post-limit polls.
            timeout = time_limit + sum(time_limit_delays)
            # Remove excessive polling before time limit
            while sum(delays) > time_limit:
                del delays[-1]
            # But fill up the gap before time limit
            if delays:
                size = int((time_limit - sum(delays)) / delays[-1])
                delays.extend([delays[-1]] * size)
            # Stretch the first post-limit delay to land on the limit.
            time_limit_delays[0] += time_limit - sum(delays)
            delays += time_limit_delays
    else:  # if itask.state.status == TASK_STATUS_SUBMITTED:
        timeref = itask.summary['submitted_time']
        timeout_key = 'submission timeout'
        timeout = self._get_events_conf(itask, timeout_key)
        delays = list(self.get_host_conf(
            itask, 'submission polling intervals', skey='job',
            default=[900]))  # Default 15 minute intervals
    try:
        itask.timeout = timeref + float(timeout)
        timeout_str = intvl_as_str(timeout)
    except (TypeError, ValueError):
        # No (or non-numeric) timeout configured.
        itask.timeout = None
        timeout_str = None
    itask.poll_timer = TaskActionTimer(ctx=ctx, delays=delays)
    # Log timeout and polling schedule
    message = 'health check settings: %s=%s' % (timeout_key, timeout_str)
    # Attempt to group identical consecutive delays as N*DELAY,...
    if itask.poll_timer.delays:
        items = []  # [(number of item - 1, item), ...]
        for delay in itask.poll_timer.delays:
            if items and items[-1][1] == delay:
                items[-1][0] += 1
            else:
                items.append([0, delay])
        message += ', polling intervals='
        for num, item in items:
            if num:
                message += '%d*' % (num + 1)
            message += '%s,' % intvl_as_str(item)
        message += '...'
    LOG.info('[%s] -%s', itask, message)
    # Set next poll time
    self.check_poll_time(itask)