本文整理汇总了Python中cylc.flow.LOG.info方法的典型用法代码示例。如果您正苦于以下问题:Python LOG.info方法的具体用法?Python LOG.info怎么用?Python LOG.info使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cylc.flow.LOG
的用法示例。
在下文中一共展示了LOG.info方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: poll_task_jobs
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def poll_task_jobs(self, suite, itasks, poll_succ=True, msg=None):
    """Poll jobs of specified tasks.

    Any job that is or was submitted or running can be polled, except for
    retrying tasks - which would poll (correctly) as failed. And don't poll
    succeeded tasks by default.

    Args:
        suite: suite name, forwarded to the job command runner.
        itasks: task proxies to consider for polling.
        poll_succ: if True, tasks in the succeeded state are pollable too.
        msg: optional message logged at INFO before the poll is issued.

    This method uses _poll_task_jobs_callback() and
    _manip_task_jobs_callback() as help/callback methods.

    _poll_task_job_callback() executes one specific job.
    """
    # Set literal is the idiomatic (and faster) form of set([...]).
    pollable_statuses = {
        TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, TASK_STATUS_FAILED}
    if poll_succ:
        pollable_statuses.add(TASK_STATUS_SUCCEEDED)
    to_poll_tasks = []
    for itask in itasks:
        if itask.state.status in pollable_statuses:
            to_poll_tasks.append(itask)
        else:
            LOG.debug("skipping %s: not pollable, "
                      "or skipping 'succeeded' tasks" % itask.identity)
    if to_poll_tasks:
        if msg is not None:
            LOG.info(msg)
        self._run_job_cmd(
            self.JOBS_POLL, suite, to_poll_tasks,
            self._poll_task_jobs_callback)
示例2: clear_broadcast
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def clear_broadcast(
        self, point_strings=None, namespaces=None, cancel_settings=None):
    """Clear broadcasts globally, or for listed namespaces and/or points.

    Return a tuple (modified_settings, bad_options), where:
    * modified_settings is similar to the return value of the "put" method,
      but for removed broadcasts.
    * bad_options is a dict in the form:
          {"point_strings": ["20020202", ..."], ...}
      The dict is only populated if there are options not associated with
      previous broadcasts. The keys can be:
      * point_strings: a list of bad point strings.
      * namespaces: a list of bad namespaces.
      * cancel: a list of tuples. Each tuple contains the keys of a bad
        setting.
    """
    # If cancel_settings defined, only clear specific broadcasts
    cancel_keys_list = self._settings_to_keys_list(cancel_settings)
    # Clear broadcasts
    modified_settings = []
    with self.lock:
        for point_string, point_string_settings in self.broadcasts.items():
            # None/empty point_strings means "match every point".
            if point_strings and point_string not in point_strings:
                continue
            for namespace, namespace_settings in (
                    point_string_settings.items()):
                # None/empty namespaces means "match every namespace".
                if namespaces and namespace not in namespaces:
                    continue
                # Depth-first walk of the (possibly nested) settings dict
                # via an explicit stack; `keys` is the path of section
                # keys leading to `stuff`.
                stuff_stack = [([], namespace_settings)]
                while stuff_stack:
                    keys, stuff = stuff_stack.pop()
                    for key, value in stuff.items():
                        if isinstance(value, dict):
                            stuff_stack.append((keys + [key], value))
                        elif (not cancel_keys_list or
                                keys + [key] in cancel_keys_list):
                            # Mark leaf as cleared (None); _prune() below
                            # removes the emptied branches.
                            stuff[key] = None
                            # Rebuild a nested single-entry dict mirroring
                            # the path to this leaf, for the report/DB.
                            setting = {key: value}
                            for rkey in reversed(keys):
                                setting = {rkey: setting}
                            modified_settings.append(
                                (point_string, namespace, setting))
    # Prune any empty branches
    bad_options = self._get_bad_options(
        self._prune(), point_strings, namespaces, cancel_keys_list)
    # Log the broadcast
    self.suite_db_mgr.put_broadcast(modified_settings, is_cancel=True)
    LOG.info(
        get_broadcast_change_report(modified_settings, is_cancel=True))
    if bad_options:
        LOG.error(get_broadcast_bad_options_report(bad_options))
    return modified_settings, bad_options
示例3: _run_event_handlers_callback
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def _run_event_handlers_callback(proc_ctx, abort_on_error=False):
    """Callback on completion of a suite event handler."""
    if not proc_ctx.ret_code:
        # Handler exited cleanly; record its context at INFO level.
        LOG.info(str(proc_ctx))
        return
    failure_msg = '%s EVENT HANDLER FAILED' % proc_ctx.cmd_key[1]
    LOG.error(str(proc_ctx))
    LOG.error(failure_msg)
    if abort_on_error:
        raise SuiteEventError(failure_msg)
示例4: test_value_error_raises_system_exit
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def test_value_error_raises_system_exit(self, mocked_glbl_cfg):
    """Test that a ValueError when writing to a log stream won't result
    in multiple exceptions (what could lead to infinite loop in some
    occasions). Instead, it **must** raise a SystemExit.
    """
    with tempfile.NamedTemporaryFile() as tf:
        # mock objects used when creating the file handler
        mocked = mock.MagicMock()
        mocked_glbl_cfg.return_value = mocked
        mocked.get_derived_host_item.return_value = tf.name
        mocked.get.return_value = 100
        file_handler = TimestampRotatingFileHandler("suiteA", False)
        # next line is important as pytest can have a "Bad file descriptor"
        # due to a FileHandler with default "a" (pytest tries to r/w).
        file_handler.mode = "a+"
        # enable the logger
        LOG.setLevel(logging.INFO)
        LOG.addHandler(file_handler)
        # Disable raising uncaught exceptions in logging, due to file
        # handler using stdin.fileno. See the following links for more.
        # https://github.com/pytest-dev/pytest/issues/2276 &
        # https://github.com/pytest-dev/pytest/issues/1585
        logging.raiseExceptions = False
        # first message will initialize the stream and the handler
        LOG.info("What could go")
        # here we change the stream of the handler to a mock whose seek()
        # raises ValueError, simulating a write to a broken/closed stream
        old_stream = file_handler.stream
        file_handler.stream = mock.MagicMock()
        file_handler.stream.seek = mock.MagicMock()
        file_handler.stream.seek.side_effect = ValueError
        try:
            # next call will call the emit method and use the mocked stream
            LOG.info("wrong?!")
            self.fail("Exception SystemError was not raised")
        except SystemExit:
            pass
        finally:
            # clean up: restore the real stream before closing so the
            # handler shuts down without touching the broken mock
            file_handler.stream = old_stream
            file_handler.close()
            LOG.removeHandler(file_handler)
            logging.raiseExceptions = True
示例5: _authorise
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def _authorise(self, *args, user='?', meta=None, **kwargs):
    """Check the client's privilege level before running the command.

    Raises:
        Exception: if the user's privilege level is below the level
            required by the wrapped command.
    """
    if not meta:
        meta = {}
    host = meta.get('host', '?')
    prog = meta.get('prog', '?')
    usr_priv_level = self._get_priv_level(user)
    if usr_priv_level < req_priv_level:
        # Fixed scrape-mangled "%[email protected]%s" -> "%s@%s" (five %s
        # placeholders for the five lazy log args) and replaced the
        # deprecated Logger.warn with Logger.warning.
        LOG.warning(
            "[client-connect] DENIED (privilege '%s' < '%s') %s@%s:%s",
            usr_priv_level, req_priv_level, user, host, prog)
        raise Exception('Authorisation failure')
    LOG.info(
        '[client-command] %s %s@%s:%s', fcn.__name__, user, host, prog)
    return fcn(self, *args, **kwargs)
示例6: put_broadcast
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def put_broadcast(
        self, point_strings=None, namespaces=None, settings=None):
    """Add new broadcast settings (server side interface).

    Return a tuple (modified_settings, bad_options) where:
    modified_settings is list of modified settings in the form:
        [("20200202", "foo", {"script": "true"}, ...]
    bad_options is as described in the docstring for self.clear().
    """
    modified_settings = []
    bad_point_strings = []
    bad_namespaces = []
    with self.lock:
        for setting in settings:
            for point_string in point_strings:
                # Standardise the point and check its validity.
                point_invalid = False
                try:
                    point_string = standardise_point_string(point_string)
                except PointParsingError:
                    # '*' (all points) is exempt from standardisation.
                    if point_string != '*':
                        bad_point_strings.append(point_string)
                        point_invalid = True
                if not point_invalid and point_string not in self.broadcasts:
                    self.broadcasts[point_string] = {}
                for namespace in namespaces:
                    if namespace not in self.linearized_ancestors:
                        bad_namespaces.append(namespace)
                    elif not point_invalid:
                        branch = self.broadcasts[point_string].setdefault(
                            namespace, {})
                        self._addict(branch, setting)
                        modified_settings.append(
                            (point_string, namespace, setting))
    # Log the broadcast
    self.suite_db_mgr.put_broadcast(modified_settings)
    LOG.info(get_broadcast_change_report(modified_settings))
    bad_options = {}
    if bad_point_strings:
        bad_options["point_strings"] = bad_point_strings
    if bad_namespaces:
        bad_options["namespaces"] = bad_namespaces
    return modified_settings, bad_options
示例7: check_task_jobs
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def check_task_jobs(self, suite, task_pool):
    """Check submission and execution timeout and polling timers.

    Poll tasks that have timed out and/or have reached next polling time.
    """
    now = time()
    overdue_tasks = set()
    for itask in task_pool.get_tasks():
        if not self.task_events_mgr.check_job_time(itask, now):
            continue
        overdue_tasks.add(itask)
        if itask.poll_timer.delay is not None:
            LOG.info(
                '[%s] -poll now, (next in %s)',
                itask, itask.poll_timer.delay_timeout_as_str())
    if overdue_tasks:
        self.poll_task_jobs(suite, overdue_tasks)
示例8: verify_triggering
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def verify_triggering(self):
    """Check this run's triggering against the reference log.

    Raises:
        LogAnalyserError: if either log contains no triggering info, or
            the two (sorted) sets of triggering lines differ.
    """
    new = self.get_triggered(self.new_loglines)
    ref = self.get_triggered(self.ref_loglines)
    # Truthiness test is the idiomatic form of len(...) == 0.
    if not new:
        raise LogAnalyserError(
            "new log contains no triggering info.")
    if not ref:
        raise LogAnalyserError(
            "reference log contains no triggering info.")
    new.sort()
    ref.sort()
    if new != ref:
        diff = unified_diff(new, ref, 'this run', 'reference log')
        raise LogAnalyserError(
            "triggering is NOT consistent with the reference log:" +
            '\n' + '\n'.join(diff) + '\n')
    LOG.info(
        "LogAnalyser: triggering is consistent with the reference log")
示例9: load_db_broadcast_states
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def load_db_broadcast_states(self, row_idx, row):
    """Load broadcast variables from runtime DB broadcast states row."""
    if row_idx == 0:
        LOG.info("LOADING broadcast states")
    point, namespace, key, value = row
    # Split a "[section]...key" style key into its section path plus the
    # leaf key after the last "]".
    section_path = []
    leaf_key = key
    if "]" in leaf_key:
        section_path = self.REC_SECTION.findall(leaf_key)
        leaf_key = leaf_key.rsplit(r"]", 1)[-1]
    with self.lock:
        target = self.broadcasts.setdefault(point, {}).setdefault(
            namespace, {})
        for section in section_path:
            target = target.setdefault(section, {})
        target[leaf_key] = value
    LOG.info(CHANGE_FMT.strip() % {
        "change": CHANGE_PREFIX_SET,
        "point": point,
        "namespace": namespace,
        "key": key,
        "value": value})
示例10: submit_task_jobs
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def submit_task_jobs(self, suite, itasks, is_simulation=False):
"""Prepare and submit task jobs.
Submit tasks where possible. Ignore tasks that are waiting for host
select command to complete, or tasks that are waiting for remote
initialisation. Bad host select command, error writing to a job file or
bad remote initialisation will cause a bad task - leading to submission
failure.
This method uses prep_submit_task_job() as helper.
Return (list): list of tasks that attempted submission.
"""
if is_simulation:
return self._simulation_submit_task_jobs(itasks)
# Prepare tasks for job submission
prepared_tasks, bad_tasks = self.prep_submit_task_jobs(suite, itasks)
# Reset consumed host selection results
self.task_remote_mgr.remote_host_select_reset()
if not prepared_tasks:
return bad_tasks
# Group task jobs by (host, owner)
auth_itasks = {} # {(host, owner): [itask, ...], ...}
for itask in prepared_tasks:
auth_itasks.setdefault((itask.task_host, itask.task_owner), [])
auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
# Submit task jobs for each (host, owner) group
done_tasks = bad_tasks
for (host, owner), itasks in sorted(auth_itasks.items()):
is_init = self.task_remote_mgr.remote_init(host, owner)
if is_init is None:
# Remote is waiting to be initialised
for itask in itasks:
itask.set_summary_message(self.REMOTE_INIT_MSG)
continue
# Ensure that localhost background/at jobs are recorded as running
# on the host name of the current suite host, rather than just
# "localhost". On suite restart on a different suite host, this
# allows the restart logic to correctly poll the status of the
# background/at jobs that may still be running on the previous
# suite host.
if (
self.batch_sys_mgr.is_job_local_to_host(
itask.summary['batch_sys_name']) and
not is_remote_host(host)
):
owner_at_host = get_host()
else:
owner_at_host = host
# Persist
if owner:
owner_at_host = owner + '@' + owner_at_host
now_str = get_current_time_string()
done_tasks.extend(itasks)
for itask in itasks:
# Log and persist
LOG.info(
'[%s] -submit-num=%02d, [email protected]=%s',
itask, itask.submit_num, owner_at_host)
self.suite_db_mgr.put_insert_task_jobs(itask, {
'is_manual_submit': itask.is_manual_submit,
'try_num': itask.get_try_num(),
'time_submit': now_str,
'user_at_host': owner_at_host,
'batch_sys_name': itask.summary['batch_sys_name'],
})
itask.is_manual_submit = False
if is_init == REMOTE_INIT_FAILED:
# Remote has failed to initialise
# Set submit-failed for all affected tasks
for itask in itasks:
itask.local_job_file_path = None # reset for retry
log_task_job_activity(
SubProcContext(
self.JOBS_SUBMIT,
'(init %s)' % owner_at_host,
err=REMOTE_INIT_FAILED,
ret_code=1),
suite, itask.point, itask.tdef.name)
self.task_events_mgr.process_message(
itask, CRITICAL,
self.task_events_mgr.EVENT_SUBMIT_FAILED)
continue
# Build the "cylc jobs-submit" command
cmd = ['cylc', self.JOBS_SUBMIT]
if LOG.isEnabledFor(DEBUG):
cmd.append('--debug')
if get_utc_mode():
cmd.append('--utc-mode')
remote_mode = False
kwargs = {}
for key, value, test_func in [
('host', host, is_remote_host),
('user', owner, is_remote_user)]:
if test_func(value):
cmd.append('--%s=%s' % (key, value))
#.........这里部分代码省略.........
示例11: _run_event_mail_callback
# 需要导入模块: from cylc.flow import LOG [as 别名]
# 或者: from cylc.flow.LOG import info [as 别名]
def _run_event_mail_callback(proc_ctx):
    """Callback the mail command for notification of a suite event."""
    # Non-zero return code from the mail command is a warning; success
    # is logged at INFO.
    log_func = LOG.warning if proc_ctx.ret_code else LOG.info
    log_func(str(proc_ctx))