This article collects typical usage examples of the Python method oslo_utils.timeutils.is_older_than. If you are wondering what timeutils.is_older_than does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples from the containing module, oslo_utils.timeutils.
The sections below present 13 code examples of timeutils.is_older_than, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
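Before the examples, here is a minimal sketch of the method's behavior (illustrative only; the five-minute timestamp and the thresholds are arbitrary values chosen for the demo): is_older_than(before, seconds) returns True when the given timestamp lies more than `seconds` seconds in the past relative to the current UTC time.

import datetime

from oslo_utils import timeutils

# A timestamp from five minutes ago (timeutils.utcnow() returns a naive
# UTC datetime, which is what is_older_than compares against).
five_minutes_ago = timeutils.utcnow() - datetime.timedelta(minutes=5)

# True: the timestamp is more than 60 seconds in the past.
print(timeutils.is_older_than(five_minutes_ago, 60))

# False: the timestamp is not more than 600 seconds (10 minutes) in the past.
print(timeutils.is_older_than(five_minutes_ago, 600))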
Example 1: _test_and_create_object
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _test_and_create_object(uuid):
    try:
        session = db_api.get_writer_session()
        with session.begin():
            row = session.query(models.DFLockedObjects).filter_by(
                object_uuid=uuid).one()
            # test ttl
            if row.lock and timeutils.is_older_than(
                    row.created_at, cfg.CONF.df.distributed_lock_ttl):
                # reset the lock if it is timeout
                LOG.warning('The lock for object %(id)s is reset '
                            'due to timeout.', {'id': uuid})
                _lock_free_update(session, uuid, lock_state=True,
                                  session_id=row.session_id)
    except orm_exc.NoResultFound:
        try:
            session = db_api.get_writer_session()
            with session.begin():
                _create_db_row(session, oid=uuid)
        except db_exc.DBDuplicateEntry:
            # the lock is concurrently created.
            pass
Example 2: _service_manage_cleanup
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _service_manage_cleanup(self):
    try:
        ctx = senlin_context.get_admin_context()
        time_window = (2 * CONF.periodic_interval)
        svcs = service_obj.Service.get_all(ctx)
        for svc in svcs:
            if svc['id'] == self.service_id:
                continue
            if timeutils.is_older_than(svc['updated_at'], time_window):
                LOG.info('Service %s was aborted', svc['id'])
                LOG.info('Breaking locks for dead engine %s', svc['id'])
                service_obj.Service.gc_by_engine(svc['id'])
                LOG.info('Done breaking locks for engine %s', svc['id'])
                service_obj.Service.delete(svc['id'])
    except Exception as ex:
        LOG.error('Error while cleaning up engine service: %s', ex)
Example 3: _node_within_grace_period
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _node_within_grace_period(self, node):
    """Check if current time is within the node_update_timeout grace period.

    :returns: True if current time is less than node_update_timeout since
              last node update action. False otherwise.
    """
    node_last_updated = node.updated_at or node.init_at
    if timeutils.is_older_than(node_last_updated,
                               self.node_update_timeout):
        # node was last updated more than node_update_timeout seconds ago
        # -> we are outside the grace period
        LOG.info("%s was updated at %s which is more "
                 "than %d secs ago. Mark node as unhealthy.",
                 node.name, node_last_updated,
                 self.node_update_timeout)
        return False
    else:
        # node was last updated less than node_update_timeout seconds ago
        # -> we are inside the grace period
        LOG.info("%s was updated at %s which is less "
                 "than %d secs ago. Mark node as healthy.",
                 node.name, node_last_updated,
                 self.node_update_timeout)
        return True
Example 4: is_engine_dead
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def is_engine_dead(ctx, engine_id, duration=None):
    """Check if an engine is dead.

    If engine hasn't reported its status for the given duration, it is
    treated as a dead engine.

    :param ctx: A request context.
    :param engine_id: The ID of the engine to test.
    :param duration: The time duration in seconds.
    """
    if not duration:
        duration = 2 * cfg.CONF.periodic_interval
    eng = service_obj.Service.get(ctx, engine_id)
    if not eng:
        return True
    if timeutils.is_older_than(eng.updated_at, duration):
        return True
    return False
Example 5: _get_project_alarms
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _get_project_alarms(self, project):
    if self.conf.event_alarm_cache_ttl and project in self.caches:
        if timeutils.is_older_than(self.caches[project]['updated'],
                                   self.conf.event_alarm_cache_ttl):
            del self.caches[project]
        else:
            return self.caches[project]['alarms']

    # TODO(r-mibu): Implement "changes-since" at the storage API and make
    # this function update only alarms changed from the last access.
    alarms = {a.alarm_id: Alarm(a) for a in
              self._storage_conn.get_alarms(enabled=True,
                                            type='event',
                                            project_id=project)}

    if self.conf.event_alarm_cache_ttl:
        self.caches[project] = {
            'alarms': alarms,
            'updated': timeutils.utcnow()
        }

    return alarms
Example 6: _approle_token_id
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _approle_token_id(self):
    if (all((self._approle_token_issue, self._approle_token_ttl)) and
            timeutils.is_older_than(self._approle_token_issue,
                                    self._approle_token_ttl)):
        self._cached_approle_token_id = None
    return self._cached_approle_token_id
Example 7: _process_unfinished_notifications
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _process_unfinished_notifications(self, context):
    filters = {
        'status': [fields.NotificationStatus.ERROR,
                   fields.NotificationStatus.NEW]
    }
    notifications_list = objects.NotificationList.get_all(context,
                                                          filters=filters)

    for notification in notifications_list:
        if (notification.status == fields.NotificationStatus.ERROR or
                (notification.status == fields.NotificationStatus.NEW and
                 timeutils.is_older_than(
                     notification.generated_time,
                     CONF.retry_notification_new_status_interval))):
            self._process_notification(context, notification)

        # get updated notification from db after workflow execution
        notification_db = objects.Notification.get_by_uuid(
            context, notification.notification_uuid)

        if notification_db.status == fields.NotificationStatus.ERROR:
            # update notification status as failed
            notification_status = fields.NotificationStatus.FAILED
            update_data = {
                'status': notification_status
            }
            notification_db.update(update_data)
            notification_db.save()
            LOG.error(
                "Periodic task 'process_unfinished_notifications': "
                "Notification %(notification_uuid)s exits with "
                "status: %(status)s.",
                {'notification_uuid': notification.notification_uuid,
                 'status': notification_status})
Example 8: run_monitor
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def run_monitor(self, hosting_vnf):
    mgmt_ips = hosting_vnf['mgmt_ip_addresses']
    vdupolicies = hosting_vnf['monitoring_policy']['vdus']
    vnf_delay = hosting_vnf['monitoring_policy'].get(
        'monitoring_delay', self.boot_wait)
    for vdu in vdupolicies:
        if hosting_vnf.get('dead') or (
                hosting_vnf['vnf']['status']) == constants.PENDING_HEAL:
            return
        policy = vdupolicies[vdu]
        for driver in policy:
            params = policy[driver].get('monitoring_params', {})
            vdu_delay = params.get('monitoring_delay', vnf_delay)
            if not timeutils.is_older_than(hosting_vnf['boot_at'],
                                           vdu_delay):
                continue
            actions = policy[driver].get('actions', {})
            params['mgmt_ip'] = mgmt_ips[vdu]
            driver_return = self.monitor_call(driver,
                                              hosting_vnf['vnf'],
                                              params)
            LOG.debug('driver_return %s', driver_return)
            if driver_return in actions:
                action = actions[driver_return]
                hosting_vnf['action_cb'](action, vdu_name=vdu)
Example 9: is_service_dead
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def is_service_dead(service):
    """Check if a given service is dead."""
    cfg.CONF.import_opt("periodic_interval", "senlin.conf")
    max_elapse = 2 * cfg.CONF.periodic_interval
    return timeutils.is_older_than(service.updated_at, max_elapse)
Example 10: _test_is_older_than
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def _test_is_older_than(self, fn, datetime_mock):
    datetime_mock.utcnow.return_value = self.skynet_self_aware_time
    expect_true = timeutils.is_older_than(fn(self.one_minute_before),
                                          59)
    self.assertTrue(expect_true)
    expect_false = timeutils.is_older_than(fn(self.one_minute_before),
                                           60)
    self.assertFalse(expect_false)
    expect_false = timeutils.is_older_than(fn(self.one_minute_before),
                                           61)
    self.assertFalse(expect_false)
Example 11: test_is_older_than_aware
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def test_is_older_than_aware(self):
    """Tests sending is_older_than an 'aware' datetime."""
    self._test_is_older_than(lambda x: x.replace(
        tzinfo=iso8601.iso8601.UTC))
Example 12: is_ec2_timestamp_expired
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired
    """
    query_time = None
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')

    def parse_strtime(strtime):
        if _ms_time_regex.match(strtime):
            # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
            time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
        else:
            time_format = "%Y-%m-%dT%H:%M:%SZ"
        return timeutils.parse_strtime(strtime, time_format)

    try:
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = parse_strtime(expiry_time)
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = parse_strtime(timestamp)

        # Check if the difference between the timestamp in the request
        # and the time on our servers is larger than 5 minutes, the
        # request is too old (or too new).
        if query_time and expires:
            return (timeutils.is_older_than(query_time, expires) or
                    timeutils.is_newer_than(query_time, expires))
        return False
    except ValueError:
        LOG.exception("Timestamp is invalid: ")
        return True

# NOTE(ft): extra functions to use in vpc specific code or instead of
# malformed existed functions
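As a follow-up to the example above: combining is_older_than with its counterpart is_newer_than gives a symmetric freshness window around the current time. A minimal sketch of that idea in isolation (the helper name and the 300-second window are arbitrary choices for this demo, not part of the example above):

import datetime

from oslo_utils import timeutils

def outside_window(ts, window_seconds=300):
    # True when ts deviates from "now" by more than window_seconds in
    # either direction, i.e. it is too old or too far in the future.
    return (timeutils.is_older_than(ts, window_seconds) or
            timeutils.is_newer_than(ts, window_seconds))

ten_minutes_ago = timeutils.utcnow() - datetime.timedelta(minutes=10)
print(outside_window(ten_minutes_ago))      # True: older than the window
print(outside_window(timeutils.utcnow()))   # False: well inside the window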
Example 13: validate_scaling_action
# Required import: from oslo_utils import timeutils [as alias]
# Or: from oslo_utils.timeutils import is_older_than [as alias]
def validate_scaling_action(ctx, cluster_id, action):
    """Validate scaling action against actions table and policy cooldown.

    :param ctx: An instance of the request context.
    :param cluster_id: ID of the cluster the scaling action is targeting.
    :param action: Scaling action being validated.
    :return: None
    :raises: An exception of ``ActionCooldown`` when the action being
             validated is still in cooldown based off the policy or
             ``ActionConflict`` when a scaling action is already in the
             action table.
    """
    # Check for conflicting actions in the actions table.
    conflicting_actions = Action._get_conflicting_scaling_actions(
        ctx, cluster_id)
    if conflicting_actions:
        action_ids = [a.get('id', None) for a in conflicting_actions]
        LOG.info("Unable to process %(action)s for cluster %(cluster_id)s "
                 "the action conflicts with %(conflicts)s",
                 {'action': action,
                  'cluster_id': cluster_id,
                  'conflicts': action_ids})
        raise exception.ActionConflict(
            type=action,
            target=cluster_id,
            actions=",".join(action_ids))

    # Check to see if action cooldown should be observed.
    bindings = cpo.ClusterPolicy.get_all(ctx, cluster_id,
                                         sort='priority',
                                         filters={'enabled': True})
    for pb in bindings:
        policy = policy_mod.Policy.load(ctx, pb.policy_id)
        if getattr(policy, 'cooldown', None) and policy.event == action:
            if pb.last_op and not timeutils.is_older_than(
                    pb.last_op, policy.cooldown):
                LOG.info("Unable to process %(action)s for cluster "
                         "%(cluster_id)s the actions policy %(policy)s "
                         "cooldown still in progress",
                         {'action': action,
                          'cluster_id': cluster_id,
                          'policy': pb.policy_id})
                raise exception.ActionCooldown(
                    type=action,
                    cluster=cluster_id,
                    policy_id=pb.policy_id)
    return