本文整理汇总了Python中qonos.common.timeutils.parse_isotime函数的典型用法代码示例。如果您正苦于以下问题:Python parse_isotime函数的具体用法?Python parse_isotime怎么用?Python parse_isotime使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了parse_isotime函数的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_request_params
def _get_request_params(self, request):
filter_args = {}
params = request.params
if params.get('next_run_after') is not None:
next_run_after = params['next_run_after']
next_run_after = timeutils.parse_isotime(next_run_after)
next_run_after = timeutils.normalize_time(next_run_after)
filter_args['next_run_after'] = next_run_after
if params.get('next_run_before') is not None:
next_run_before = params['next_run_before']
next_run_before = timeutils.parse_isotime(next_run_before)
next_run_before = timeutils.normalize_time(next_run_before)
filter_args['next_run_before'] = next_run_before
if request.params.get('tenant') is not None:
filter_args['tenant'] = request.params['tenant']
filter_args['limit'] = params.get('limit')
filter_args['marker'] = params.get('marker')
for filter_key in params.keys():
if filter_key not in filter_args:
filter_args[filter_key] = params[filter_key]
return filter_args
示例2: update
def update(self, request, schedule_id, body):
    """Update schedule ``schedule_id`` with the 'schedule' entity in ``body``.

    If any cron-style time field changes, next_run is recalculated;
    otherwise an explicitly supplied 'next_run' is validated as ISO 8601.

    :raises webob.exc.HTTPBadRequest: empty body, missing 'schedule',
        blank 'tenant', or malformed 'next_run'
    :raises webob.exc.HTTPForbidden: read-only property in the update
    :raises webob.exc.HTTPNotFound: no such schedule
    """
    if not body:
        raise webob.exc.HTTPBadRequest(
            explanation=_('The request body must not be empty'))
    if 'schedule' not in body:
        raise webob.exc.HTTPBadRequest(
            explanation=_('The request body must contain a "schedule" entity'))
    # NOTE(jculp): only raise if a blank tenant is passed
    # passing no tenant at all is perfectly fine.
    if ('tenant' in body['schedule'] and
            not body['schedule']['tenant'].strip()):
        raise webob.exc.HTTPBadRequest(
            explanation=_('The request body has not specified a "tenant"'
                          ' entity'))

    api_utils.deserialize_schedule_metadata(body['schedule'])
    values = dict(body['schedule'])
    try:
        values = api_utils.check_read_only_properties(values)
    except exception.Forbidden as e:
        raise webob.exc.HTTPForbidden(explanation=unicode(e))

    request_next_run = body['schedule'].get('next_run')
    times = dict.fromkeys(
        ('minute', 'hour', 'month', 'day_of_week', 'day_of_month'))
    changed_keys = [key for key in times if key in values]
    for key in changed_keys:
        times[key] = values[key]

    if changed_keys:
        # NOTE(ameade): We must recalculate the schedules next_run time
        # since the schedule has changed
        values.update(times)
        values['next_run'] = api_utils.schedule_to_next_run(times)
    elif request_next_run:
        # Only validate; the value itself is stored as given.
        try:
            timeutils.parse_isotime(request_next_run)
        except ValueError:
            msg = _('Invalid "next_run" value. Must be ISO 8601 format')
            raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        schedule = self.db_api.schedule_update(schedule_id, values)
    except exception.NotFound:
        msg = _('Schedule %s could not be found.') % schedule_id
        raise webob.exc.HTTPNotFound(explanation=msg)
    utils.serialize_datetimes(schedule)
    api_utils.serialize_schedule_metadata(schedule)
    return {'schedule': schedule}
示例3: create
def create(self, request, body):
    """Create a job from an existing schedule.

    Validates the body, test-and-sets the schedule's next_run against
    the caller's expected value (409 on mismatch), stamps the
    schedule's last_scheduled, and queues a new job carrying the
    schedule's tenant, action and metadata.

    :param request: the incoming webob request (unused beyond routing)
    :param body: dict with a 'job' entity containing 'schedule_id' and
                 optionally 'next_run' (ISO 8601 string)
    :returns: {'job': <serialized job dict>}
    :raises webob.exc.HTTPBadRequest: missing body/'job'/'schedule_id'
    :raises webob.exc.HTTPNotFound: schedule does not exist
    :raises webob.exc.HTTPConflict: schedule's next_run no longer
        matches the expected value
    """
    if (body is None or body.get('job') is None or
            body['job'].get('schedule_id') is None):
        raise webob.exc.HTTPBadRequest()

    job = body['job']
    try:
        schedule = self.db_api.schedule_get_by_id(job['schedule_id'])
    except exception.NotFound:
        raise webob.exc.HTTPNotFound()

    # Check integrity of schedule and update next run
    expected_next_run = job.get('next_run')
    if expected_next_run:
        expected_next_run = timeutils.parse_isotime(job.get('next_run'))
    next_run = api_utils.schedule_to_next_run(schedule, timeutils.utcnow())
    try:
        self.db_api.schedule_test_and_set_next_run(schedule['id'],
                                                   expected_next_run,
                                                   next_run)
    except exception.NotFound:
        # BUG FIX: the original literals concatenated to "...changedor
        # has already..." — a space was missing between the fragments.
        msg = _("Specified next run does not match the current next run"
                " value. This could mean schedule has either changed"
                " or has already been scheduled since you last expected.")
        raise webob.exc.HTTPConflict(explanation=msg)

    # Update schedule last_scheduled
    values = {}
    values['last_scheduled'] = timeutils.utcnow()
    self.db_api.schedule_update(schedule['id'], values)

    # Create job: inherit tenant/action/metadata from the schedule.
    values = {}
    values.update(job)
    values['tenant'] = schedule['tenant']
    values['action'] = schedule['action']
    values['status'] = 'QUEUED'
    job_metadata = []
    for metadata in schedule['schedule_metadata']:
        job_metadata.append({
            'key': metadata['key'],
            'value': metadata['value']
        })
    values['job_metadata'] = job_metadata

    job_action = values['action']
    # Default both timeouts per action type when none was supplied.
    if 'timeout' not in values:
        values['timeout'] = api_utils.get_new_timeout_by_action(job_action)
        values['hard_timeout'] = \
            api_utils.get_new_timeout_by_action(job_action)

    job = self.db_api.job_create(values)
    utils.serialize_datetimes(job)
    api_utils.serialize_job_metadata(job)
    job = {'job': job}
    utils.generate_notification(None, 'qonos.job.create', job, 'INFO')
    return job
示例4: test_create_with_next_run
def test_create_with_next_run(self):
    """A job created with a matching 'next_run' is queued, and the
    schedule's next_run/last_scheduled are both advanced."""
    expected_next_run = timeutils.parse_isotime('1989-01-19T12:00:00Z')

    def fake_schedule_to_next_run(_schedule, start_time=None):
        self.assertEqual(timeutils.utcnow(), start_time)
        return expected_next_run

    self.stubs.Set(api_utils, 'schedule_to_next_run',
                   fake_schedule_to_next_run)
    self._stub_notifications(None, 'qonos.job.create', 'fake-payload',
                             'INFO')

    request = unit_utils.get_fake_request(method='POST')
    current_next_run = timeutils.isotime(self.schedule_1['next_run'])
    fixture = {'job': {'schedule_id': self.schedule_1['id'],
                       'next_run': current_next_run}}
    job = self.controller.create(request, fixture).get('job')

    # The created job mirrors the schedule and starts QUEUED.
    self.assertNotEqual(job, None)
    self.assertNotEqual(job.get('id'), None)
    self.assertEqual(job['schedule_id'], self.schedule_1['id'])
    self.assertEqual(job['tenant'], self.schedule_1['tenant'])
    self.assertEqual(job['action'], self.schedule_1['action'])
    self.assertEqual(job['status'], 'QUEUED')
    self.assertEqual(len(job['metadata']), 0)

    # The schedule advanced to the stubbed next_run and was stamped.
    schedule = db_api.schedule_get_by_id(self.schedule_1['id'])
    self.assertNotEqual(schedule['next_run'], self.schedule_1['next_run'])
    self.assertEqual(schedule['next_run'], expected_next_run)
    self.assertNotEqual(schedule['last_scheduled'],
                        self.schedule_1.get('last_scheduled'))
    self.assertTrue(schedule.get('last_scheduled'))
示例5: list
def list(self, request):
    """List jobs, applying pagination and status/timeout filters.

    Returns {'jobs': [...], 'jobs_links': [...]} where the 'next' link
    href is None when the current page is not full.
    """
    params = request.params.copy()
    try:
        params = utils.get_pagination_limit(params)
    except exception.Invalid as e:
        raise webob.exc.HTTPBadRequest(explanation=str(e))

    if 'status' in params:
        params['status'] = str(params['status']).upper()
    # Timeout filters arrive as ISO 8601 strings; normalize to UTC.
    for time_key in ('timeout', 'hard_timeout'):
        if time_key in params:
            parsed = timeutils.parse_isotime(params[time_key])
            params[time_key] = timeutils.normalize_time(parsed)

    try:
        jobs = self.db_api.job_get_all(params)
    except exception.NotFound:
        raise webob.exc.HTTPNotFound()

    # A full page implies there may be more results after the last id.
    next_page = None
    if jobs and len(jobs) == params.get('limit'):
        next_page = '/v1/jobs?marker=%s' % jobs[-1].get('id')

    for job in jobs:
        utils.serialize_datetimes(job)
        api_utils.serialize_job_metadata(job)
    links = [{'rel': 'next', 'href': next_page}]
    return {'jobs': jobs, 'jobs_links': links}
示例6: update_status
def update_status(self, request, job_id, body):
    """Update a job's status and, optionally, its soft timeout.

    A terminal ERROR/CANCELLED status also records a job fault.

    :raises webob.exc.HTTPBadRequest: no 'status' entity in the body
    :raises webob.exc.HTTPNotFound: no such job
    """
    status = body.get('status')
    if not status:
        raise webob.exc.HTTPBadRequest()

    new_status = status['status'].upper()
    values = {'status': new_status}
    if 'timeout' in status:
        parsed = timeutils.parse_isotime(status['timeout'])
        values['timeout'] = timeutils.normalize_time(parsed)

    try:
        job = self.db_api.job_update(job_id, values)
    except exception.NotFound:
        msg = _('Job %s could not be found.') % job_id
        raise webob.exc.HTTPNotFound(explanation=msg)

    # Terminal failure states get a fault record for post-mortem.
    if new_status in ['ERROR', 'CANCELLED']:
        fault_values = self._get_error_values(status, job)
        self.db_api.job_fault_create(fault_values)
    return {'status': {'status': job['status'],
                       'timeout': job['timeout']}}
示例7: fake_schedule_to_next_run
def fake_schedule_to_next_run(*args, **kwargs):
    # Test stub: ignore all scheduling inputs and always return the
    # parsed form of 'expected_next_run' (a free variable — presumably
    # an ISO 8601 string defined in the enclosing test scope, which is
    # outside this view).
    return timeutils.parse_isotime(expected_next_run)
示例8: _create_jobs
def _create_jobs(self):
    """Create the shared test fixtures: two schedules and four jobs.

    schedule_1 (no metadata) owns jobs 1, 3 and 4; schedule_2 (with
    'instance_id' metadata) owns job 2.  Jobs 1/3/4 are QUEUED with
    retry_count 0; job 2 is ERROR with retry_count 1 and carries the
    same 'instance_id' metadata.  All jobs share the same soft (+1h)
    and hard (+4h) timeouts relative to now.
    """
    # Schedule 1: daily snapshot at 02:30, no metadata.
    fixture = {
        'id': unit_utils.SCHEDULE_UUID1,
        'tenant': unit_utils.TENANT1,
        'action': 'snapshot',
        'minute': '30',
        'hour': '2',
        'next_run': timeutils.parse_isotime('2012-11-27T02:30:00Z')
    }
    self.schedule_1 = db_api.schedule_create(fixture)
    # Schedule 2: same time, different tenant, with instance metadata.
    fixture = {
        'id': unit_utils.SCHEDULE_UUID2,
        'tenant': unit_utils.TENANT2,
        'action': 'snapshot',
        'minute': '30',
        'hour': '2',
        'next_run': timeutils.parse_isotime('2012-11-27T02:30:00Z'),
        'schedule_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            }
        ],
    }
    self.schedule_2 = db_api.schedule_create(fixture)
    # Shared timeouts for all four jobs.
    now = timeutils.utcnow()
    timeout = now + datetime.timedelta(hours=1)
    hard_timeout = now + datetime.timedelta(hours=4)
    # Job 1: fresh QUEUED job on schedule 1.
    fixture = {
        'id': unit_utils.JOB_UUID1,
        'schedule_id': self.schedule_1['id'],
        'tenant': unit_utils.TENANT1,
        'worker_id': unit_utils.WORKER_UUID1,
        'action': 'snapshot',
        'status': 'QUEUED',
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 0,
    }
    self.job_1 = db_api.job_create(fixture)
    # Job 2: failed (ERROR) job on schedule 2, one retry, with metadata.
    fixture = {
        'id': unit_utils.JOB_UUID2,
        'schedule_id': self.schedule_2['id'],
        'tenant': unit_utils.TENANT2,
        'worker_id': unit_utils.WORKER_UUID2,
        'action': 'snapshot',
        'status': 'ERROR',
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 1,
        'job_metadata': [
            {
                'key': 'instance_id',
                'value': 'my_instance',
            },
        ]
    }
    self.job_2 = db_api.job_create(fixture)
    # Jobs 3 and 4: additional QUEUED jobs on schedule 1 (identical
    # except for their ids) for list/pagination-style assertions.
    fixture = {
        'id': unit_utils.JOB_UUID3,
        'schedule_id': self.schedule_1['id'],
        'tenant': unit_utils.TENANT1,
        'worker_id': unit_utils.WORKER_UUID1,
        'action': 'snapshot',
        'status': 'QUEUED',
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 0,
    }
    self.job_3 = db_api.job_create(fixture)
    fixture = {
        'id': unit_utils.JOB_UUID4,
        'schedule_id': self.schedule_1['id'],
        'tenant': unit_utils.TENANT1,
        'worker_id': unit_utils.WORKER_UUID1,
        'action': 'snapshot',
        'status': 'QUEUED',
        'timeout': timeout,
        'hard_timeout': hard_timeout,
        'retry_count': 0,
    }
    self.job_4 = db_api.job_create(fixture)
示例9: _process_job
def _process_job(self, job):
    """Process one snapshot job end to end.

    Sends a start/retry notification, enforces the hard timeout and
    max-retry limits, cancels the job if its schedule or instance_id
    is missing, then creates (or resumes) the snapshot image and polls
    it until it becomes ACTIVE, the job times out, or the worker is
    stopping.

    NOTE(review): the tail of this method reads self.current_job,
    which is never assigned here — presumably it is set to this job
    when the worker claims it; confirm against the caller.
    """
    payload = {'job': job}
    # A QUEUED job is being picked up for the first time; any other
    # status means this is a retry of a previously attempted job.
    if job['status'] == 'QUEUED':
        self.send_notification_start(payload)
    else:
        self.send_notification_retry(payload)
    job_id = job['id']
    # hard_timeout arrives as an ISO 8601 string; normalize before
    # comparing with the worker's notion of "now".
    hard_timeout = timeutils.normalize_time(
        timeutils.parse_isotime(job['hard_timeout']))
    hard_timed_out = hard_timeout <= self._get_utcnow()
    if hard_timed_out:
        msg = ('Job %(job_id)s has reached/exceeded its'
               ' hard timeout: %(hard_timeout)s.' %
               {'job_id': job_id, 'hard_timeout': job['hard_timeout']})
        self._job_hard_timed_out(job, msg)
        LOG.info(_('[%(worker_tag)s] Job hard timed out: %(msg)s') %
                 {'worker_tag': self.get_worker_tag(), 'msg': msg})
        return
    max_retried = job['retry_count'] > self.max_retry
    if max_retried:
        msg = ('Job %(job_id)s has reached/exceeded its'
               ' max_retry count: %(retry_count)s.' %
               {'job_id': job_id, 'retry_count': job['retry_count']})
        self._job_max_retried(job, msg)
        LOG.info(_('[%(worker_tag)s] Job max_retry reached: %(msg)s') %
                 {'worker_tag': self.get_worker_tag(), 'msg': msg})
        return
    # A job whose schedule has disappeared can never be rescheduled.
    schedule = self._get_schedule(job)
    if schedule is None:
        msg = ('Schedule %(schedule_id)s not found for job %(job_id)s' %
               {'schedule_id': job['schedule_id'], 'job_id': job_id})
        self._job_cancelled(job, msg)
        LOG.info(_('[%(worker_tag)s] Job cancelled: %(msg)s') %
                 {'worker_tag': self.get_worker_tag(),
                  'msg': msg})
        return
    # Mark the job PROCESSING and remember when its next soft timeout
    # and next status update are due.
    now = self._get_utcnow()
    self.next_timeout = now + self.initial_timeout
    self._job_processing(job, self.next_timeout)
    self.next_update = self._get_utcnow() + self.update_interval
    instance_id = self._get_instance_id(job)
    if not instance_id:
        msg = ('Job %s does not specify an instance_id in its metadata.'
               % job_id)
        self._job_cancelled(job, msg)
        return
    # Reuse an in-flight image from a previous attempt if one exists;
    # otherwise start a new snapshot.
    image_id = self._get_image_id(job)
    if image_id is None:
        image_id = self._create_image(job, instance_id,
                                      schedule)
        if image_id is None:
            return
    else:
        LOG.info(_("[%(worker_tag)s] Resuming image: %(image_id)s")
                 % {'worker_tag': self.get_worker_tag(),
                    'image_id': image_id})
    # Poll until the image is ACTIVE, the soft timeout budget runs out
    # (OutOfTimeException), or the worker is asked to stop.
    active = False
    retry = True
    while retry and not active and not self.stopping:
        image_status = self._poll_image_status(job, image_id)
        active = image_status == 'ACTIVE'
        if not active:
            retry = True
            try:
                self._update_job(job_id, "PROCESSING")
            except exc.OutOfTimeException:
                retry = False
            else:
                time.sleep(self.image_poll_interval)
    if active:
        self._process_retention(instance_id,
                                self.current_job['schedule_id'])
        self._job_succeeded(self.current_job)
    elif not active and not retry:
        self._job_timed_out(self.current_job)
    elif self.stopping:
        # Timeout job so it gets picked up again quickly rather than
        # queuing up behind a bunch of new jobs, but not so soon that
        # another worker will pick it up before everything is shut down
        # and thus burn through the retries
        timeout = self._get_utcnow() + self.timeout_worker_stop
        self._job_processing(self.current_job, timeout=timeout)
    LOG.debug("[%s] Snapshot complete" % self.get_worker_tag())