This article collects typical usage examples of the Python method pinball.ui.data_builder.DataBuilder.get_execution. If you have been wondering how DataBuilder.get_execution is used in practice, the curated examples here may help. You can also explore further usage examples of its containing class, pinball.ui.data_builder.DataBuilder.
Three code examples of the DataBuilder.get_execution method are shown below, sorted by popularity by default.
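Before diving into the examples, here is a minimal sketch of the call pattern they share, pieced together from the snippets below; the import path for DbStore and the concrete workflow, instance, and job names are assumptions made for illustration, not taken from the examples.
# Minimal sketch of the shared call pattern (assumptions: DbStore import path,
# placeholder identifiers, and a configured pinball store to connect to).
from pinball.persistence.store import DbStore
from pinball.ui.data_builder import DataBuilder

data_builder = DataBuilder(DbStore())
execution_data = data_builder.get_execution('some_workflow',   # workflow name
                                            'some_instance',   # instance id
                                            'some_job',        # job name
                                            0)                  # execution index
print(execution_data.format())  # dict of display-ready fields, as in Example 1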
Example 1: get_context_data
# Required import: from pinball.ui.data_builder import DataBuilder [as alias]
# Or: from pinball.ui.data_builder.DataBuilder import get_execution [as alias]
def get_context_data(self, **kwargs):
    context = super(ExecutionView, self).get_context_data(**kwargs)
    # Identify the execution from the query-string parameters.
    workflow = self.request.GET['workflow']
    instance = self.request.GET['instance']
    job = self.request.GET['job']
    execution = int(self.request.GET['execution'])
    data_builder = DataBuilder(DbStore())
    execution_data = data_builder.get_execution(workflow,
                                                instance,
                                                job,
                                                execution)
    # Expose the formatted execution fields directly in the template context.
    formatted_data = execution_data.format()
    for key, value in formatted_data.items():
        context[key] = value
    properties = []
    for key, value in execution_data.properties.items():
        properties.append('%s=%s' % (key, value))
    context['properties'] = ', '.join(properties)
    # Blank out fields that are not meaningful for a still-running execution.
    if not execution_data.end_time:
        context['end_time'] = ''
    if execution_data.exit_code is None:
        context['exit_code'] = ''
    return context
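As a usage note, the view above reads all four identifiers from the query string. The snippet below is one hypothetical way to exercise it from a test or shell session; the /execution/ path, the Django RequestFactory wiring, and the need for a configured pinball store are assumptions layered on top of the example, not part of it.
# Hypothetical invocation of ExecutionView; the URL path is illustrative only,
# and DataBuilder(DbStore()) inside the view needs a configured pinball store.
from django.test import RequestFactory

request = RequestFactory().get('/execution/', {'workflow': 'some_workflow',
                                               'instance': 'some_instance',
                                               'job': 'some_job',
                                               'execution': '0'})
view = ExecutionView()
view.request = request
context = view.get_context_data()  # contains the formatted execution fields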
Example 2: DataBuilderTestCase
# Required import: from pinball.ui.data_builder import DataBuilder [as alias]
# Or: from pinball.ui.data_builder.DataBuilder import get_execution [as alias]
#......... part of the code is omitted here .........
        instance = self._data_builder.get_instance('workflow_0', 'instance_0')
        self.assertEqual('workflow_0', instance.workflow)
        self.assertEqual('instance_0', instance.instance)

    def test_get_instance(self):
        self._get_instance()

    def test_get_instance_using_cache(self):
        self._data_builder.use_cache = True
        self._get_instance()
        # Running instance should not have been cached.
        self.assertEqual([], self._store.read_cached_data_names())

    def test_get_jobs_empty(self):
        self.assertEqual([],
                         self._data_builder.get_jobs('does_not_exist',
                                                     'does_not_exist'))

    def test_get_jobs(self):
        self._add_tokens()
        jobs = self._data_builder.get_jobs('workflow_0', 'instance_0')
        self.assertEqual(2, len(jobs))
        for job in jobs:
            self.assertEqual('workflow_0', job.workflow)
            self.assertEqual('instance_0', job.instance)
            self.assertEqual('ShellJob', job.job_type)
            self.assertTrue(job.info.startswith('command=some command'))
            self.assertEqual(Status.FAILURE, job.status)
        self.assertEqual([(0, ''), (1, 'SUCCESS'), (9, 'FAILURE')],
                         jobs[0].progress)
        self.assertEqual([(89, ''), (1, 'SUCCESS'), (9, 'FAILURE')],
                         jobs[1].progress)

    def test_get_executions_empty(self):
        self.assertEqual([],
                         self._data_builder.get_executions('does_not_exist',
                                                           'does_not_exist',
                                                           'does_not_exist'))

    def test_get_executions(self):
        self._add_tokens()
        executions = self._data_builder.get_executions('workflow_0',
                                                       'instance_0',
                                                       'job_0')
        self.assertEqual(2, len(executions))
        exit_codes = [0, 1]
        for execution in executions:
            self.assertEqual('workflow_0', execution.workflow)
            self.assertEqual('instance_0', execution.instance)
            self.assertEqual('job_0', execution.job)
            self.assertTrue(execution.info.startswith('some_command'))
            exit_codes.remove(execution.exit_code)
            self.assertEqual(2, len(execution.logs))

    def test_get_executions_across_instances_empty(self):
        self.assertEqual([],
                         self._data_builder.get_executions_across_instances(
                             'does_not_exist',
                             'does_not_exist'))

    def test_get_executions_across_instances(self):
        self._add_tokens()
        executions = self._data_builder.get_executions_across_instances(
            'workflow_0', 'job_0')
        self.assertEqual(2 * 2, len(executions))
        exit_codes = [0, 0, 1, 1]
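The test fixture (setUp) is cut off by the omission above; the sketch below shows one plausible way the _store and _data_builder attributes could be wired up, assuming pinball's in-memory EphemeralStore. The store class, its import path, and the _add_tokens helper are not shown in the snippet and are assumptions here.
# Plausible fixture sketch; EphemeralStore and its import path are assumed,
# and _add_tokens (which seeds the store with workflow/job tokens) is omitted.
import unittest

from pinball.persistence.store import EphemeralStore
from pinball.ui.data_builder import DataBuilder


class DataBuilderTestCase(unittest.TestCase):
    def setUp(self):
        self._store = EphemeralStore()
        self._data_builder = DataBuilder(self._store)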
Example 3: Worker
# Required import: from pinball.ui.data_builder import DataBuilder [as alias]
# Or: from pinball.ui.data_builder.DataBuilder import get_execution [as alias]
#......... part of the code is omitted here .........
        return True

    def _execute_job(self):
        """Execute the owned job."""
        assert self._owned_job_token
        job = pickle.loads(self._owned_job_token.data)
        name = Name.from_job_token_name(self._owned_job_token.name)
        self._executor = JobExecutor.from_job(name.workflow,
                                              name.instance,
                                              name.job,
                                              job,
                                              self._data_builder,
                                              self._emailer)
        success = self._executor.prepare()
        if success:
            self._owned_job_token.data = pickle.dumps(self._executor.job)
            success = self._update_owned_job_token()
        if success:
            self._start_renew_ownership()
            success = self._executor.execute()
            self._stop_renew_ownership()
        if success:
            self._move_job_token_to_waiting(self._executor.job, True)
        elif self._executor.job.retry():
            self._keep_job_token_in_runnable(self._executor.job)
        else:
            signaller = Signaller(self._client, name.workflow, name.instance)
            # If ARCHIVE is not set, this is the first failed job in the
            # workflow.
            first_failure = not signaller.is_action_set(Signal.ARCHIVE)
            self._move_job_token_to_waiting(self._executor.job, False)
            self._send_job_failure_emails(first_failure)
        self._executor = None
        self._owned_job_token = None
        # If needed, archive the workflow.
        self._process_signals(name.workflow, name.instance)

    def _send_instance_end_email(self, workflow, instance):
        try:
            schedule_data = self._data_builder.get_schedule(workflow)
            if not schedule_data:
                LOG.warning('no schedule found for workflow %s', workflow)
            elif schedule_data.emails:
                instance_data = self._data_builder.get_instance(workflow,
                                                                instance)
                jobs_data = self._data_builder.get_jobs(workflow, instance)
                self._emailer.send_instance_end_message(schedule_data.emails,
                                                        instance_data,
                                                        jobs_data)
        except:
            LOG.exception('error sending instance end email for workflow %s '
                          'instance %s', workflow, instance)

    def _send_job_failure_emails(self, first_failure):
        assert self._owned_job_token
        name = Name.from_job_token_name(self._owned_job_token.name)
        job = self._executor.job
        emails = set(job.emails)
        if first_failure:
            schedule_data = self._data_builder.get_schedule(name.workflow)
            if schedule_data:
                emails.update(schedule_data.emails)
            else:
                LOG.warning('no schedule found for workflow %s', name.workflow)
        if emails:
            execution = len(job.history) - 1
            job_execution_data = self._data_builder.get_execution(
                name.workflow, name.instance, name.job, execution)
            try:
                self._emailer.send_job_execution_end_message(
                    list(emails), job_execution_data)
            except:
                LOG.exception('error sending job failure email for '
                              'workflow %s instance %s job %s execution %d',
                              name.workflow,
                              name.instance,
                              name.job,
                              execution)

    @staticmethod
    def _randomized_worker_polling_time():
        """Generate random worker polling time."""
        return (1.0 + random.random()) * PinballConfig.WORKER_POLL_TIME_SEC

    def run(self):
        """Run the worker."""
        LOG.info('Running worker ' + self._name)
        while True:
            signaller = Signaller(self._client)
            if signaller.is_action_set(Signal.EXIT):
                return
            if not signaller.is_action_set(Signal.DRAIN):
                self._own_runnable_job_token()
            if self._owned_job_token:
                self._execute_job()
            elif self._test_only_end_if_no_runnable:
                return
            else:
                time.sleep(Worker._randomized_worker_polling_time())
        LOG.info('Exiting worker ' + self._name)