本文整理汇总了Python中pinball.ui.data_builder.DataBuilder类的典型用法代码示例。如果您正苦于以下问题:Python DataBuilder类的具体用法?Python DataBuilder怎么用?Python DataBuilder使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DataBuilder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: is_failed
def is_failed(self, store):
    """Check whether the tracked workflow has failed.

    Args:
        store: The store to query for workflow status.
    Returns:
        True iff the workflow exists and is neither running nor
        successful; False when no workflow data is found.
    """
    builder = DataBuilder(store, use_cache=True)
    data = builder.get_workflow(self.workflow)
    if not data:
        return False
    # Anything other than RUNNING or SUCCESS counts as a failure.
    return data.status not in (Status.RUNNING, Status.SUCCESS)
示例2: _compute_workflow
def _compute_workflow(dbstore):
    """Cache thread's target callable that computes the workflow.

    This runnable is called by the thread's run() method when the thread
    starts.  It computes the workflows data, serializes it, and stores it
    in the module-level _WORKFLOWS_JSON.  The computation repeats forever,
    constantly refreshing _WORKFLOWS_JSON until the pinball_ui server
    stops.

    Args:
        dbstore: The store to retrieve runs status.
    """
    global _WORKFLOWS_JSON
    data_builder = DataBuilder(dbstore, use_cache=True)
    while True:
        try:
            LOG.info("Workflow data computation starting.")
            workflows_data = data_builder.get_workflows()
            schedules_data = data_builder.get_schedules()
            _WORKFLOWS_JSON = _serialize(workflows_data, schedules_data)
            LOG.info("Workflow data computation complete.")
            # TODO(mao): Tune this parameter depending on future
            # pinball user experience.
            # TODO(mao): Make this computation run at scheduled time intervals
            # and cancel the next execution if the previous job hasn't
            # finished.
            time.sleep(60 * 20)
        except Exception as e:
            # Log and keep looping: a transient store failure must not
            # kill the background refresh thread.
            LOG.exception(e)
示例3: get_context_data
def get_context_data(self, **kwargs):
    """Extend the template context with the requested token's fields.

    Reads the token path from the request's GET parameters and merges
    the token's formatted attributes into the context.
    """
    context = super(TokenView, self).get_context_data(**kwargs)
    builder = DataBuilder(DbStore())
    token_data = builder.get_token(self.request.GET['path'])
    # Merge every formatted token attribute into the template context.
    context.update(token_data.format())
    return context
示例4: schedules
def schedules(_):
    """Django view returning all workflow schedules as JSON.

    Args:
        _: The Django request object (unused).
    Returns:
        HttpResponse with the serialized schedules, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        data_builder = DataBuilder(DbStore())
        schedules_data = data_builder.get_schedules()
        schedules_json = _serialize(schedules_data)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(schedules_json, mimetype='application/json')
示例5: token_paths
def token_paths(request):
    """Django view returning token paths under a given path as JSON.

    Args:
        request: The Django request; GET must contain 'path'.
    Returns:
        HttpResponse with serialized token paths, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        path = request.GET['path']
        data_builder = DataBuilder(DbStore())
        tokens_data = data_builder.get_token_paths(path)
        tokens_json = _serialize(tokens_data)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(tokens_json, mimetype='application/json')
示例6: instances
def instances(request):
    """Django view returning instances of a workflow as JSON.

    Args:
        request: The Django request; GET must contain 'workflow'.
    Returns:
        HttpResponse with serialized workflow instances, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        workflow = request.GET['workflow']
        data_builder = DataBuilder(DbStore(), use_cache=True)
        instances_data = data_builder.get_instances(workflow)
        instances_json = _serialize(instances_data)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(instances_json, mimetype='application/json')
示例7: jobs
def jobs(request):
    """Django view returning jobs of a workflow instance as JSON.

    Args:
        request: The Django request; GET must contain 'workflow' and
            'instance'.  The special instance value 'latest' resolves to
            the most recent instance of the workflow.
    Returns:
        HttpResponse with serialized jobs data, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        data_builder = DataBuilder(DbStore(), use_cache=True)
        workflow = request.GET['workflow']
        instance = request.GET['instance']
        if instance == 'latest':
            instance = data_builder.get_latest_instance(workflow).instance
        jobs_data = data_builder.get_jobs(workflow, instance)
        jobs_json = _serialize(jobs_data)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(jobs_json, mimetype='application/json')
示例8: _get_running_instances
def _get_running_instances(self, store):
    """Find running instances of the workflow.

    Args:
        store: The store to query for workflow instance status.
    Returns:
        List of running workflow instance names.
    """
    builder = DataBuilder(store, use_cache=True)
    # Keep only the names of instances whose status is RUNNING.
    return [instance_data.instance
            for instance_data in builder.get_instances(self.workflow)
            if instance_data.status == Status.RUNNING]
示例9: file_content
def file_content(request):
    """Django view returning the content of a job execution log file.

    Args:
        request: The Django request; GET must contain 'workflow',
            'instance', 'job', 'execution' (a non-negative integer),
            and 'log_type'.
    Returns:
        HttpResponse with the file content as plain text, or an
        HttpResponseServerError on invalid input or failure.
    """
    try:
        workflow = request.GET['workflow']
        instance = request.GET['instance']
        job = request.GET['job']
        execution = int(request.GET['execution'])
        log_type = request.GET['log_type']
        if execution < 0:
            # BUG FIX: the original concatenated a str with an int
            # ('... got ' + execution), which raised TypeError instead
            # of returning this error response.
            return HttpResponseServerError(
                'execution must not be negative; got %d' % execution)
        data_builder = DataBuilder(DbStore())
        file_data = data_builder.get_file_content(workflow, instance, job,
                                                  execution, log_type)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(file_data, mimetype='text/plain')
示例10: executions
def executions(request):
    """Django view returning executions of a job as JSON.

    Args:
        request: The Django request; GET must contain 'workflow' and
            'job'.  If 'instance' is present and non-empty, executions
            are limited to that instance; otherwise they are gathered
            across all instances.
    Returns:
        HttpResponse with serialized executions, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        workflow = request.GET['workflow']
        instance = request.GET.get('instance')
        job = request.GET['job']
        data_builder = DataBuilder(DbStore())
        if instance:
            executions_data = data_builder.get_executions(workflow,
                                                          instance,
                                                          job)
        else:
            executions_data = data_builder.get_executions_across_instances(
                workflow, job)
        executions_json = _serialize(executions_data)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(executions_json, mimetype='application/json')
示例11: graph
def graph(request):
    """Django view rendering a workflow graph as SVG.

    Args:
        request: The Django request; GET must contain 'workflow'.  When
            'instance' is supplied (or is 'latest'), the graph is built
            from that instance's job data; otherwise it is derived from
            the workflow parser.
    Returns:
        HttpResponse with an SVG image, or an HttpResponseServerError
        carrying the traceback on failure.
    """
    try:
        data_builder = DataBuilder(DbStore(), use_cache=True)
        workflow = request.GET['workflow']
        if 'instance' in request.GET:
            instance = request.GET['instance']
            if instance == 'latest':
                instance = data_builder.get_latest_instance(workflow).instance
            jobs_data = data_builder.get_jobs(workflow=workflow,
                                              instance=instance)
            instance_data = data_builder.get_instance(workflow=workflow,
                                                      instance=instance)
            workflow_graph = WorkflowGraph(jobs_data, instance_data)
        else:
            workflow_graph = WorkflowGraph.from_parser(workflow)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(workflow_graph.get_svg(), mimetype='image/svg+xml')
示例12: status
def status(request):
    """Django view reporting system-level status signals as JSON.

    Checks for EXIT/ABORT/DRAIN signals on the (optional) workflow and
    instance given in the GET parameters, and whether the master process
    is reachable.

    Args:
        request: The Django request; GET may contain 'workflow' and
            'instance'.
    Returns:
        HttpResponse with a JSON list of status strings, or an
        HttpResponseServerError carrying the traceback on failure.
    """
    try:
        workflow = request.GET.get('workflow')
        instance = request.GET.get('instance')
        data_builder = DataBuilder(DbStore())
        # Renamed from 'status' to avoid shadowing this view function.
        status_list = []
        if data_builder.is_signal_set(workflow, instance, Signal.EXIT):
            status_list = ['exiting']
        elif data_builder.is_signal_set(workflow, instance, Signal.ABORT):
            status_list = ['aborting']
        elif data_builder.is_signal_set(workflow, instance, Signal.DRAIN):
            status_list = ['draining']
        if not _is_master_alive():
            status_list.append('no master at %s:%d' % (socket.gethostname(),
                                                       PinballConfig.MASTER_PORT))
        status_json = json.dumps(status_list)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; report the traceback to the client.
        LOG.exception('')
        return HttpResponseServerError(traceback.format_exc())
    else:
        return HttpResponse(status_json, mimetype='application/json')
示例13: __init__
def __init__(self, client, store, emailer):
    """Set up the worker's collaborators and internal state.

    Args:
        client: The client used to talk to the master.
        store: The store backing the data builder.
        emailer: The emailer used for notifications.
    """
    self._client = client
    self._emailer = emailer
    self._data_builder = DataBuilder(store)
    self._name = get_unique_name()
    self._inspector = Inspector(client)
    # No job token is owned until one gets claimed.
    self._owned_job_token = None
    # The lock synchronizes access to shared attributes between the worker
    # thread and the lease renewer thread.
    self._lock = threading.Lock()
    self._lease_renewer = None
    self._executor = None
    self._test_only_end_if_no_runnable = False
示例14: test_workflow_data_from_instances_data2
def test_workflow_data_from_instances_data2(self):
    """A workflow with a running instance must report RUNNING status
    and expose that instance as the last one (no end time yet)."""
    wf_instance_list = [
        WorkflowInstanceData('wf', '22346', Status.ABORTED, 12355,
                             sys.maxint),
        WorkflowInstanceData('wf', '22347', Status.SUCCESS, 12365, 12390),
        WorkflowInstanceData('wf', '22345', Status.RUNNING, 12345, None),
    ]
    wf_data = DataBuilder._workflow_data_from_instances_data(
        wf_instance_list)
    # assertEqual/assertIsNone replace the deprecated assertEquals alias.
    self.assertEqual(wf_data.workflow, 'wf')
    self.assertEqual(wf_data.status, Status.RUNNING)
    self.assertEqual(wf_data.last_instance, '22345')
    self.assertEqual(wf_data.last_start_time, 12345)
    self.assertIsNone(wf_data.last_end_time)
    self.assertEqual(wf_data.running_instance_number, 1)
示例15: test_workflow_data_from_instances_data4
def test_workflow_data_from_instances_data4(self):
    """With no running instance, the most recently started finished
    instance (SUCCESS here) determines the workflow status."""
    wf_instance_list = [
        WorkflowInstanceData('wf', '22346', Status.ABORTED, 12345, 12392),
        WorkflowInstanceData('wf', '22347', Status.SUCCESS, 12346, 12393),
        WorkflowInstanceData('wf', '22345', Status.FAILURE, 12391,
                             sys.maxint),
    ]
    wf_data = DataBuilder._workflow_data_from_instances_data(
        wf_instance_list)
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(wf_data.workflow, 'wf')
    self.assertEqual(wf_data.status, Status.SUCCESS)
    self.assertEqual(wf_data.last_instance, '22347')
    self.assertEqual(wf_data.last_start_time, 12346)
    self.assertEqual(wf_data.last_end_time, 12393)
    self.assertEqual(wf_data.running_instance_number, 0)