This page collects typical usage examples of the dumps function from the Python module taskflow.openstack.common.jsonutils. If you are unsure what dumps does, how to call it, or what it looks like in real code, the curated examples below should help.
Shown below are 15 code examples of the dumps function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
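For orientation, here is a minimal sketch of calling dumps directly, assuming only that jsonutils.dumps serializes plain Python structures to a JSON string (the exact handling of non-primitive types such as datetimes depends on the jsonutils version in use); the payload values are made up for illustration:

from taskflow.openstack.common import jsonutils

# Hypothetical payload; dumps() returns a JSON-formatted str.
payload = {'owner': 'worker-1', 'attempts': 3, 'tags': ['retry', 'io']}
encoded = jsonutils.dumps(payload)
print(encoded)  # e.g. {"owner": "worker-1", "attempts": 3, "tags": ["retry", "io"]}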
Example 1: _update_flow_details
def _update_flow_details(self, fd, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    fd_path = paths.join(self.flow_path, fd.uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if create_missing:
            txn.create(fd_path)
            e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
        else:
            raise exc.NotFound("No flow details found with id: %s"
                               % fd.uuid)
    else:
        # Existent: read it out.
        e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
    # Update and write it back.
    e_fd = e_fd.merge(fd)
    fd_data = e_fd.to_dict()
    txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
    for ad in fd:
        ad_path = paths.join(fd_path, ad.uuid)
        # NOTE(harlowja): create an entry in the flow detail path
        # for the provided atom detail so that a reference exists
        # from the flow detail to its atom details.
        if not self._client.exists(ad_path):
            txn.create(ad_path)
        e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
    return e_fd
Example 2: _update_atom_details
def _update_atom_details(self, ad, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    ad_path = paths.join(self.atom_path, ad.uuid)
    e_ad = None
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if create_missing:
            txn.create(ad_path)
        else:
            raise exc.NotFound("No atom details found with id: %s"
                               % ad.uuid)
    else:
        # Existent: read it out.
        try:
            ad_data = misc.decode_json(ad_data)
            ad_cls = logbook.atom_detail_class(ad_data['type'])
            e_ad = ad_cls.from_dict(ad_data['atom'])
        except KeyError:
            pass
    # Update and write it back.
    if e_ad:
        e_ad = e_ad.merge(ad)
    else:
        e_ad = ad
    ad_data = base._format_atom(e_ad)
    txn.set_data(ad_path,
                 misc.binary_encode(jsonutils.dumps(ad_data)))
    return e_ad
Example 3: format
def format(self, record):
    message = {'message': record.getMessage(),
               'asctime': self.formatTime(record, self.datefmt),
               'name': record.name,
               'msg': record.msg,
               'args': record.args,
               'levelname': record.levelname,
               'levelno': record.levelno,
               'pathname': record.pathname,
               'filename': record.filename,
               'module': record.module,
               'lineno': record.lineno,
               'funcname': record.funcName,
               'created': record.created,
               'msecs': record.msecs,
               'relative_created': record.relativeCreated,
               'thread': record.thread,
               'thread_name': record.threadName,
               'process_name': record.processName,
               'process': record.process,
               'traceback': None}
    if hasattr(record, 'extra'):
        message['extra'] = record.extra
    if record.exc_info:
        message['traceback'] = self.formatException(record.exc_info)
    return jsonutils.dumps(message)
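As a usage sketch (not taken from the original source), a formatter with the format() method above could be attached to a standard library logger so that each emitted record becomes one JSON document; the class name JSONFormatter below is an assumption standing in for whatever class defines format():

import logging

handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())    # hypothetical class holding the format() shown above
log = logging.getLogger('taskflow-demo')
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info('claimed job %s', 'job-1234')   # printed as a single JSON object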
Example 4: _save_flow_details
def _save_flow_details(self, flow_detail, ignore_missing):
    # See if we have an existing flow detail to merge with.
    e_fd = None
    try:
        e_fd = self._get_flow_details(flow_detail.uuid, lock=False)
    except EnvironmentError:
        if not ignore_missing:
            raise exc.NotFound("No flow details found with id: %s"
                               % flow_detail.uuid)
    if e_fd is not None:
        e_fd = p_utils.flow_details_merge(e_fd, flow_detail)
        for td in flow_detail:
            if e_fd.find(td.uuid) is None:
                e_fd.add(td)
        flow_detail = e_fd
    flow_path = os.path.join(self._flow_path, flow_detail.uuid)
    misc.ensure_tree(flow_path)
    self._write_to(
        os.path.join(flow_path, 'metadata'),
        jsonutils.dumps(p_utils.format_flow_detail(flow_detail)))
    if len(flow_detail):
        task_path = os.path.join(flow_path, 'tasks')
        misc.ensure_tree(task_path)
        self._run_with_process_lock('task',
                                    self._save_tasks_and_link,
                                    list(flow_detail), task_path)
    return flow_detail
Example 5: _save_logbook
def _save_logbook(self, book):
    # See if we have an existing logbook to merge with.
    e_lb = None
    try:
        e_lb = self._get_logbook(book.uuid)
    except exc.NotFound:
        pass
    if e_lb is not None:
        e_lb = p_utils.logbook_merge(e_lb, book)
        for fd in book:
            if e_lb.find(fd.uuid) is None:
                e_lb.add(fd)
        book = e_lb
    book_path = os.path.join(self._book_path, book.uuid)
    misc.ensure_tree(book_path)
    created_at = None
    if e_lb is not None:
        created_at = e_lb.created_at
    self._write_to(os.path.join(book_path, 'metadata'), jsonutils.dumps(
        p_utils.format_logbook(book, created_at=created_at)))
    if len(book):
        flow_path = os.path.join(book_path, 'flows')
        misc.ensure_tree(flow_path)
        self._run_with_process_lock('flow',
                                    self._save_flows_and_link,
                                    list(book), flow_path)
    return book
Example 6: _create_logbook
def _create_logbook(lb_path, txn):
    lb_data = p_utils.format_logbook(lb, created_at=None)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(p_utils.format_flow_detail(fd))
        txn.create(fd_path, misc.binary_encode(fd_data))
        for td in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided task detail so that a reference exists
            # from the flow detail to its task details.
            txn.create(paths.join(fd_path, td.uuid))
            td_path = paths.join(self.task_path, td.uuid)
            td_data = jsonutils.dumps(p_utils.format_task_detail(td))
            txn.create(td_path, misc.binary_encode(td_data))
    return lb
Example 7: _create_logbook
def _create_logbook(lb_path, txn):
    lb_data = lb.to_dict(marshal_time=True)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(fd.to_dict())
        txn.create(fd_path, misc.binary_encode(fd_data))
        for ad in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided atom detail so that a reference exists
            # from the flow detail to its atom details.
            txn.create(paths.join(fd_path, ad.uuid))
            ad_path = paths.join(self.atom_path, ad.uuid)
            ad_data = base._format_atom(ad)
            txn.create(ad_path,
                       misc.binary_encode(jsonutils.dumps(ad_data)))
    return lb
Example 8: _format_job
def _format_job(self, job):
    posting = {
        'uuid': job.uuid,
        'name': job.name,
    }
    if job.details is not None:
        posting['details'] = job.details
    if job.book is not None:
        posting['book'] = {
            'name': job.book.name,
            'uuid': job.book.uuid,
        }
    return misc.binary_encode(jsonutils.dumps(posting))
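A small round-trip sketch of the same pattern, showing how a posting like the one built above could be encoded and read back. It assumes (not shown in the examples) that misc.binary_encode/binary_decode convert between text and bytes and that jsonutils.loads is the inverse of dumps; the posting values are made up:

from taskflow.openstack.common import jsonutils
from taskflow.utils import misc   # assumed location of binary_encode/binary_decode

posting = {'uuid': 'demo-uuid', 'name': 'my-job'}
blob = misc.binary_encode(jsonutils.dumps(posting))    # bytes as stored in the backend
restored = jsonutils.loads(misc.binary_decode(blob))   # back to a plain dict
assert restored['name'] == 'my-job'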
Example 9: _update_logbook
def _update_logbook(lb_path, lb_data, txn):
    e_lb = p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data))
    e_lb = p_utils.logbook_merge(e_lb, lb)
    lb_data = p_utils.format_logbook(e_lb, created_at=lb.created_at)
    txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        fd_path = paths.join(lb_path, fd.uuid)
        if not self._client.exists(fd_path):
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(fd_path)
        e_fd = self._update_flow_details(fd, txn, create_missing=True)
        e_lb.add(e_fd)
    return e_lb
Example 10: _save_task_details
def _save_task_details(self, task_detail, ignore_missing):
    # See if we have an existing task detail to merge with.
    e_td = None
    try:
        e_td = self._get_task_details(task_detail.uuid, lock=False)
    except EnvironmentError:
        if not ignore_missing:
            raise exc.NotFound("No task details found with id: %s"
                               % task_detail.uuid)
    if e_td is not None:
        task_detail = p_utils.task_details_merge(e_td, task_detail)
    td_path = os.path.join(self._task_path, task_detail.uuid)
    td_data = p_utils.format_task_detail(task_detail)
    self._write_to(td_path, jsonutils.dumps(td_data))
    return task_detail
Example 11: _update_logbook
def _update_logbook(lb_path, lb_data, txn):
    e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                     unmarshal_time=True)
    e_lb = e_lb.merge(lb)
    lb_data = e_lb.to_dict(marshal_time=True)
    txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        fd_path = paths.join(lb_path, fd.uuid)
        if not self._client.exists(fd_path):
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(fd_path)
        e_fd = self._update_flow_details(fd, txn, create_missing=True)
        e_lb.add(e_fd)
    return e_lb
Example 12: claim
def claim(self, job, who):
    def _unclaimable_try_find_owner(cause):
        try:
            owner = self.find_owner(job)
        except Exception:
            owner = None
        if owner:
            msg = "Job %s already claimed by '%s'" % (job.uuid, owner)
        else:
            msg = "Job %s already claimed" % (job.uuid)
        return excp.UnclaimableJob(msg, cause)

    _check_who(who)
    with self._wrap(job.uuid, job.path, "Claiming failure: %s"):
        # NOTE(harlowja): post as json which will allow for future changes
        # more easily than a raw string/text.
        value = jsonutils.dumps({
            'owner': who,
        })
        # Ensure the target job is still existent (at the right version).
        job_data, job_stat = self._client.get(job.path)
        txn = self._client.transaction()
        # This will abort (and not create the lock) if the job has been
        # removed (somehow...) or updated by someone else to a different
        # version...
        txn.check(job.path, version=job_stat.version)
        txn.create(job.lock_path, value=misc.binary_encode(value),
                   ephemeral=True)
        try:
            kazoo_utils.checked_commit(txn)
        except k_exceptions.NodeExistsError as e:
            raise _unclaimable_try_find_owner(e)
        except kazoo_utils.KazooTransactionException as e:
            if len(e.failures) < 2:
                raise
            else:
                if isinstance(e.failures[0], k_exceptions.NoNodeError):
                    raise excp.NotFound(
                        "Job %s not found to be claimed" % job.uuid,
                        e.failures[0])
                if isinstance(e.failures[1], k_exceptions.NodeExistsError):
                    raise _unclaimable_try_find_owner(e.failures[1])
                else:
                    raise excp.UnclaimableJob(
                        "Job %s claim failed due to transaction"
                        " not succeeding" % (job.uuid), e)
Example 13: post
def post(self, name, book, details=None):
    def format_posting(job_uuid):
        posting = {
            'uuid': job_uuid,
            'name': name,
        }
        if details:
            posting['details'] = details
        else:
            posting['details'] = {}
        if book is not None:
            posting['book'] = {
                'name': book.name,
                'uuid': book.uuid,
            }
        return posting

    # NOTE(harlowja): Jobs are not ephemeral, they will persist until they
    # are consumed (this may change later, but seems safer to do this until
    # further notice).
    job_uuid = uuidutils.generate_uuid()
    with self._wrap(job_uuid, None,
                    "Posting failure: %s", ensure_known=False):
        job_posting = format_posting(job_uuid)
        job_posting = misc.binary_encode(jsonutils.dumps(job_posting))
        job_path = self._client.create(self._job_base,
                                       value=job_posting,
                                       sequence=True,
                                       ephemeral=False)
        job = ZookeeperJob(name, self, self._client,
                           self._persistence, job_path,
                           book=book, details=details,
                           uuid=job_uuid)
        self._job_cond.acquire()
        try:
            self._known_jobs[job_path] = job
            self._job_cond.notify_all()
        finally:
            self._job_cond.release()
        self._emit(jobboard.POSTED, details={'job': job})
        return job
Example 14: test_posting_owner_lost
def test_posting_owner_lost(self):
    with base.connect_close(self.board):
        with base.flush(self.client):
            j = self.board.post('test', p_utils.temporary_log_book())
        self.assertEqual(states.UNCLAIMED, j.state)
        with base.flush(self.client):
            self.board.claim(j, self.board.name)
        self.assertEqual(states.CLAIMED, j.state)
        # Forcefully delete the owner from the backend storage to make
        # sure the job becomes unclaimed (this may happen if some admin
        # manually deletes the lock).
        paths = list(six.iteritems(self.client.storage.paths))
        for (path, value) in paths:
            if path in self.bad_paths:
                continue
            if path.endswith('lock'):
                value['data'] = misc.binary_encode(jsonutils.dumps({}))
        self.assertEqual(states.UNCLAIMED, j.state)
Example 15: claim
def claim(self, job, who):
    _check_who(who)
    with self._wrap(job.uuid, job.path, "Claiming failure: %s"):
        # NOTE(harlowja): post as json which will allow for future changes
        # more easily than a raw string/text.
        value = jsonutils.dumps({
            'owner': who,
        })
        try:
            self._client.create(job.lock_path,
                                value=misc.binary_encode(value),
                                ephemeral=True)
        except k_exceptions.NodeExistsException:
            # Try to see if we can find who the owner really is...
            try:
                owner = self.find_owner(job)
            except Exception:
                owner = None
            if owner:
                msg = "Job %s already claimed by '%s'" % (job.uuid, owner)
            else:
                msg = "Job %s already claimed" % (job.uuid)
            raise excp.UnclaimableJob(msg)
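To close, a hedged sketch of the inverse direction, not part of the examples above: a hypothetical helper on the same jobboard class that reads the claim written by claim() back out of the lock node, assuming jsonutils.loads and misc.binary_decode are available:

def _read_owner(self, job):
    # Hypothetical helper: fetch the lock node created by claim() above
    # and parse its JSON body to recover the claiming owner (or None).
    raw, _stat = self._client.get(job.lock_path)
    return jsonutils.loads(misc.binary_decode(raw)).get('owner')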