This article collects typical usage examples of the LocalCache class from Python's celery.datastructures module. If you have been wondering what LocalCache is for and how to use it in practice, the hand-picked class examples below should help.
A total of 10 code examples of the LocalCache class are shown, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
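For orientation: LocalCache is essentially an ordered mapping with a fixed size limit, and the oldest entries are evicted first once that limit is reached (Example 2 below tests exactly this). A minimal sketch, assuming an older Celery release (2.x/3.x) where celery.datastructures.LocalCache still exists; the limit and keys are made up for illustration:

from celery.datastructures import LocalCache

cache = LocalCache(limit=3)          # keep at most 3 entries
for key in ("a", "b", "c", "d"):
    cache[key] = key.upper()         # inserting "d" evicts the oldest key, "a"
print(list(cache.keys()))            # ['b', 'c', 'd']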
Example 1: __init__
def __init__(self, callback=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000):
    self.workers = LocalCache(max_workers_in_memory)
    self.tasks = LocalCache(max_tasks_in_memory)
    self.event_callback = callback
    self.group_handlers = {"worker": self.worker_event,
                           "task": self.task_event}
Example 2: test_expires
def test_expires(self):
    limit = 100
    x = LocalCache(limit=limit)
    slots = list(range(limit * 2))
    for i in slots:
        x[i] = i
    self.assertListEqual(x.keys(), slots[limit:])
Example 3: DummyClient
class DummyClient(object):

    def __init__(self, *args, **kwargs):
        self.cache = LocalCache(5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
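A quick usage sketch for the DummyClient above, which mimics a memcached-style get/set/delete client on top of a bounded in-memory cache; this assumes the class definition from Example 3 is in scope:

client = DummyClient()
client.set("task-id-1", {"status": "SUCCESS"})
print(client.get("task-id-1"))   # {'status': 'SUCCESS'}
client.delete("task-id-1")
print(client.get("task-id-1"))   # None -- missing keys return None via LocalCache.get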
Example 4: DummyClient
class DummyClient(object):

    def __init__(self, *args, **kwargs):
        self.cache = LocalCache(5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        cache = self.cache
        return dict((k, cache[k]) for k in keys if k in cache)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
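The get_multi variant only returns keys that are actually cached, which is how a multi-get is usually expected to behave. A small sketch, assuming the DummyClient from Example 4 is in scope:

client = DummyClient()
client.set("a", 1)
print(client.get_multi(["a", "missing"]))   # {'a': 1} -- absent keys are simply skipped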
Example 5: __init__
def __init__(self, *args, **kwargs):
    super(BaseDictBackend, self).__init__(*args, **kwargs)
    self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
                             self.app.conf.CELERY_MAX_CACHED_RESULTS)
Example 6: BaseDictBackend
class BaseDictBackend(BaseBackend):

    def __init__(self, *args, **kwargs):
        super(BaseDictBackend, self).__init__(*args, **kwargs)
        self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
                                 self.app.conf.CELERY_MAX_CACHED_RESULTS)

    def store_result(self, task_id, result, status, traceback=None):
        """Store task result and status."""
        result = self.encode_result(result, status)
        return self._store_result(task_id, result, status, traceback)

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)["status"]

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get("traceback")

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta["status"] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta["result"])
        else:
            return meta["result"]

    def get_task_meta(self, task_id, cache=True):
        if cache and task_id in self._cache:
            return self._cache[task_id]
        meta = self._get_task_meta_for(task_id)
        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_taskset_result(self, taskset_id):
        self._cache[taskset_id] = self.get_taskset_meta(taskset_id,
                                                        cache=False)

    def get_taskset_meta(self, taskset_id, cache=True):
        if cache and taskset_id in self._cache:
            return self._cache[taskset_id]
        meta = self._restore_taskset(taskset_id)
        if cache and meta is not None:
            self._cache[taskset_id] = meta
        return meta

    def restore_taskset(self, taskset_id, cache=True):
        """Get the result for a taskset."""
        meta = self.get_taskset_meta(taskset_id, cache=cache)
        if meta:
            return meta["result"]

    def save_taskset(self, taskset_id, result):
        """Store the result of an executed taskset."""
        return self._save_taskset(taskset_id, result)
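The interesting part of BaseDictBackend is get_task_meta: it is a cache-aside lookup where only successful results are kept in the bounded LocalCache, so repeated polling of a finished task does not hit the real backend again. The standalone sketch below reproduces that pattern in isolation; the backend_store dict and all names are made up for illustration, not part of Celery:

from celery.datastructures import LocalCache

SUCCESS = "SUCCESS"
backend_store = {"abc": {"status": SUCCESS, "result": 42}}   # stand-in for the real backend
_cache = LocalCache(limit=100)

def get_task_meta(task_id, cache=True):
    # Same cache-aside shape as BaseDictBackend.get_task_meta above:
    # serve from the LRU cache when possible, cache only successful results.
    if cache and task_id in _cache:
        return _cache[task_id]
    meta = backend_store[task_id]          # the "expensive" backend lookup
    if cache and meta.get("status") == SUCCESS:
        _cache[task_id] = meta
    return meta

print(get_task_meta("abc"))   # first call reads backend_store, later calls hit _cache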
Example 7: State
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}
        self._mutex = Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop("clear_after", False)
        self._mutex.acquire()
        try:
            return fun(*args, **kwargs)
        finally:
            if clear_after:
                self._clear()
            self._mutex.release()

    def clear_tasks(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear_tasks(ready)
        finally:
            self._mutex.release()

    def _clear_tasks(self, ready=True):
        if ready:
            self.tasks = dict((uuid, task)
                              for uuid, task in self.tasks.items()
                              if task.state not in states.READY_STATES)
        else:
            self.tasks.clear()

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear(ready)
        finally:
            self._mutex.release()

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname", None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, "on_%s" % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def event(self, event):
        self._mutex.acquire()
        try:
            return self._dispatch_event(event)
        finally:
            self._mutex.release()

    def _dispatch_event(self, event):
        # ... (remaining code omitted) ...
Example 8: State
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0
    _buffering = False
    buffer = deque()
    frozen = False

    def __init__(self, callback=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}
        self._resource = RLock()

    def freeze(self, buffer=True):
        """Stop recording the event stream.

        :keyword buffer: If true, any events received while frozen
            will be buffered, you can use ``thaw(replay=True)`` to apply
            this buffer. :meth:`thaw` will clear the buffer and resume
            recording the stream.

        """
        self._buffering = buffer
        self.frozen = True

    def _replay(self):
        while self.buffer:
            try:
                event = self.buffer.popleft()
            except IndexError:
                pass
            self._dispatch_event(event)

    def thaw(self, replay=True):
        """Resume recording of the event stream.

        :keyword replay: Will replay buffered events received while
            the stream was frozen.

            This will always clear the buffer, deleting any events collected
            while the stream was frozen.

        """
        self._buffering = False
        try:
            if replay:
                self._replay()
            else:
                self.buffer.clear()
        finally:
            self.frozen = False

    def freeze_while(self, fun, *args, **kwargs):
        self.freeze()
        try:
            return fun(*args, **kwargs)
        finally:
            self.thaw(replay=True)

    def clear_tasks(self, ready=True):
        if ready:
            self.tasks = dict((uuid, task)
                              for uuid, task in self.tasks.items()
                              if task.state not in states.READY_STATES)
        else:
            self.tasks.clear()

    def clear(self, ready=True):
        try:
            self.workers.clear()
            self.clear_tasks(ready)
            self.event_count = 0
            self.task_count = 0
        finally:
            pass

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        # ... (remaining code omitted) ...
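Example 8's freeze/thaw protocol is essentially a replayable buffer: while frozen, incoming events are appended to a deque instead of being dispatched, and thaw(replay=True) flushes them through the normal dispatch path. A standalone sketch of that idea (the names and the print stand-in are illustrative, not Celery APIs):

from collections import deque

buffer = deque()
frozen = True                                  # freeze(): stop dispatching, start buffering
for ev in ({"type": "worker-online"}, {"type": "worker-heartbeat"}):
    buffer.append(ev)                          # events received while frozen are buffered

frozen = False                                 # thaw(replay=True): replay, then resume
while buffer:
    print("dispatch", buffer.popleft())        # stands in for self._dispatch_event(event)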
Example 9: __init__
def __init__(self, *args, **kwargs):
    self.cache = LocalCache(5000)
Example 10: State
class State(object):
    """Represents a snapshot of a clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid, **kwargs):
        """Get or create task by uuid."""
        try:
            task = self.tasks[uuid]
            task.update(kwargs)
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid, **kwargs)
        return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        handler = getattr(worker, "on_%s" % type)
        if handler:
            handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def event(self, event):
        """Process event."""
        self.event_count += 1
        event = kwdict(event)
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def tasks_by_timestamp(self):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items())

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)

    def tasks_by_type(self, name):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                                         for uuid, task in self.tasks.items()
                                         if task.name == name])

    def tasks_by_worker(self, hostname):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                                         for uuid, task in self.tasks.items()
                                         if task.worker.hostname == hostname])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(set(task.name for task in self.tasks.values()))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        # ... (remaining code omitted) ...
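To tie Example 10 together: State.event routes a raw event dict by splitting its "type" field into a group and a subtype, then handing the remaining fields to the matching group handler, which looks up an on_<subtype> method on the Worker or Task object. The splitting step is easy to see in isolation (partition in the example is Celery's own helper in old releases; str.partition behaves the same way here):

group, _, type = "worker-online".partition("-")
print(group, type)   # worker online
# state.group_handlers[group](type, fields) would then call worker_event("online", fields),
# which resolves to worker.on_online(**fields) on the Worker instance.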