This article collects typical usage examples of the Python method django.core.cache.cache.add. If you are wondering how cache.add is used in practice, how it behaves, or what real-world calls look like, the curated examples below may help. You can also explore further usage examples for the containing module, django.core.cache.cache.
Fifteen code examples of the cache.add method are shown below, sorted by popularity by default.
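Before the examples, it helps to recall the core contract: cache.add(key, value, timeout) stores the value only when the key is absent, and returns True exactly when it stored something. This atomic check-and-set is why it appears in so many of the locking recipes below. A minimal sketch (the key and values are illustrative):

from django.core.cache import cache

cache.delete('greeting')                      # make sure the key is absent
assert cache.add('greeting', 'hello', 30)     # True: stored, expires in 30 seconds
assert not cache.add('greeting', 'goodbye')   # False: key exists; value is unchanged
assert cache.get('greeting') == 'hello'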
Example 1: add_cleanup_pod
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def add_cleanup_pod(url):
    """populate the cleanup pod list"""
    # variance allows a pod to stay alive past grace period
    variance = random.uniform(0.1, 1.5)
    grace = round(settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS * variance)

    # save
    pods = cache.get('cleanup_pods', {})
    pods[url] = (datetime.utcnow() + timedelta(seconds=grace))
    cache.set('cleanup_pods', pods)

    # add grace period timestamp
    pod = cache.get(url)
    grace = settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS
    pd = datetime.utcnow() + timedelta(seconds=grace)
    timestamp = str(pd.strftime(MockSchedulerClient.DATETIME_FORMAT))
    pod['metadata']['deletionTimestamp'] = timestamp
    cache.set(url, pod)
Example 2: get_lock
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def get_lock():
    """Acquire a lock so concurrent messages do not overwrite each other in the log."""
    try:
        from django.core.cache import cache
        cache.default_timeout = 0
        if cache._cache and hasattr(cache._cache, 'get_stats'):
            stats = cache._cache.get_stats()
        else:
            stats = []
        if stats:
            # cache.add returns a falsy value while another process holds the lock
            while cache.add('logger_lock', 1, 1) == 0:
                time.sleep(0.1)
    except ImportError:
        dump_file = open('/tmp/networkapi_log_error_dump', 'a')
        traceback.print_exc(file=dump_file)
        dump_file.close()
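The polling loop above generalizes into a small blocking-acquire helper. A sketch under the same assumptions (the helper name and default values are illustrative, not from the source):

import time
from django.core.cache import cache

def blocking_acquire(lock_id, lock_timeout=1, poll_interval=0.1):
    """Spin until cache.add succeeds, i.e. until no other process holds the lock."""
    while not cache.add(lock_id, 1, lock_timeout):
        time.sleep(poll_interval)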
Example 3: test_unicode
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def test_unicode(self):
    # Unicode values can be cached
    stuff = {
        "ascii": "ascii_value",
        "unicode_ascii": "Iñtërnâtiônàlizætiøn1",
        "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
        "ascii2": {"x": 1},
    }

    # Test `set`
    for (key, value) in stuff.items():
        cache.set(key, value)
        assert cache.get(key) == value

    # Test `add`
    for (key, value) in stuff.items():
        cache.delete(key)
        cache.add(key, value)
        assert cache.get(key) == value

    # Test `set_many`
    for key, _value in stuff.items():
        cache.delete(key)
    cache.set_many(stuff)
    for (key, value) in stuff.items():
        assert cache.get(key) == value
Example 4: test_binary_string
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def test_binary_string(self):
    # Binary strings should be cacheable
    from zlib import compress, decompress
    value = "value_to_be_compressed"
    compressed_value = compress(value.encode())

    # Test set
    cache.set("binary1", compressed_value)
    compressed_result = cache.get("binary1")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()

    # Test add
    cache.add("binary1-add", compressed_value)
    compressed_result = cache.get("binary1-add")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()

    # Test set_many
    cache.set_many({"binary1-set_many": compressed_value})
    compressed_result = cache.get("binary1-set_many")
    assert compressed_value == compressed_result
    assert value == decompress(compressed_result).decode()
Example 5: test_long_timeout
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def test_long_timeout(self):
    """
    Using a timeout greater than 30 days makes memcached think it is an
    absolute expiration timestamp instead of a relative offset. Test that
    we honour this convention. Refs #12399.
    """
    cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
    assert cache.get("key1") == "eggs"

    cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1)
    assert cache.get("key2") == "ham"

    cache.set_many(
        {"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1
    )
    assert cache.get("key3") == "sausage"
    assert cache.get("key4") == "lobster bisque"
Example 6: test_forever_timeout
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def test_forever_timeout(self):
    """
    Passing None as the timeout results in a value that is cached forever.
    """
    cache.set("key1", "eggs", None)
    assert cache.get("key1") == "eggs"

    cache.add("key2", "ham", None)
    assert cache.get("key2") == "ham"

    # `add` must not overwrite an existing key, even with a forever timeout
    added = cache.add("key1", "new eggs", None)
    assert not added
    assert cache.get("key1") == "eggs"

    cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None)
    assert cache.get("key3") == "sausage"
    assert cache.get("key4") == "lobster bisque"
Example 7: exclusive_lock
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
@contextmanager  # from contextlib -- the function yields, so it must be a context manager
def exclusive_lock(task_identifier):
    """Obtain an exclusive lock, using the task_identifier as a unique ID.

    This helps prevent the case of multiple workers executing the same task
    at the same time, which can cause unexpected side effects.
    """
    # See: https://celery.readthedocs.io/en/latest/tutorials/task-cookbook.html

    # Plan to time out a few seconds before the limit
    # (after `LOCK_EXPIRE` seconds have passed, the cache will self-clean)
    timeout_at = monotonic() + LOCK_EXPIRE - 3

    # Try to add the value to the cache.
    # Returns False if already cached (another worker added it already)
    # Returns True otherwise (this worker is the first to add the key)
    got_lock = cache.add(task_identifier, 'true', LOCK_EXPIRE)

    # Yield our ability to obtain a lock, but always clean up
    try:
        yield got_lock
    finally:
        # If `got_lock` was False, we don't own the lock - never clean up.
        # If we're close to the timeout, just let the cache self-clean.
        if got_lock and monotonic() < timeout_at:
            cache.delete(task_identifier)
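A caller would typically wrap the task body in a with block and branch on whether the lock was actually obtained. A minimal usage sketch (the task name and do_sync helper are illustrative, not from the source):

@shared_task  # assuming Celery's shared_task decorator
def nightly_sync():
    with exclusive_lock('nightly_sync') as got_lock:
        if not got_lock:
            return  # another worker holds the lock; skip this run
        do_sync()  # hypothetical function doing the actual work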
Example 8: update_discount_sheet_for_participant
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def update_discount_sheet_for_participant(discount_id, participant_id):
    """Lock the sheet and add/update a single participant.

    This task should not run at the same time that we're updating the sheet
    for another participant (or for all participants, as we do nightly).
    """
    discount = models.Discount.objects.get(pk=discount_id)
    participant = models.Participant.objects.get(pk=participant_id)

    if settings.DISABLE_GSHEETS:
        logger.warning(
            "Google Sheets functionality is disabled, not updating '%s' for %s",
            discount.name,
            participant.name,
        )
        return

    member_sheets.update_participant(discount, participant)
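As extracted, the body never takes the lock its docstring promises; Example 9 below implies that in the original project the task is wrapped so that cache.add is called with a lock ID of the form update_discount-<discount pk>. A sketch of such a wrapper, reusing exclusive_lock from Example 7 (the decorator name and wiring are assumptions, not from the source):

def with_discount_lock(func):
    """Assumed wiring: derive the lock ID from the discount's primary key."""
    def wrapper(discount_id, *args, **kwargs):
        with exclusive_lock('update_discount-%d' % discount_id) as got_lock:
            if got_lock:
                func(discount_id, *args, **kwargs)
    return wrapper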
Example 9: test_discount_tasks_share_same_key
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def test_discount_tasks_share_same_key(
    update_participant, update_discount_sheet, mock_cache
):
    """All tasks modifying the same discount sheet must share a task ID.

    This prevents multiple tasks from modifying the Google Sheet at the same time.
    """
    discount = factories.DiscountFactory.create(pk=8675)
    participant = factories.ParticipantFactory.create()

    expected_lock_id = 'update_discount-8675'

    tasks.update_discount_sheet_for_participant(discount.pk, participant.pk)
    mock_cache.add.assert_called_with(expected_lock_id, 'true', 600)

    tasks.update_discount_sheet(discount.pk)
    mock_cache.add.assert_called_with(expected_lock_id, 'true', 600)
Example 10: update_top100_ids
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def update_top100_ids():
    from astrobin.models import Image

    LOCK_EXPIRE = 60 * 5  # lock expires in 5 minutes
    lock_id = 'top100_ids_lock'

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)
    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete(lock_id)

    logger.debug('Building Top100 ids...')
    if acquire_lock():
        try:
            sqs = SearchQuerySet().models(Image).order_by('-likes')
            # materializes the full result set, then keeps the first 100 ids
            top100_ids = [int(x.pk) for x in sqs][:100]
            cache.set('top100_ids', top100_ids, 60 * 60 * 24)
        finally:
            release_lock()
        return

    logger.debug('Top100 ids task is already being run by another worker')
Example 11: _acquire
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def _acquire(self):
    # cache.add is atomic: it returns True only if the key was newly stored
    return cache.add(self.key, 'true', self.timeout)
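This method is evidently one piece of a small lock class. A minimal sketch of a plausible surrounding structure (only self.key and self.timeout come from the snippet; everything else is assumed):

from django.core.cache import cache

class CacheLock:
    """Sketch of a cache-backed lock usable as a context manager (assumed structure)."""

    def __init__(self, key, timeout=600):
        self.key = key          # cache key that represents the lock
        self.timeout = timeout  # seconds before the lock self-expires

    def _acquire(self):
        return cache.add(self.key, 'true', self.timeout)

    def _release(self):
        cache.delete(self.key)

    def __enter__(self):
        if not self._acquire():
            raise RuntimeError('lock %r is already held' % self.key)
        return self

    def __exit__(self, exc_type, exc, tb):
        self._release()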
Example 12: acquire_googleadwords_lock
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def acquire_googleadwords_lock(model, identifier):
    # cache.add fails if the key already exists
    return cache.add(get_googleadwords_lock_id(model, identifier), "true",
                     settings.GOOGLEADWORDS_LOCK_TIMEOUT)
Example 13: release_googleadwords_lock
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def release_googleadwords_lock(model, identifier):
    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    return cache.delete(get_googleadwords_lock_id(model, identifier))
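A sketch of the acquire/release pair in use, under the usual try/finally discipline (Account and sync_account are illustrative stand-ins, not from the source):

if acquire_googleadwords_lock(Account, account.pk):
    try:
        sync_account(account)  # hypothetical work guarded by the lock
    finally:
        release_googleadwords_lock(Account, account.pk)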
Example 14: cache_lock
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
@contextmanager  # from contextlib -- the function yields, so it must be a context manager
def cache_lock(lock_id, lock_value):
    """Cache-based locking mechanism.

    The `memcached` and `redis` cache backends are recommended.
    """
    # cache.add fails if the key already exists
    status = cache.add(lock_id, lock_value)
    try:
        yield status
    finally:
        # Note: this deletes the key even when `status` is False, which can
        # release a lock held by another worker; compare Example 7, which
        # only deletes when it actually obtained the lock.
        cache.delete(lock_id)
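Usage mirrors Example 7: enter the context, then branch on whether the lock was actually acquired. A sketch (the lock ID and the guarded function are illustrative):

with cache_lock('rebuild-search-index', 'true') as acquired:
    if acquired:
        rebuild_search_index()  # hypothetical operation needing mutual exclusion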
Example 15: single_instance_task
# Required import: from django.core.cache import cache
# Or: from django.core.cache.cache import add
def single_instance_task(timeout=3600 * 12):
    """
    Decorator that ensures only one instance of a celery task runs at a time.
    Default timeout is 12 hours.

    See: http://stackoverflow.com/questions/4095940/running-unique-tasks-with-celery
    See: http://ask.github.com/celery/cookbook/tasks.html#ensuring-a-task-is-only-executed-one-at-a-time

    .. note::
        This only works if all tasks share the same django cache.
    """
    def task_exc(func):
        def wrapper(*args, **kwargs):
            lock_id = "single_instance_task:" + func.__name__
            acquire_lock = lambda: cache.add(lock_id, True, timeout)
            release_lock = lambda: cache.delete(lock_id)
            if acquire_lock():
                try:
                    func(*args, **kwargs)
                finally:
                    try:
                        release_lock()
                    except Exception:
                        pass
            else:
                logger.info('Task %s already running' % func.__name__)
        wrapper.__name__ = func.__name__
        return wrapper
    return task_exc
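Applied to a task, the decorator sits between the Celery registration and the function body. A sketch (assuming Celery's task decorator; the task name and body are illustrative):

@task
@single_instance_task(timeout=60 * 30)
def import_feeds():
    fetch_and_store_feeds()  # hypothetical task body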