

Python LruCache.pop Method Code Examples

This article collects typical code examples of the Python method synapse.util.caches.lrucache.LruCache.pop. If you are wondering what LruCache.pop does, how to call it, or what its real-world uses look like, the curated examples below may help. You can also read more about the class it belongs to, synapse.util.caches.lrucache.LruCache.


Below are five code examples of the LruCache.pop method, ordered by popularity by default; a minimal usage sketch comes first, followed by the excerpted examples.
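The sketch below is not taken from the project; it is a minimal illustration of the calls the examples rely on. It assumes a synapse checkout (or installed package) providing synapse.util.caches.lrucache, and that LruCache.pop returns the removed value or the supplied default, as Examples 3 and 4 below depend on; the keys and values are made up.

from synapse.util.caches.lrucache import LruCache

cache = LruCache(max_size=2)              # keeps at most two entries, evicting the least recently used
cache.set("alice", "@alice:example.com")
cache.set("bob", "@bob:example.com")

value = cache.pop("alice")                # removes the entry and returns its value
print(value)                              # -> "@alice:example.com"

missing = cache.pop("alice", None)        # popping an absent key falls back to the default
print(missing)                            # -> None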

Example 1: test_pop

# Required import: from synapse.util.caches.lrucache import LruCache [as alias]
# Or: from synapse.util.caches.lrucache.LruCache import pop [as alias]
    def test_pop(self):
        m = Mock()                     # stands in for an invalidation callback
        cache = LruCache(1)

        cache.set("key", "value", callbacks=[m])
        self.assertFalse(m.called)     # inserting the entry does not invalidate it

        cache.pop("key")
        self.assertEquals(m.call_count, 1)   # popping the entry fires its callback once

        cache.set("key", "value")      # re-insert without registering a callback
        self.assertEquals(m.call_count, 1)

        cache.pop("key")
        self.assertEquals(m.call_count, 1)   # the new entry had no callback, so the count is unchanged
Author: DoubleMalt, Project: synapse, Lines: 17, Source: test_lrucache.py

Example 2: Cache

# Required import: from synapse.util.caches.lrucache import LruCache [as alias]
# Or: from synapse.util.caches.lrucache.LruCache import pop [as alias]
class Cache(object):
    __slots__ = (
        "cache",
        "max_entries",
        "name",
        "keylen",
        "thread",
        "metrics",
        "_pending_deferred_cache",
    )

    def __init__(self, name, max_entries=1000, keylen=1, tree=False, iterable=False):
        cache_type = TreeCache if tree else dict
        self._pending_deferred_cache = cache_type()

        self.cache = LruCache(
            max_size=max_entries, keylen=keylen, cache_type=cache_type,
            size_callback=(lambda d: len(d)) if iterable else None,
            evicted_callback=self._on_evicted,
        )

        self.name = name
        self.keylen = keylen
        self.thread = None
        self.metrics = register_cache("cache", name, self.cache)

    def _on_evicted(self, evicted_count):
        self.metrics.inc_evictions(evicted_count)

    def check_thread(self):
        expected_thread = self.thread
        if expected_thread is None:
            self.thread = threading.current_thread()
        else:
            if expected_thread is not threading.current_thread():
                raise ValueError(
                    "Cache objects can only be accessed from the main thread"
                )

    def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True):
        """Looks the key up in the caches.

        Args:
            key(tuple)
            default: What is returned if key is not in the caches. If not
                specified then function throws KeyError instead
            callback(fn): Gets called when the entry in the cache is invalidated
            update_metrics (bool): whether to update the cache hit rate metrics

        Returns:
            Either a Deferred or the raw result
        """
        callbacks = [callback] if callback else []
        val = self._pending_deferred_cache.get(key, _CacheSentinel)
        if val is not _CacheSentinel:
            val.callbacks.update(callbacks)
            if update_metrics:
                self.metrics.inc_hits()
            return val.deferred

        val = self.cache.get(key, _CacheSentinel, callbacks=callbacks)
        if val is not _CacheSentinel:
            self.metrics.inc_hits()
            return val

        if update_metrics:
            self.metrics.inc_misses()

        if default is _CacheSentinel:
            raise KeyError()
        else:
            return default

    def set(self, key, value, callback=None):
        callbacks = [callback] if callback else []
        self.check_thread()
        entry = CacheEntry(
            deferred=value,
            callbacks=callbacks,
        )

        existing_entry = self._pending_deferred_cache.pop(key, None)
        if existing_entry:
            existing_entry.invalidate()

        self._pending_deferred_cache[key] = entry

        def shuffle(result):
            existing_entry = self._pending_deferred_cache.pop(key, None)
            if existing_entry is entry:
                self.cache.set(key, result, entry.callbacks)
            else:
                # oops, the _pending_deferred_cache has been updated since
                # we started our query, so we are out of date.
                #
                # Better put back whatever we took out. (We do it this way
                # round, rather than peeking into the _pending_deferred_cache
                # and then removing on a match, to make the common case faster)
                if existing_entry is not None:
                    self._pending_deferred_cache[key] = existing_entry
#......... the rest of this code is omitted .........
Author: DoubleMalt, Project: synapse, Lines: 103, Source: descriptors.py

Example 3: DictionaryCache

# Required import: from synapse.util.caches.lrucache import LruCache [as alias]
# Or: from synapse.util.caches.lrucache.LruCache import pop [as alias]
class DictionaryCache(object):
    """Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
    fetching a subset of dictionary keys for a particular key.
    """

    def __init__(self, name, max_entries=1000):
        self.cache = LruCache(max_size=max_entries)

        self.name = name
        self.sequence = 0
        self.thread = None
        # caches_by_name[name] = self.cache

        class Sentinel(object):
            __slots__ = []

        self.sentinel = Sentinel()
        caches_by_name[name] = self.cache

    def check_thread(self):
        expected_thread = self.thread
        if expected_thread is None:
            self.thread = threading.current_thread()
        else:
            if expected_thread is not threading.current_thread():
                raise ValueError(
                    "Cache objects can only be accessed from the main thread"
                )

    def get(self, key, dict_keys=None):
        entry = self.cache.get(key, self.sentinel)
        if entry is not self.sentinel:
            cache_counter.inc_hits(self.name)

            if dict_keys is None:
                return DictionaryEntry(entry.full, dict(entry.value))
            else:
                return DictionaryEntry(entry.full, {
                    k: entry.value[k]
                    for k in dict_keys
                    if k in entry.value
                })

        cache_counter.inc_misses(self.name)
        return DictionaryEntry(False, {})

    def invalidate(self, key):
        self.check_thread()

        # Increment the sequence number so that any SELECT statements that
        # raced with the INSERT don't update the cache (SYN-369)
        self.sequence += 1
        self.cache.pop(key, None)

    def invalidate_all(self):
        self.check_thread()
        self.sequence += 1
        self.cache.clear()

    def update(self, sequence, key, value, full=False):
        self.check_thread()
        if self.sequence == sequence:
            # Only update the cache if the caches sequence number matches the
            # number that the cache had before the SELECT was started (SYN-369)
            if full:
                self._insert(key, value)
            else:
                self._update_or_insert(key, value)

    def _update_or_insert(self, key, value):
        entry = self.cache.setdefault(key, DictionaryEntry(False, {}))
        entry.value.update(value)

    def _insert(self, key, value):
        self.cache[key] = DictionaryEntry(True, value)
Author: roblabla, Project: synapse, Lines: 77, Source: dictionary_cache.py
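As a follow-up to Example 3, the sketch below shows the calling pattern its comments describe for SYN-369: snapshot cache.sequence before the database read and hand it back to update(), so that a racing invalidate() (which bumps the sequence) wins. It assumes the DictionaryCache class above and the module-level helpers it references (caches_by_name, cache_counter, DictionaryEntry) are in scope; load_devices_from_db is a hypothetical stand-in for the real SELECT.

def load_devices_from_db(user_id):
    # hypothetical database read returning the full dict for this key
    return {"laptop": "ed25519:AAAA", "phone": "ed25519:BBBB"}

cache = DictionaryCache("device_keys")

def get_device_keys(user_id):
    hit = cache.get(user_id)
    if hit.full:
        return hit.value                       # complete cached dict, no DB round trip

    sequence = cache.sequence                  # snapshot *before* the read (SYN-369)
    value = load_devices_from_db(user_id)
    cache.update(sequence, user_id, value, full=True)   # ignored if invalidated meanwhile
    return value

print(get_device_keys("@alice:example.com"))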

Example 4: DictionaryCache

# Required import: from synapse.util.caches.lrucache import LruCache [as alias]
# Or: from synapse.util.caches.lrucache.LruCache import pop [as alias]
class DictionaryCache(object):
    """Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
    fetching a subset of dictionary keys for a particular key.
    """

    def __init__(self, name, max_entries=1000):
        self.cache = LruCache(max_size=max_entries, size_callback=len)

        self.name = name
        self.sequence = 0
        self.thread = None
        # caches_by_name[name] = self.cache

        class Sentinel(object):
            __slots__ = []

        self.sentinel = Sentinel()
        self.metrics = register_cache(name, self.cache)

    def check_thread(self):
        expected_thread = self.thread
        if expected_thread is None:
            self.thread = threading.current_thread()
        else:
            if expected_thread is not threading.current_thread():
                raise ValueError(
                    "Cache objects can only be accessed from the main thread"
                )

    def get(self, key, dict_keys=None):
        """Fetch an entry out of the cache

        Args:
            key
            dict_keys (list): if given, return only the requested keys that
                exist in the cached entry.

        Returns:
            DictionaryEntry
        """
        entry = self.cache.get(key, self.sentinel)
        if entry is not self.sentinel:
            self.metrics.inc_hits()

            if dict_keys is None:
                return DictionaryEntry(entry.full, entry.known_absent, dict(entry.value))
            else:
                return DictionaryEntry(entry.full, entry.known_absent, {
                    k: entry.value[k]
                    for k in dict_keys
                    if k in entry.value
                })

        self.metrics.inc_misses()
        return DictionaryEntry(False, set(), {})

    def invalidate(self, key):
        self.check_thread()

        # Increment the sequence number so that any SELECT statements that
        # raced with the INSERT don't update the cache (SYN-369)
        self.sequence += 1
        self.cache.pop(key, None)

    def invalidate_all(self):
        self.check_thread()
        self.sequence += 1
        self.cache.clear()

    def update(self, sequence, key, value, full=False, known_absent=None):
        """Updates the entry in the cache

        Args:
            sequence
            key
            value (dict): The value to update the cache with.
            full (bool): Whether the given value is the full dict, or just a
                partial subset there of. If not full then any existing entries
                for the key will be updated.
            known_absent (set): Set of keys that we know don't exist in the full
                dict.
        """
        self.check_thread()
        if self.sequence == sequence:
            # Only update the cache if the caches sequence number matches the
            # number that the cache had before the SELECT was started (SYN-369)
            if known_absent is None:
                known_absent = set()
            if full:
                self._insert(key, value, known_absent)
            else:
                self._update_or_insert(key, value, known_absent)

    def _update_or_insert(self, key, value, known_absent):
        # We pop and reinsert as we need to tell the cache the size may have
        # changed

        entry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
        entry.value.update(value)
        entry.known_absent.update(known_absent)
#......... the rest of this code is omitted .........
Author: rubo77, Project: synapse, Lines: 103, Source: dictionary_cache.py
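The DictionaryCache in Example 4 adds known_absent tracking and a size_callback; the sketch below, which is not from the project, exercises its partial-update path. It assumes the Example 4 class and the helpers it references (DictionaryEntry, register_cache) are in scope; the room and state keys are illustrative.

cache = DictionaryCache("state_cache")
room_id = "!room:example.com"

sequence = cache.sequence                          # snapshot before the lookup (SYN-369)
loaded = {("m.room.name", ""): "$name_event"}      # pretend result for the keys we asked for
cache.update(
    sequence,
    room_id,
    loaded,
    full=False,                                    # partial dict: merged into any existing entry
    known_absent={("m.room.topic", "")},           # this key is known to have no value at all
)

entry = cache.get(room_id, dict_keys=[("m.room.name", "")])
print(entry.full, entry.value)                     # -> False {('m.room.name', ''): '$name_event'}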

Example 5: Cache

# Required import: from synapse.util.caches.lrucache import LruCache [as alias]
# Or: from synapse.util.caches.lrucache.LruCache import pop [as alias]
class Cache(object):
    __slots__ = (
        "cache",
        "max_entries",
        "name",
        "keylen",
        "sequence",
        "thread",
        "metrics",
    )

    def __init__(self, name, max_entries=1000, keylen=1, tree=False):
        cache_type = TreeCache if tree else dict
        self.cache = LruCache(
            max_size=max_entries, keylen=keylen, cache_type=cache_type
        )

        self.name = name
        self.keylen = keylen
        self.sequence = 0
        self.thread = None
        self.metrics = register_cache(name, self.cache)

    def check_thread(self):
        expected_thread = self.thread
        if expected_thread is None:
            self.thread = threading.current_thread()
        else:
            if expected_thread is not threading.current_thread():
                raise ValueError(
                    "Cache objects can only be accessed from the main thread"
                )

    def get(self, key, default=_CacheSentinel, callback=None):
        val = self.cache.get(key, _CacheSentinel, callback=callback)
        if val is not _CacheSentinel:
            self.metrics.inc_hits()
            return val

        self.metrics.inc_misses()

        if default is _CacheSentinel:
            raise KeyError()
        else:
            return default

    def update(self, sequence, key, value, callback=None):
        self.check_thread()
        if self.sequence == sequence:
            # Only update the cache if the caches sequence number matches the
            # number that the cache had before the SELECT was started (SYN-369)
            self.prefill(key, value, callback=callback)

    def prefill(self, key, value, callback=None):
        self.cache.set(key, value, callback=callback)

    def invalidate(self, key):
        self.check_thread()
        if not isinstance(key, tuple):
            raise TypeError(
                "The cache key must be a tuple not %r" % (type(key),)
            )

        # Increment the sequence number so that any SELECT statements that
        # raced with the INSERT don't update the cache (SYN-369)
        self.sequence += 1
        self.cache.pop(key, None)

    def invalidate_many(self, key):
        self.check_thread()
        if not isinstance(key, tuple):
            raise TypeError(
                "The cache key must be a tuple not %r" % (type(key),)
            )
        self.sequence += 1
        self.cache.del_multi(key)

    def invalidate_all(self):
        self.check_thread()
        self.sequence += 1
        self.cache.clear()
Author: mebjas, Project: synapse, Lines: 83, Source: descriptors.py
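Finally, a minimal sketch (again, not from the project) of driving the simpler, non-deferred Cache from Example 5. It assumes the class above and the module helpers it uses (_CacheSentinel, register_cache, TreeCache) are importable; note that invalidate() insists on tuple keys, so everything is keyed by a one-element tuple.

cache = Cache("get_user_by_id", max_entries=10)
key = ("@alice:example.com",)

sequence = cache.sequence                      # snapshot before the (simulated) SELECT
row = {"name": "Alice", "admin": False}        # pretend database row
cache.update(sequence, key, row)               # only lands if the sequence still matches

print(cache.get(key))                          # hit -> the cached row
print(cache.get(("@bob:example.com",), default=None))   # miss with a default -> None

cache.invalidate(key)                          # bumps the sequence and drops the entry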


Note: The synapse.util.caches.lrucache.LruCache.pop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, who retain copyright in the code; distribution and use must follow the corresponding project's License. Do not reproduce without permission.