

Python StrictRedis.zrange Method Code Examples

This article collects typical usage examples of the redis.StrictRedis.zrange method in Python. If you have been wondering what StrictRedis.zrange does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, redis.StrictRedis.


The following presents 11 code examples of the StrictRedis.zrange method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
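Before the examples, a quick note on semantics: zrange returns the members of a sorted set ordered by ascending score, using inclusive start/end indices where negative values count from the end. A minimal sketch (assuming a local Redis and, as in all the examples below, the pre-3.0 redis-py call signature zadd(name, score, member)):

from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379)
r.zadd('scores', 1, 'a')   # old-style zadd(name, score, member)
r.zadd('scores', 2, 'b')
r.zadd('scores', 3, 'c')

print(r.zrange('scores', 0, -1))                   # ['a', 'b', 'c'] -- ascending score
print(r.zrange('scores', 0, 0))                    # ['a'] -- indices are inclusive
print(r.zrange('scores', 0, -1, withscores=True))  # [('a', 1.0), ('b', 2.0), ('c', 3.0)]
print(r.zrevrange('scores', 0, -1))                # ['c', 'b', 'a'] -- descending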

Example 1: __init__

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
import os
import redis
from redis import Redis
class WordList:
  def __init__(self):
    self.conn = Redis()
    self.CACHE_SIZE = 50
    self.CACHE_KEYS = "words-keys"
    self.CACHE_STORE = "words-store"
    self.WORD_FILE = os.path.join(os.path.expanduser("~"), '.words.txt')

  def _reorganize(self):
    pop_n = self.conn.zcard(self.CACHE_KEYS) - self.CACHE_SIZE
    if pop_n >= 0:
      to_pop = self.conn.zrange(self.CACHE_KEYS, 0, pop_n)
      #print pop_n, to_pop
      self.conn.zremrangebyrank(self.CACHE_KEYS, 0, pop_n)
      for k in to_pop:
        self.conn.hdel(self.CACHE_STORE, k)

  def _add_word(self, key, value):
    result = self.conn.hget(self.CACHE_STORE, key)
    if result:
      self.conn.zincrby(self.CACHE_KEYS, key, 1.0)
    else:
      self._reorganize()
      self.conn.hset(self.CACHE_STORE, key, value)
      self.conn.zadd(self.CACHE_KEYS, 1, key)

  def _get_words(self):
    try:
      words = self.conn.zrevrange(self.CACHE_KEYS, 0, -1, True)
      #hashs = self.conn.hgetall(self.CACHE_STORE)
      #print words
      #print hashs
      return words
    except redis.exceptions.ConnectionError:
      return None

  def dump_console(self):
    if os.path.isfile(self.WORD_FILE):
      with open(self.WORD_FILE, 'r') as f:
        print f.read()

  def write_file(self):
    words = self._get_words()
    if words is None:
      return
    content = '\n'.join(["%d. %s\t %d"%(i, x[0], int(x[1])) for i, x in enumerate(words)])
    with open(self.WORD_FILE, 'w+') as f:
      f.write(content)
      f.write('\n')

  def add_word(self, key):
    try:
      self._add_word(key,key)
      self.write_file()
    except redis.exceptions.ConnectionError:
      return
Developer: vonnyfly, Project: dict-redis-lfu, Lines: 58, Source: _lfu.py
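A hypothetical driver for the class above (assuming a reachable local Redis; add_word deliberately swallows connection errors):

wl = WordList()
for w in ['ephemeral', 'laconic', 'ephemeral']:
    wl.add_word(w)        # the repeated word gets its frequency bumped
wl.dump_console()         # prints ~/.words.txt, most frequent first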

Example 2: Persist

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class Persist(object):
    """ Sequential writer for Carbon server.
    The story is simple, fetch data from redis, write them, wait, loop.
    This code is supervised by Carbon daemon.
    """

    def __init__(self, path="/tmp/"):
        self.redis = Redis()
        self.path = path
        self.dirs = set()
        self.redis.sadd(METRICS, METRIC_POINTS, METRIC_WRITE)

    def metric(self, name, value):
        "Add some metrics : make your own dogfood, just before lunch."
        timestamp = time.time()
        serialized = struct.pack('!ff', timestamp, value)
        pipe = self.redis.pipeline()
        pipe.zadd(name, timestamp, serialized)
        pipe.publish(name, serialized)
        pipe.execute()

    def run(self):
        while True:
            before = time.time()
            self.handle()
            after = time.time()
            self.metric(METRIC_WRITE, (after - before) * 1000)
            time.sleep(PERIOD - (int(after) - int(before)))  # sleep out the remainder of the period

    def handle(self):
        points = 0
        for metric in self.redis.smembers(METRICS):
            values = self.redis.zrange(metric, 0, -1)
            points += len(values)
            f = target_to_path(self.path, metric)
            d = os.path.dirname(f)
            if d not in self.dirs:
                if not os.path.isdir(d):
                    os.makedirs(d)
                self.dirs.add(d)
            if not os.path.exists(f):
                whisper.create(f, [(10, 1000)])  # [FIXME] hardcoded values
            whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
            if len(values):
                self.redis.zrem(metric, *values)
        self.metric(METRIC_POINTS, points)
Developer: bearstech, Project: whirlwind-tornado, Lines: 48, Source: persist.py
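The '!ff' format string packs the timestamp and value as two big-endian 32-bit floats, which is why handle() can recover each point with a single struct.unpack. A quick roundtrip sketch (note that a 32-bit float cannot hold a current UNIX timestamp exactly; that is a limitation of the original format string, not of the sketch):

import struct
import time

ts, value = time.time(), 42.5
blob = struct.pack('!ff', ts, value)     # 8 bytes: two big-endian float32s
ts2, value2 = struct.unpack('!ff', blob)
print(value2)      # 42.5 (exactly representable)
print(ts2 - ts)    # nonzero: float32 keeps only ~7 significant digits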

Example 3: CacheModule

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class CacheModule(BaseCacheModule):
    """
    A caching module backed by redis.

    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used instead of a pattern-matched
    'scan' for performance.
    """
    def __init__(self, *args, **kwargs):
        if C.CACHE_PLUGIN_CONNECTION:
            connection = C.CACHE_PLUGIN_CONNECTION.split(':')
        else:
            connection = []

        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = StrictRedis(*connection)
        self._keys_set = 'ansible_cache_keys'

    def _make_key(self, key):
        return self._prefix + key

    def get(self, key):
        value = self._cache.get(self._make_key(key))
        # guard against the key not being removed from the zset;
        # this could happen in cases where the timeout value is changed
        # between invocations
        if value is None:
            self.delete(key)
            raise KeyError
        return json.loads(value)

    def set(self, key, value):
        value2 = json.dumps(value)
        if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
            self._cache.setex(self._make_key(key), int(self._timeout), value2)
        else:
            self._cache.set(self._make_key(key), value2)

        self._cache.zadd(self._keys_set, time.time(), key)

    def _expire_keys(self):
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)

    def keys(self):
        self._expire_keys()
        return self._cache.zrange(self._keys_set, 0, -1)

    def contains(self, key):
        self._expire_keys()
        # zrank returns None for missing members, so compare explicitly
        return self._cache.zrank(self._keys_set, key) is not None

    def delete(self, key):
        self._cache.delete(self._make_key(key))
        self._cache.zrem(self._keys_set, key)

    def flush(self):
        for key in self.keys():
            self.delete(key)

    def copy(self):
        # TODO: there is probably a better way to do this in redis
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret

    def __getstate__(self):
        return dict()

    def __setstate__(self, data):
        self.__init__()
Developer: KMK-ONLINE, Project: ansible, Lines: 77, Source: redis.py
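The docstring's mechanism — score every cache key with its insertion timestamp so an entire age range can be expired with one zremrangebyscore call — is a reusable pattern on its own. A minimal sketch (old-style zadd again):

import time
from redis import StrictRedis

r = StrictRedis()
KEYS = 'ansible_cache_keys'                         # the zset the plugin maintains

r.zadd(KEYS, time.time(), 'host1')                  # score = insertion time
r.zadd(KEYS, time.time(), 'host2')

timeout = 86400.0
r.zremrangebyscore(KEYS, 0, time.time() - timeout)  # drop everything older than timeout
print(r.zrange(KEYS, 0, -1))                        # survivors, oldest first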

Example 4: RedisJobStore

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's StrictRedis.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

#......... remaining code omitted .........
Developer: theguardian, Project: CherryStrap, Lines: 103, Source: redis.py
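For context, an APScheduler 3.x job store like this is normally attached to a scheduler rather than driven directly; a sketch along these lines (the job function must be importable so it can be pickled by reference):

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.redis import RedisJobStore

def heartbeat():
    print('beat')

scheduler = BackgroundScheduler(jobstores={'default': RedisJobStore(db=1)})
scheduler.add_job(heartbeat, 'interval', minutes=5, id='heartbeat')
scheduler.start()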

Example 5: RedisTopTalkerTracker

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class RedisTopTalkerTracker(object):
    def __init__(self, size=16384, redis_host='localhost', redis_port=6379,
                 redis_table='top_talkers'):
        self.size = size
        self.redis_table = redis_table
        self.client = StrictRedis(host=redis_host, port=redis_port)

        self.is_saturated = self.decide_is_saturated()

    def decide_is_saturated(self):
        return self.client.zcard(self.redis_table) >= self.size

    def clear(self):
        self.client.zremrangebyrank(self.redis_table, 0, -1)
        self.is_saturated = self.decide_is_saturated()

    def is_full(self):
        if self.is_saturated:
            return True

        self.is_saturated = self.decide_is_saturated()
        return self.is_saturated

    def get(self, key):
        count = self.client.zscore(self.redis_table, key)
        if count is None:
            return count

        return int(count)

    def contains(self, key):
        count = self.client.zscore(self.redis_table, key)
        return count is not None

    def add(self, key):
        # If it's already in there, increment its count and we're done.
        count = self.client.zscore(self.redis_table, key)
        if count is not None:
            self.client.zincrby(self.redis_table, key, 1)
            return

        # Else if the key is new to us but we're full, pop the lowest key/count
        # pair and insert the new key as count + 1.
        if self.is_full():
            keys_counts = self.client.zrange(
                self.redis_table, 0, 0, withscores=True, score_cast_func=int)
            old_count = keys_counts[0][1]
            self.client.zremrangebyrank(self.redis_table, 0, 0)
            new_count = old_count + 1
            self.client.zadd(self.redis_table, new_count, key)
            return

        # Or if the key is new to us and we have space, just insert it.
        self.client.zadd(self.redis_table, 1, key)

    def top_n_keys(self, n):
        return self.client.zrevrange(
            self.redis_table, 0, n - 1, score_cast_func=int)

    def top_n_keys_counts(self, n):
        keys_counts = self.client.zrevrange(
            self.redis_table, 0, n - 1, withscores=True, score_cast_func=int)
        if self.is_full():
            lowest_keys_counts = self.client.zrange(
                self.redis_table, 0, 0, withscores=True, score_cast_func=int)
            the_min = lowest_keys_counts[0][1] - 1
        else:
            the_min = 0
        return map(lambda (key, count): (key, count - the_min), keys_counts)
Developer: knighton, Project: top_talkers, Lines: 71, Source: with_redis.py
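A sketch of the intended behavior (local Redis assumed; note that the insert-at-old-count-plus-one rule makes a newly seen key immediately tie the one it evicted, and top_n_keys_counts reports counts relative to that inherited floor):

t = RedisTopTalkerTracker(size=2)
t.clear()
t.add('a')
t.add('a')
t.add('b')                     # a:2, b:1 -- table is now full
t.add('c')                     # evicts b (lowest) and inserts c at 1 + 1 = 2
print(t.top_n_keys_counts(2))  # e.g. [('a', 1), ('c', 1)] after the floor adjustment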

Example 6: Leaderboard

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class Leaderboard(object):
    """
    Main class for leaderboards.
    """
    _1_DAY_SECONDS = 60 * 60 * 24
    _1_WEEK_SECONDS = _1_DAY_SECONDS * 7
    _1_MONTH_SECONDS = _1_DAY_SECONDS * 31

    # Constants for specifying range(s) to Leaderboard constructor
    # TODO: make expiration configurable and setup a pruner task
    RANGE_DAILY = TimeRange('d', '%Y%m%d', 3 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_WEEKLY = TimeRange('w', '%Y%W', 2 * _1_WEEK_SECONDS + 2 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_MONTHLY = TimeRange('m', '%Y%m', 2 * _1_MONTH_SECONDS + 2 * _1_DAY_SECONDS, _KEY_DELIMITER)
    RANGE_ALLTIME = TimeRange('a', 'a', -1, _KEY_DELIMITER)
    RANGES_ALL = [RANGE_DAILY, RANGE_WEEKLY, RANGE_MONTHLY, RANGE_ALLTIME]

    def __init__(self, game, metric, ranges=RANGES_ALL, reverse=True,
                 timed_ties=False, tie_oldest_wins=True,
                 redis=None):
        """
        :param reverse: True for sorting by high to low scores
        :param timed_ties: True to use a given timestamp to resolve tie scores, assumes score values are ints
        :param tie_oldest_wins: True if the earlier time wins
        """
        self.game = game
        self.metric = metric
        self.ranges = ranges
        self.reverse = reverse
        self.timed_ties = timed_ties
        self.tie_oldest_wins = tie_oldest_wins

        if not redis:
            self.r = StrictRedis()
        else:
            self.r = redis

    def _board_key(self, range, slots_ago=0):
        """
        Board keys are of the format:
        /leaders/{game}/{metric}/{range_code}/{range_slot}
        e.g. /combat/highscore/d/20130207
        """
        if slots_ago != 0:
            d = range.date_range(slots_ago)[0]
        else:
            d = datetime.utcnow()
        return _KEY_DELIMITER.join(["leaders", self.game, self.metric,
                                    range.format(d)])

    def _hashlist(self, l):
        """
        hash from a list for creating unique temp zset keys
        """
        h = hashlib.sha1()
        for i in l:
            h.update(i)
        h.update(str(time.time()))
        return h.hexdigest()

    def _range(self, key, start, end):
        if self.reverse:
            return self.r.zrevrange(key, start, end, withscores=True, score_cast_func=float)
        else:
            return self.r.zrange(key, start, end, withscores=True, score_cast_func=float)

    def _add_ranks(self, leaders, offset=0):
        """
        Calculate ranks and update the given leader list to include them.
        Ranks start at 1.
        """
        with_ranks = [Leader(m, s, rank, t) for (m, s, t), rank in zip(leaders, itertools.count(offset + 1))]
        return with_ranks

    def _dt_to_ts(self, ts):
        """
        Ensure we are using a UNIX timestamp
        """
        if isinstance(ts, datetime):
            return (ts - datetime(1970, 1, 1)).total_seconds()
        else:
            return ts

    def _encode_value_with_time(self, value, ts):
        """
        Redis will rank members with identical scores lexicographically. Often this is not
        what we want for a leaderboard. Using the timed_ties option, we will store the
        timestamp in the decimal part of the float score and thereby use it for tie-breaking.
        tie_oldest_wins controls whether older or newer timestamps get ranked higher.
        """
        if not ts:
            ts = time.time()
        else:
            ts = self._dt_to_ts(ts)
        if self.reverse == self.tie_oldest_wins:
            # invert the timestamp for proper ordering
            ts = 3000000000 - ts
        to_dec = 0.0000000001
        return float(value) + (ts * to_dec)

    def _decode_value_with_time(self, combo):
#......... remaining code omitted .........
Developer: noise, Project: leaders-py, Lines: 103, Source: leaders.py
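Concretely: a score of 100 achieved at t=1500000000 with reverse == tie_oldest_wins encodes as 100 + (3000000000 - 1500000000) * 1e-10 = 100.15, so an earlier timestamp yields a larger fraction and wins ties on a high-to-low board. Since _decode_value_with_time is elided above, the decode below is an assumption reconstructed from the encoding:

def encode(value, ts, invert=True):          # invert when reverse == tie_oldest_wins
    if invert:
        ts = 3000000000 - ts
    return float(value) + ts * 0.0000000001

def decode(combo, invert=True):
    value = int(combo)
    ts = round((combo - value) / 0.0000000001)
    if invert:
        ts = 3000000000 - ts
    return value, ts

print(encode(100, 1500000000))   # 100.15
print(decode(100.15))            # (100, 1500000000.0), modulo float rounding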

Example 7: reload

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
import json
import re
import sys
import time
from datetime import datetime
from StringIO import StringIO

import pymongo
from redis import StrictRedis


reload(sys)
sys.setdefaultencoding('utf-8')

pattern = re.compile('^http://www.xiami.com/artist/\d+.*')

filtered_url = []

r = StrictRedis(host='127.0.0.1', port=6379)
for url in r.zrange('CQ:10001:www.xiami.com', 0, -1):
    info = json.loads(url)
    p = 1
    if pattern.match(info['url']):
        p = 16
    elif info['url'].startswith('http://www.xiami.com/album/'):
        p = 16
    elif info['url'].startswith('http://www.xiami.com/artist/album/id/'):
        p = 16
    filtered_url.append((url, p))

r.delete('CQ:10001:www.xiami.com')
for u in filtered_url:
    print u[1], u[0]
    #r.zadd('CQ:10001:www.xiami.com', u[1], u[0])
Developer: samuelduann, Project: scraper, Lines: 32, Source: clear.py

Example 8: RunLogger

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class RunLogger(object):

    def __init__(self, redis_config, max_logs=1000):
        self._redis = StrictRedis(**redis_config)
        self._lock = threading.Lock()
        self._logger = donothing  # All logger calls ignored
        self._job_id = None
        self.max_logs = max_logs

    @contextmanager
    def runlog(self, job_id, run_id=None):
        if not self._lock.acquire(False):
            raise Exception("Can't start %s. %s already started."
                            % (job_id, self._job_id))
        if run_id is None:
            run_id = str(datetime.datetime.now()).replace(' ', '-')
        try:
            hdlr = RunlogHandler(self._redis, job_id, run_id)
            self._logger = logging.getLogger("%s|%s" % (job_id, run_id))
            self._job_id = job_id
            self._logger.addHandler(hdlr)
            timestamp = time.time()
            self._redis.zadd('jobs', timestamp, job_id)
            self._redis.zadd('%s|runs' % job_id, timestamp, run_id)
            self._redis.set('%s|%s|start' % (job_id, run_id), timestamp)
            try:
                try:
                    yield self._logger
                finally:
                    self._redis.set('%s|%s|end' % (job_id, run_id),
                                    time.time())
            except CancelLog:
                self.forget_run(job_id, run_id)
            except Exception as ex:
                self._redis.zadd('exceptions',
                                 timestamp,
                                 "%s|%s" % (job_id, run_id))
                self._logger.exception(ex)
                del logging.Logger.manager.loggerDict[
                    "%s|%s" % (job_id, run_id)]
                raise ex  # Don't swallow errors.
        finally:
            self.forget_old_runs(job_id)
            self._job_id = None
            self._logger = donothing
            self._lock.release()

    def forget_run(self, job_id, run_id):
        self._redis.zrem('%s|runs' % job_id, run_id)
        self._redis.delete('%s|%s|start' % (job_id, run_id))
        self._redis.delete('%s|%s|end' % (job_id, run_id))
        self._redis.delete('%s|%s|log' % (job_id, run_id))

    def forget_old_runs(self, job_id):
        for run_id in self._redis.zrange('%s|runs' % job_id,
                                         self.max_logs,
                                         -1):
            self.forget_run(job_id, run_id)

    def debug(self, *a, **kw):
        self._logger.debug(*a, **kw)

    def info(self, *a, **kw):
        self._logger.info(*a, **kw)

    def warn(self, *a, **kw):
        self._logger.warn(*a, **kw)

    def warning(self, *a, **kw):
        self._logger.warning(*a, **kw)

    def error(self, *a, **kw):
        self._logger.error(*a, **kw)

    def critical(self, *a, **kw):
        self._logger.critical(*a, **kw)

    def exception(self, *a, **kw):
        self._logger.exception(*a, **kw)

    def list_jobs(self):
        return self._redis.zrevrange('jobs', 0, -1)

    def list_runs(self, job_id):
        return self._redis.zrange('%s|runs' % job_id, 0, -1)

    def run_times(self, job_id, run_id):
        return (self._redis.get('%s|%s|start' % (job_id, run_id)),
                self._redis.get('%s|%s|end' % (job_id, run_id)))

    def get_log(self, job_id, run_id):
        return self._redis.lrange('%s|%s|log' % (job_id, run_id), 0, -1)
Developer: Trapit, Project: runlog, Lines: 94, Source: loggage.py
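Typical use is through the context manager (a sketch; RunlogHandler, CancelLog, and donothing are helpers from the same module, referenced but not shown in the excerpt):

rl = RunLogger(redis_config={'host': 'localhost', 'port': 6379})
with rl.runlog('nightly-import') as log:
    log.warning('starting')      # recorded under nightly-import|<run_id>
    # raising CancelLog here would discard this run's records entirely
print(rl.list_runs('nightly-import'))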

Example 9: __init__

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class RedisBackend:

    colls_index_fmt = 'plumbca:' + dfconf['mark_version'] + ':collections:index'
    metadata_fmt = 'plumbca:' + dfconf['mark_version'] + ':metadata:timeline:{name}'
    inc_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + ':cache:{name}'
    sorted_count_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + \
                                  ':sorted:count:cache:{name}:{tagging}:{ts}'
    unique_count_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + \
                                  ':unique:count:cache:{name}:{tagging}:{ts}'

    def __init__(self):
        self.rdb = StrictRedis(host=rdconf['host'], port=rdconf['port'],
                               db=rdconf['db'])
        self.version = dfconf['mark_version']

    def set_collection_index(self, name, instance):
        """ Set the collection info of instance to the backend.
        """
        key = self.colls_index_fmt
        v = instance.__class__.__name__
        self.rdb.hset(key, name, packb(v))

    def get_collection_index(self, name):
        """ Get the collection info from backend by name.
        """
        key = self.colls_index_fmt
        rv = self.rdb.hget(key, name)
        return [name, unpackb(rv)] if rv else None

    def get_collection_indexes(self):
        """ Get all of the collections info from backend.
        """
        key = self.colls_index_fmt
        rv = self.rdb.hgetall(key)
        if rv:
            return {name.decode("utf-8"): unpackb(info)
                        for name, info in rv.items()}

    def delete_collection_keys(self, coll, klass=''):
        """ Danger! This method will erasing all values store in the key that
        should be only use it when you really known what are you doing.

        It is good for the testing to clean up the environment.
        """
        md_key = self.metadata_fmt.format(name=coll.name)
        self.rdb.delete(md_key)

        if klass == 'IncreaseCollection':
            cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
            self.rdb.delete(cache_key)

    def get_collection_length(self, coll, klass=''):
        if not klass:
            klass = coll.__class__.__name__

        rv = []
        md_key = self.metadata_fmt.format(name=coll.name)
        md_len = self.rdb.zcard(md_key)
        rv.append(md_len)
        # print('** TL -', self.rdb.zrange(md_key, 0, -1, withscores=True))

        if klass == 'IncreaseCollection':
            cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
            cache_len = self.rdb.hlen(cache_key)
            # notice that the cache_len is the length of all the items in cache_key
            rv.append(cache_len)

        return rv

    def set_collection_metadata(self, coll, tagging, expts, ts, *args):
        """ Insert data to the metadata structure if timestamp data do not
        exists. Note that the metadata structure include two types, timeline
        and expire.

        :param coll: collection class
        :param tagging: specific tagging string
        :param ts: the timestamp of the data
        :param expts: the expired timestamp of the data
        """
        md_key = self.metadata_fmt.format(name=coll.name)
        # Check whether an item already exists at this exact `ts` score,
        element = self.rdb.zrangebyscore(md_key, ts, ts)

        if element:
            info = unpackb(element[0])
            if tagging in info:
                # the tagging info already exists then do nothings
                return
            info[tagging] = [expts] + list(args)
            # remove the md_key and update new value atomically
            p = self.rdb.pipeline()
            p.zremrangebyscore(md_key, ts, ts)
            p.zadd(md_key, ts, packb(info))
            p.execute()

        else:
            info = {tagging: [expts] + list(args)}
            self.rdb.zadd(md_key, ts, packb(info))
        # print('-'*10)
        # print(tagging)
#......... remaining code omitted .........
Developer: JasonLai256, Project: plumbca, Lines: 103, Source: backend.py
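set_collection_metadata leans on a small reusable trick: zset members are immutable, so "editing" the payload stored at a timestamp means removing and re-adding it at the same score, pipelined so both steps apply together. Isolated (msgpack's packb/unpackb as in the example, old-style zadd):

import time
from msgpack import packb, unpackb
from redis import StrictRedis

r = StrictRedis()
key, ts = 'timeline', time.time()

existing = r.zrangebyscore(key, ts, ts)
info = unpackb(existing[0]) if existing else {}
info['tagging'] = [ts + 3600]        # update the payload

p = r.pipeline()
p.zremrangebyscore(key, ts, ts)      # drop the old member at this score...
p.zadd(key, ts, packb(info))         # ...and re-add the updated one in one transaction
p.execute()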

Example 10: RedisLRU

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class RedisLRU(object):
    def __init__(self, redis=None, **kwargs):
        if redis is not None:
            self._redis = redis
        else:
            self._redis = Redis(**kwargs)
        self.namespaces = {
            "default": 10000
        }

    def setup_namespace(self, namespace, size):
        """Set the LRU Size for a namespace.
        """
        self.namespaces[namespace] = int(size)

    def _serialize(self, s):
        # return json.dumps(s)
        return s

    def _unserialize(self, s):
        # s = s.decode("utf-8")
        # return json.loads(s)
        return s

    def _size(self, namespace):
        return self.namespaces[namespace]

    def _hit_store(self, namespace):
        if namespace not in self.namespaces:
            raise KeyError("invalid namespace")
        return "cache_keys_{}".format(namespace)

    def _value_store(self, namespace):
        if namespace not in self.namespaces:
            raise KeyError("invalid namespace")
        return "cache_values_{}".format(namespace)

    def _expire_old(self, namespace):
        hits = self._hit_store(namespace)
        size = self._size(namespace)
        count = self._redis.zcard(hits)
        if count >= size:
            values = self._value_store(namespace)
            items = self._redis.zrange(hits, 0, count-size)
            logger.error(items)
            self._redis.zremrangebyrank(hits, 0, count-size)
            self._redis.hdel(values, *items)

    def clear(self, namespace="default"):
        """Clear the Cache.
        """
        hits = self._hit_store(namespace)
        values = self._value_store(namespace)
        self._redis.delete(hits, values)

    def clearAll(self):
        """Clear all known namespaces.
        """
        for k in self.namespaces.iterkeys():
            self.clear(k)

    def store(self, key, value, namespace="default"):
        """Store a key value pair in cache.
        Existing entries are overwritten and their recency refreshed;
        eviction only runs when the key is new.
        """
        values = self._value_store(namespace)
        hits = self._hit_store(namespace)
        if not self._redis.hexists(values, key):
            # only a brand-new key can grow the namespace, so only then evict
            self._expire_old(namespace)
        self._redis.hset(values, key, self._serialize(value))
        self._redis.zadd(hits, time.time(), key)

    def get(self, key, namespace="default"):
        """Get a value from the cache.
        returns none if the key is not found.
        """
        values = self._value_store(namespace)
        value = self._redis.hget(values, key)
        if value:
            hits = self._hit_store(namespace)
            self._redis.zadd(hits, time.time(), key)
            return self._unserialize(value)
        return None

    def expire(self, key, namespace="default"):
        """Expire (invalidate) a key from the cache.
        """
        values = self._value_store(namespace)
        if self._redis.hexists(values, key):
            hits = self._hit_store(namespace)
            self._redis.hdel(values, key)
            self._redis.zrem(hits, key)
Developer: wuttem, Project: pytsdb, Lines: 98, Source: cache.py
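A usage sketch (values pass through _serialize unchanged here, so anything Redis accepts as a hash value works; reads refresh recency, which is what makes this behave as an LRU):

cache = RedisLRU(host='localhost', port=6379)
cache.setup_namespace('sessions', 500)              # cap the namespace at ~500 entries

cache.store('user:42', 'some-token', namespace='sessions')
print(cache.get('user:42', namespace='sessions'))   # the token; also bumps recency
cache.expire('user:42', namespace='sessions')       # explicit invalidation
print(cache.get('user:42', namespace='sessions'))   # None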

Example 11: SSDC

# Module required: from redis import StrictRedis [as alias]
# Alternatively: from redis.StrictRedis import zrange [as alias]
class SSDC(object):
    def __init__(self):
        self.r = StrictRedis(unix_socket_path=redis_socket, decode_responses=True)

    def __get_all_7_char_chunks(self, h):
        return set((unpack("<Q", base64.b64decode(h[i : i + 7] + "=") + b"\x00\x00\x00")[0] for i in range(len(h) - 6)))

    def __preprocess_hash(self, h):
        block_size, block_data, double_block_data = h.split(":")

        # Reduce any sequence of the same char greater than 3 to 3
        block_data = re.sub(r"(\w)\1\1\1(\1+)", r"\1\1\1", block_data)
        double_block_data = re.sub(r"(\w)\1\1\1(\1+)", r"\1\1\1", double_block_data)

        return block_size, self.__get_all_7_char_chunks(block_data), self.__get_all_7_char_chunks(double_block_data)

    def __add_chunks_db(self, p, block_size, chunk, sha256):
        for i in chunk:
            chunk = "ssdeep:{}:{}".format(block_size, i)
            p.sadd(chunk, sha256)
            p.sadd("ssdeep:chunks", chunk)

    def update_chunks_db(self, sha256, deephash):
        block_size, chunk, double_chunk = self.__preprocess_hash(deephash)
        p = self.r.pipeline(False)
        self.__add_chunks_db(p, block_size, chunk, sha256)
        self.__add_chunks_db(p, block_size, double_chunk, sha256)
        p.execute()

    def generate_all_chunks(self):
        for sha256 in self.r.smembers("hashes_sha256"):
            self.update_chunks_db(sha256, self.r.hget(sha256, "ssdeep"))

    def find_matches(self, key):
        similar_hashes = self.r.smembers(key)
        if len(similar_hashes) > 1:
            cur_hash = similar_hashes.pop()
            cur_ssdeep = self.r.hget(cur_hash, "ssdeep")
            p = self.r.pipeline(False)
            for sha256 in similar_hashes:
                score = pydeep.compare(cur_ssdeep.encode("utf-8"), self.r.hget(sha256, "ssdeep").encode("utf-8"))
                if score > 0:
                    key1 = "ssdeep:matches_{}".format(cur_hash)
                    key2 = "ssdeep:matches_{}".format(sha256)
                    p.zadd(key1, score, sha256)
                    p.zadd(key2, score, cur_hash)
                    p.sadd("ssdeep:all_matches", key1)
                    p.sadd("ssdeep:all_matches", key2)
            p.execute()

    def compare_similar_chunks(self):
        for key in self.r.smembers("ssdeep:chunks"):
            self.find_matches(key)

    def make_groups(self):
        all_hashes = self.r.smembers("hashes_sha256")
        while all_hashes:
            cur_hash = all_hashes.pop()
            matches = self.r.zrange("ssdeep:matches_{}".format(cur_hash), 0, -1)
            if matches:
                if isinstance(matches, list):
                    matches = set(matches)
                else:
                    matches = set([matches])
                all_hashes -= matches
                matches |= set([cur_hash])
            else:
                # NOTE: Should we make a group?
                # matches = set([cur_hash])
                self.r.sadd("ssdeep:no_matches", cur_hash)
                continue
            key = "ssdeep:group_{}".format(self.r.scard("ssdeep:groups"))
            self.r.sadd("ssdeep:groups", key)
            self.r.sadd(key, *matches)

    def clean_groups(self):
        self.r.delete(*self.r.smembers("ssdeep:groups"))
        self.r.delete(*self.r.smembers("ssdeep:all_matches"))
        self.r.delete("ssdeep:groups")
        self.r.delete("ssdeep:all_matches")
        self.r.delete("ssdeep:no_matches")

    # ########## Querying ##########

    def get_all_groups(self):
        return [(g, self.r.smembers(g)) for g in self.r.smembers("ssdeep:groups")]

    def get_group_samples(self, group):
        return self.r.smembers(group)
Developer: MISP, Project: misp-workbench, Lines: 91, Source: ssdeep_processing.py
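The end-to-end workflow the class implies (a sketch; it assumes the hashes_sha256 set and each hash's ssdeep field have already been loaded into Redis, as the method bodies expect):

ssdc = SSDC()
ssdc.generate_all_chunks()      # index every sample by its 7-char ssdeep chunks
ssdc.compare_similar_chunks()   # pydeep-compare samples that share a chunk
ssdc.make_groups()              # cluster mutually matching samples
for group, members in ssdc.get_all_groups():
    print(group, len(members))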


Note: The redis.StrictRedis.zrange method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.