

Python StrictRedis.zrangebyscore Method Code Examples

This article collects typical usage examples of the redis.StrictRedis.zrangebyscore method in Python. If you have been wondering how to use StrictRedis.zrangebyscore in practice, or what it is good for, the curated code examples below may help. You can also explore further usage examples of the redis.StrictRedis class that this method belongs to.


The sections below present 5 code examples of the StrictRedis.zrangebyscore method, ordered by popularity by default.
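Before the excerpts, here is a minimal, self-contained sketch of what zrangebyscore does; it is not taken from any of the projects below, the key and member names are purely illustrative, and the zadd argument order follows the older redis-py (< 3.0) StrictRedis API used throughout these examples:

from redis import StrictRedis
import time

# Illustrative local connection; adjust host/port/db for your setup.
r = StrictRedis(host="localhost", port=6379, db=0)

# Score each member by the Unix timestamp at which it becomes due.
now = int(time.time())
r.zadd("tasks_due", now - 60, "task:a")   # old API: zadd(name, score, value)
r.zadd("tasks_due", now + 60, "task:b")

# Members whose score lies between 0 and `now` (both inclusive), lowest score first.
print(r.zrangebyscore("tasks_due", 0, now))               # [b'task:a']

# Redis range syntax: '(' makes a bound exclusive, '-inf'/'+inf' are open ends.
print(r.zrangebyscore("tasks_due", "(%d" % now, "+inf"))  # [b'task:b']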

Example 1: sm

# Required module import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrangebyscore [as alias]
    db = sm()

    print "Obtaining lock..."
    db.query(func.pg_advisory_lock(413, 1)).scalar()
    print "Lock obtained."

    redis = StrictRedis(connection_pool=redis_pool)

    while True:

        current_time = int(time.time())

        # Make sure a message is sent every 25 seconds so the long poll requests
        # don't time out.
        # XXX INCREASE THIS TO SEVERAL MINUTES
        for chat_id in redis.zrangebyscore("longpoll_timeout", 0, current_time):
            redis.publish("channel:%s" % chat_id, '{"messages":[]}')
            if redis.hlen("chat:%s:online" % chat_id) != 0:
                redis.zadd("longpoll_timeout", time.time() + 25, chat_id)
            else:
                redis.zrem("longpoll_timeout", chat_id)

        # And do the reaping.
        for dead in redis.zrangebyscore("chats_alive", 0, current_time):
            print current_time, "Reaping ", dead
            chat_id, session_id = dead.split("/")
            user_id = redis.hget("chat:%s:online" % chat_id, session_id)
            disconnected = disconnect(redis, chat_id, session_id)
            # Only send a timeout message if they were already online.
            if not disconnected:
                print "Not sending timeout message."
Developer ID: thecount92, Project: newparp, Code lines: 33, Source file: reaper.py

Example 2: RedisJobStore

# Required module import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrangebyscore [as alias]
class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's StrictRedis.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

#......... part of the code omitted here .........
Developer ID: theguardian, Project: CherryStrap, Code lines: 103, Source file: redis.py
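As a usage note, not part of the excerpt above: in APScheduler this job store is normally registered through its plugin alias ``redis``, roughly as sketched below. The database number and key names are illustrative, and any leftover keyword arguments are forwarded to StrictRedis.

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
# 'redis' resolves to RedisJobStore; extra kwargs (host, port, ...) go to StrictRedis.
scheduler.add_jobstore('redis', db=1,
                       jobs_key='example.jobs',
                       run_times_key='example.run_times',
                       host='localhost', port=6379)
scheduler.start()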

Example 3: RedisConnector

# Required module import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrangebyscore [as alias]

#......... part of the code omitted here .........
		elif isinstance(expr.field, RangeIndexField):
			key = self.ridx_key(expr.model_cls.getprefix(), expr.field.name)
			val = expr.field.to_db(expr.val)

			if expr.operator == expr.EQ:
				minval = val
				maxval = val

			elif expr.operator == expr.GT:
				minval = '(%d' % val
				maxval = '+inf'

			elif expr.operator == expr.GE:
				minval = val
				maxval = '+inf'

			elif expr.operator == expr.LT:
				minval = '-inf'
				maxval = '(%d' % val

			elif expr.operator == expr.LE:
				minval = '-inf'
				maxval = val

			else:
				raise Exception('Unsupported operator type given')

			start = expr.offset
			num = expr.limit

			if num is None and start == 0:
				start = None

			ids = self.handler.zrangebyscore(
				key,
				minval,
				maxval,
				start=start,
				num=num,
			)

			return [expr.model_cls(model_id) for model_id in ids]

	def choice (self, field, model_cls, val, count=1):
		key = self.idx_key(model_cls.getprefix(), field.name, val)
		ids = self.handler.srandmember(key, count)

		return None if not len(ids) else \
			[model_cls(model_id) for model_id in ids]

	def _save_idx (self, field, model, pipe=None):
		""" Save given model.field index. """

		if isinstance(field, IndexField):
			idx_key = self.idx_key(model.getprefix(), field.name, model[field.name])

			if field.unique:
				ids = self.handler.smembers(idx_key)

				if len(ids):
					ids.discard(bytes(model._id, 'utf-8') if PY3K else model._id)

					if len(ids):
						raise Exception('Duplicate key error')

			self._del_idx(field, model, pipe)
Developer ID: jg9lt, Project: redisca2, Code lines: 70, Source file: redis.py
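A brief aside on the range arguments assembled above, following the standard Redis ZRANGEBYSCORE conventions (the key name below is illustrative): a leading '(' makes a bound exclusive, '-inf' and '+inf' stand for open ends, and start/num paginate the result, which is what the expression's offset and limit map onto.

from redis import StrictRedis

r = StrictRedis()  # illustrative connection

# Scores strictly greater than 10, no upper bound, first 20 matches only.
ids = r.zrangebyscore("scores_idx", "(10", "+inf", start=0, num=20)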

Example 4: print

# Required module import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrangebyscore [as alias]
    print("Terminating due to SIGTERM")
    sys.exit(0)


if __name__ == "__main__":
    signal.signal(signal.SIGTERM, sigterm_handler)
    r = False
    while not r:
        try:
            r = StrictRedis(host="hotlist-redis", port=6379, db=0)
        except:
            print("Waiting for redis...")
            time.sleep(2)

    last_time = None

    try:
        while True:
            timestamp = time.time()
            urls = r.zrangebyscore(KEY, 0, timestamp)
            if type(urls) == list and len(urls) > 0:
                for url_key in urls:
                    r.zincrby(URL_HOTLIST_KEY, url_key, -1.0)
                num = r.zremrangebyscore(URL_HOTLIST_KEY, 0, 0.0)
                print("Removed %d old URL entries from %s" % (num, URL_HOTLIST_KEY))
                num = r.zremrangebyscore(KEY, 0, timestamp)
                print("Removed %d old URL scores from %s" % (num, KEY))
            time.sleep(INTERVAL_SECONDS)
    finally:
        print("Exiting")
Developer ID: giantswarm, Project: twitter-hot-urls-example, Code lines: 32, Source file: cleaner.py

Example 5: __init__

# Required module import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrangebyscore [as alias]
class RedisBackend:

    colls_index_fmt = 'plumbca:' + dfconf['mark_version'] + ':collections:index'
    metadata_fmt = 'plumbca:' + dfconf['mark_version'] + ':metadata:timeline:{name}'
    inc_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + ':cache:{name}'
    sorted_count_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + \
                                  ':sorted:count:cache:{name}:{tagging}:{ts}'
    unique_count_coll_cache_fmt = 'plumbca:' + dfconf['mark_version'] + \
                                  ':unique:count:cache:{name}:{tagging}:{ts}'

    def __init__(self):
        self.rdb = StrictRedis(host=rdconf['host'], port=rdconf['port'],
                               db=rdconf['db'])
        self.version = dfconf['mark_version']

    def set_collection_index(self, name, instance):
        """ Set the collection info of instance to the backend.
        """
        key = self.colls_index_fmt
        v = instance.__class__.__name__
        self.rdb.hset(key, name, packb(v))

    def get_collection_index(self, name):
        """ Get the collection info from backend by name.
        """
        key = self.colls_index_fmt
        rv = self.rdb.hget(key, name)
        return [name, unpackb(rv)] if rv else None

    def get_collection_indexes(self):
        """ Get all of the collections info from backend.
        """
        key = self.colls_index_fmt
        rv = self.rdb.hgetall(key)
        if rv:
            return {name.decode("utf-8"): unpackb(info)
                        for name, info in rv.items()}

    def delete_collection_keys(self, coll, klass=''):
        """ Danger! This method will erasing all values store in the key that
        should be only use it when you really known what are you doing.

        It is good for the testing to clean up the environment.
        """
        md_key = self.metadata_fmt.format(name=coll.name)
        self.rdb.delete(md_key)

        if klass == 'IncreaseCollection':
            cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
            self.rdb.delete(cache_key)

    def get_collection_length(self, coll, klass=''):
        if not klass:
            klass = coll.__class__.__name__

        rv = []
        md_key = self.metadata_fmt.format(name=coll.name)
        md_len = self.rdb.zcard(md_key)
        rv.append(md_len)
        # print('** TL -', self.rdb.zrange(md_key, 0, -1, withscores=True))

        if klass == 'IncreaseCollection':
            cache_key = self.inc_coll_cache_fmt.format(name=coll.name)
            cache_len = self.rdb.hlen(cache_key)
            # notice that the cache_len is the length of all the items in cache_key
            rv.append(cache_len)

        return rv

    def set_collection_metadata(self, coll, tagging, expts, ts, *args):
        """ Insert data to the metadata structure if timestamp data do not
        exists. Note that the metadata structure include two types, timeline
        and expire.

        :param coll: collection class
        :param tagging: specific tagging string
        :param ts: the timestamp of the data
        :param expts: the expired timestamp of the data
        """
        md_key = self.metadata_fmt.format(name=coll.name)
        # Check whether an item already exists for the specific `ts`.
        element = self.rdb.zrangebyscore(md_key, ts, ts)

        if element:
            info = unpackb(element[0])
            if tagging in info:
                # the tagging info already exists then do nothings
                return
            info[tagging] = [expts] + list(args)
            # remove the md_key and update new value atomically
            p = self.rdb.pipeline()
            p.zremrangebyscore(md_key, ts, ts)
            p.zadd(md_key, ts, packb(info))
            p.execute()

        else:
            info = {tagging: [expts] + list(args)}
            self.rdb.zadd(md_key, ts, packb(info))
        # print('-'*10)
        # print(tagging)
#......... part of the code omitted here .........
Developer ID: JasonLai256, Project: plumbca, Code lines: 103, Source file: backend.py


Note: The redis.StrictRedis.zrangebyscore method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.