

Python Redis.pipeline Method Code Examples

This article collects typical usage examples of the redis.client.Redis.pipeline method in Python. If you are wondering what Redis.pipeline does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of redis.client.Redis, the class this method belongs to.


Three code examples of the Redis.pipeline method are shown below, ordered by popularity.
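Before the longer examples, here is a minimal, hedged sketch of the basic pattern (it assumes a Redis server reachable on localhost:6379): commands are queued on the pipeline object on the client side and sent to the server in a single round trip when execute() is called, which returns one result per queued command.

from redis.client import Redis

# Minimal pipeline sketch (assumes a local Redis server on the default port).
r = Redis(host='localhost', port=6379, db=0)
pipe = r.pipeline()      # by default the queued commands are wrapped in MULTI/EXEC
pipe.set('counter', 0)
pipe.incr('counter')
pipe.incr('counter')
print(pipe.execute())    # -> [True, 1, 2], one result per queued command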

Example 1: _transfer_slots

# Required import: from redis.client import Redis [as alias]
# Or: from redis.client.Redis import pipeline [as alias]
    def _transfer_slots(redis_conn_from: Redis, redis_id_from: str, redis_conn_to: Redis, redis_id_to: str, slots: list):
        """
        Documentation from http://redis.io/commands/cluster-setslot
         1. Set the destination node slot to importing state using CLUSTER SETSLOT <slot> IMPORTING <source-node-id>.
         2. Set the source node slot to migrating state using CLUSTER SETSLOT <slot> MIGRATING <destination-node-id>.
         3. Get keys from the source node with CLUSTER GETKEYSINSLOT command and move them into the destination node
            using the MIGRATE command.
         4. Use CLUSTER SETSLOT <slot> NODE <destination-node-id> in the source or destination.
        """
        print('Transferring %d slots from %s to %s...' % (len(slots), redis_id_from, redis_id_to))
        dest_host = redis_conn_to.connection_pool.connection_kwargs['host']
        dest_port = redis_conn_to.connection_pool.connection_kwargs['port']

        pipeline_to = redis_conn_to.pipeline()
        pipeline_from = redis_conn_from.pipeline()
        for slot in slots:
            # Steps 1 and 2: mark the slot as importing on the destination and migrating on the source
            pipeline_to.execute_command('CLUSTER SETSLOT', slot, 'IMPORTING', redis_id_from)
            pipeline_from.execute_command('CLUSTER SETSLOT', slot, 'MIGRATING', redis_id_to)
        pipeline_to.execute()
        pipeline_from.execute()

        for slot in slots:
            # Step 3: move all keys still stored in this slot over to the destination node
            keys = redis_conn_from.execute_command('CLUSTER GETKEYSINSLOT', slot, 1000000)
            if len(keys) > 0:
                redis_conn_from.execute_command('MIGRATE', dest_host, dest_port, "", 0, 180000, 'KEYS', *keys)
            # Step 4: assign the slot to the destination node
            redis_conn_to.execute_command('CLUSTER SETSLOT', slot, 'NODE', redis_id_to)
Developer: safecloud-project | Project: erasurebench | Lines of code: 31 | Source file: redis_cluster.py
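
For context, a hypothetical call site for this helper might look like the sketch below; the hosts, ports and slot range are placeholder values, the node IDs are fetched with CLUSTER MYID, and the helper is assumed to be callable directly (it is defined without self, presumably as a static method of the enclosing class).

from redis.client import Redis

# Hypothetical invocation (hosts, ports and the slot range are placeholders).
source = Redis(host='10.0.0.1', port=7000)
destination = Redis(host='10.0.0.2', port=7000)
source_id = source.execute_command('CLUSTER MYID')
destination_id = destination.execute_command('CLUSTER MYID')
# Hand the first 100 hash slots over to the destination node.
_transfer_slots(source, source_id, destination, destination_id, list(range(100)))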

Example 2: TSStore

# Required import: from redis.client import Redis [as alias]
# Or: from redis.client.Redis import pipeline [as alias]
# (the snippet below also uses the standard-library time module and the project's own Tweet class)
class TSStore(object):

    def __init__(self, config):
        self._redis = Redis(host=config.get('redis','host'), 
                            port=int(config.get('redis','port')),
                            db=int(config.get('redis','db')))
        self._delta_secs = int(eval(config.get('timeseries',
                                               'delta_secs')))
        self._expiration_delay_secs = int(eval(config.get('timeseries',
                                                          'expiration_delay_secs')))

    def queries_key(self):
        return 'queries'

    @property
    def queries(self):
        return self._redis.smembers(self.queries_key())

    @queries.setter
    def queries(self, values):
        pipe = self._redis.pipeline()
        pipe.delete(self.queries_key())
        for v in values:
            pipe.sadd(self.queries_key(), v)
        return pipe.execute()

    def _interval_key(self, timestamp):
        return int(timestamp) - int(timestamp) % self._delta_secs

    def _ts_key(self, timestamp, query):
        return 'ts:%(query)s:%(timestamp_key)s' % {'query': query,
                                                   'timestamp_key': self._interval_key(timestamp)}

    def _tweet_key(self, t):
        if type(t) == Tweet:
            return 'tweet:%s' % t.id
        return 'tweet:%s' % t

    def _query_key(self, query):
        return 'query:%s:last_tweet_id' % query

    def _store_tweet(self, pipe, tweet):
        tweet_key = self._tweet_key(tweet)
        pipe.set(tweet_key, tweet.serialize())
        pipe.expire(tweet_key, self._expiration_delay_secs)

    def _reference_tweet(self, pipe, timestamp, query, tweet):
        ts_key = self._ts_key(timestamp, query)
        pipe.lpush(ts_key, tweet.id)
        pipe.expire(ts_key, self._expiration_delay_secs)

    def _update_last_query_tweet(self, pipe, query, tweet):
        query_key = self._query_key(query)
        pipe.set(query_key, tweet.id)

    def append(self, query, tweet):
        pipe = self._redis.pipeline()
        timestamp = time.time()
        self._store_tweet(pipe, tweet)
        self._reference_tweet(pipe, timestamp, query, tweet)
        self._update_last_query_tweet(pipe, query, tweet)
        return pipe.execute()

    def retrieve_ts(self, query, timestamp, n_elements=-1):
        ts_key = self._ts_key(timestamp, query)
        return self._redis.lrange(ts_key, 0, n_elements)

    def retrieve_last_tweet_id(self, query):
        query_key = self._query_key(query)
        return self._redis.get(query_key)

    def retrieve_tweet(self, tweet_id):
        tweet_key = self._tweet_key(tweet_id)
        data = self._redis.get(tweet_key)
        return Tweet.deserialize(data).todict()

    def retrieve(self, query, n_periods=30):
        current_timestamp = now = int(time.time())
        start_timestamp = now - self._delta_secs * n_periods
        tweets = []
        while current_timestamp > start_timestamp:
            current_tweet_ids = self.retrieve_ts(query, current_timestamp)
            tweets.append({'timestamp': current_timestamp,
                           'tweets' : [ self.retrieve_tweet(tid) for tid in current_tweet_ids ] })
            current_timestamp -= self._delta_secs 
        return { 'now' : now,
                 'ts' : tweets }
Developer: oddskool | Project: varan | Lines of code: 79 | Source file: ts_store.py
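
To show how this class might be wired up, here is a hedged sketch using a standard configparser configuration; the section and option names mirror the config.get calls in __init__ above, while the concrete values are made up for illustration. Only the queries set is exercised, because append and retrieve additionally need the project's own Tweet class.

from configparser import ConfigParser

# Hypothetical configuration matching the options read by TSStore.__init__.
config = ConfigParser()
config.read_dict({
    'redis': {'host': 'localhost', 'port': '6379', 'db': '0'},
    'timeseries': {'delta_secs': '60', 'expiration_delay_secs': '60 * 60 * 24'},
})

store = TSStore(config)
store.queries = ['#python', '#redis']   # delete + sadd issued in one pipelined round trip
print(store.queries)                    # -> a set of bytes, e.g. {b'#python', b'#redis'}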

Example 3: RedisSocketManager

# Required import: from redis.client import Redis [as alias]
# Or: from redis.client.Redis import pipeline [as alias]
# (the snippet below also uses gevent, uuid, and project-local helpers such as BaseSocketManager, GroupLock, DefaultDict, RedisQueue and virtsocket)
class RedisSocketManager(BaseSocketManager):
    def __init__(self, *args, **kwargs):
        super(RedisSocketManager, self).__init__(*args, **kwargs)

        self.settings = self.config.get("socket_manager", {})

        self.prefix = self.settings.get("key_prefix", "socketio")
        self.buckets_count = self.settings.get("buckets_count", 1000)

        redis_settings = {}
        for k, v in self.settings.items():
            if k.startswith("redis_"):
                k = k.replace("redis_", "", 1)
                if k == "port":
                    v = int(v)
                redis_settings[k] = v

        self.redis_settings = redis_settings

        self.connected_key = "%s:connected" % self.prefix
        self.sockets_key = "%s:sockets" % self.prefix

        lock_factory = lambda sessid: GroupLock(self.redis, self.make_session_key(sessid, "lock"))
        self.locks = DefaultDict(lock_factory)
        self.uuid = str(uuid.uuid1())

        self.event_handlers = {"socket.events": self.on_socket_event, "endpoint.events": self.on_endpoint_event}

        self.jobs = []

    def spawn(self, fn, *args, **kwargs):
        new = gevent.spawn(fn, *args, **kwargs)
        self.jobs.append(new)
        return new

    def start(self):
        self.redis = Redis(**self.redis_settings)

        self.spawn(self._redis_listener)
        self.spawn(self._orphan_cleaner)

    def stop(self):
        gevent.killall(self.jobs)
        self.redis = None

    def clean_redis(self, sessid, client=None):
        single = client is None
        if client is None:
            client = self.redis.pipeline()
        client.delete(self.make_session_key(sessid, "session"))
        client.delete(self.make_session_key(sessid, "lock"))
        client.delete(self.make_session_key(sessid, "endpoints"))
        for qname in virtsocket.QUEUE_NAMES:
            client.delete(self.make_session_key(sessid, "queue:%s" % qname))

        client.zrem(self.sockets_key, sessid)
        client.hdel(self.make_bucket_name("hits", sessid), sessid)

        client.srem(self.connected_key, sessid)
        if single:
            client.execute()

    def detach(self, sessid):
        super(RedisSocketManager, self).detach(sessid)
        self.clean_redis(sessid)
        try:
            del self.locks[sessid]
        except KeyError:
            pass

    def make_bucket_name(self, key, sessid):
        return "%s:%s:b%s" % (self.prefix, key, self.bucket_id(sessid))

    def make_session_key(self, sessid, suffix):
        return "%s:%s:%s" % (self.prefix, sessid, suffix)

    def make_queue(self, sessid, name):
        """Returns a Redis based message queue.
        """
        return RedisQueue(self.redis, self.make_session_key(sessid, "queue:%s" % name))

    def bucket_id(self, sessid):
        """Returns the id of the corresponding bucket for a socket
        
        We'll keep socket hashed data in buckets.
        As we use random sessid these should be normally be quite sparse.
        """
        nid = int(sessid.lstrip("0") or 0)
        return str(nid % self.buckets_count)

    def read_queue(self, queue, **kwargs):
        """Optimized for faster bulk read from Redis, while still supporting ``block`` and ``timeout`` for the first read.
        
        Returns a list of all messages currently in the queue.
        Raises ``gevent.queue.Empty`` if the queue is empty.
        """
        ret = []
        block = kwargs.get("block", True)
        if block:
            ret.append(queue.get(**kwargs))  # block while reading the first
#......... part of the code omitted here .........
Developer: ryesoft | Project: gevent-socketio | Lines of code: 103 | Source file: socket_manager.py
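
The body of read_queue is truncated in the listing above. Purely as an illustration of the pipelined bulk-read idea its docstring describes (this is not the project's actual code, and a plain Redis list is assumed as the queue's backing key), draining the rest of such a queue after the first blocking read could look like this:

# Rough sketch, not the project's code: pipelined bulk drain of a Redis list.
def drain_list_bulk(client, key, first_item):
    """After a blocking read already returned first_item, fetch every message
    still stored on the Redis list `key` in one pipelined MULTI/EXEC round
    trip instead of issuing one LPOP per remaining message.

    `client` is a redis.client.Redis instance; `key` is the queue's list key.
    """
    pipe = client.pipeline()
    pipe.lrange(key, 0, -1)   # read everything still queued...
    pipe.delete(key)          # ...and clear the list in the same transaction
    remaining, _ = pipe.execute()
    return [first_item] + remaining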


Note: The redis.client.Redis.pipeline examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please refer to each project's License before distributing or using the code, and do not republish without permission.