

Python StrictRedis.zrem Method Code Examples

This article collects typical usage examples of the redis.StrictRedis.zrem method in Python. If you are wondering what StrictRedis.zrem does, or how to use it in practice, the curated code examples below should help. You can also explore further usage examples of redis.StrictRedis, the class this method belongs to.


The following presents 10 code examples of StrictRedis.zrem, ordered by popularity by default.
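
Before diving into the examples, here is a minimal sketch of what zrem itself does (assuming a Redis server on localhost; the key and member names are made up): it removes one or more members from a sorted set and returns the number of members actually removed. Note that most examples on this page were written against the redis-py 2.x StrictRedis API; the argument order of zadd and zincrby changed in redis-py 3.x.

from redis import StrictRedis

r = StrictRedis(host='localhost', port=6379, db=0)
r.zadd('scores', 10, 'alice', 20, 'bob')    # redis-py 2.x: zadd(name, score1, member1, ...)
# redis-py 3.x equivalent: r.zadd('scores', {'alice': 10, 'bob': 20})
removed = r.zrem('scores', 'alice', 'bob')  # zrem(name, *members) -> number removed
print(removed)                              # 2
print(r.zrange('scores', 0, -1))            # []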

Example 1: copy_values

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
from redis.exceptions import ResponseError

def copy_values():
    source = StrictRedis()
    dest = StrictRedis(db=1)
    for key in source.keys():
        try:
            for doc_id, value in source.zrevrange(key, 0, -1, withscores=True):
                print(dest.zadd(key, value, doc_id))  # redis-py 2.x: zadd(name, score, member)
                source.zrem(key, doc_id)
        except ResponseError:  # the key is not a sorted set
            pass
    print(key)  # last key processed
Author: LouisVN, Project: LR-Data, Lines: 13, Source: seel.py
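
The loop above costs one round trip per member and leans on ResponseError to skip keys that are not sorted sets. A hedged sketch of the same copy (same assumed local instances) that checks the key type up front and batches the writes in a pipeline:

from redis import StrictRedis

def copy_values_pipelined():
    source = StrictRedis()
    dest = StrictRedis(db=1)
    for key in source.keys():
        if source.type(key) != b'zset':  # skip keys that are not sorted sets
            continue
        pipe = dest.pipeline()
        for doc_id, score in source.zrevrange(key, 0, -1, withscores=True):
            pipe.zadd(key, score, doc_id)  # redis-py 2.x argument order
        pipe.execute()
        source.delete(key)  # drop the whole source set in one command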

Example 2: tfidf_reduce

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
def tfidf_reduce(args):
    if args is None:
        return
    key, doc_id, value = args
    import math
    # db and doc_count are module-level globals (the document store and the
    # corpus size) defined elsewhere in tfidf.py.
    r = StrictRedis(db=1)
    def freq(word, doc_id):
        return r.zscore(word, doc_id)

    def word_count(doc_id):
        try:
            with open("counts/" + doc_id, "r+") as f:
                return float(f.read())
        except (IOError, ValueError):  # missing count file or unparsable contents
            return 1.0

    def num_docs_containing(word):
        return r.zcard(word)

    def tf(word, doc_id):
        return (freq(word, doc_id) / float(word_count(doc_id)))

    def idf(word):
        return math.log(doc_count / float(num_docs_containing(word)))

    def tf_idf(word, doc_id):
        return tf(word, doc_id) * idf(word)

    if doc_id not in db:
        print("Deleted " + doc_id + " from " + key)
        r.zrem(key, doc_id)
        return
    doc = db[doc_id]
    multiplier = 1
    try:
        if key.lower() in doc['title'].lower():
            multiplier = 4
        elif key.lower() in doc['description'].lower():
            multiplier = 2
    except (KeyError, AttributeError):  # document lacks a title or description
        pass
    rank = tf_idf(key, doc_id) * multiplier
    if rank is None:
        rank = 0
    print("{0}: {1} is {2}".format(doc_id, key, rank))
    r.zadd(key, rank, doc_id)  # redis-py 2.x: zadd(name, score, member)
    return rank
Author: LouisVN, Project: LR-Data, Lines: 50, Source: tfidf.py
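
The helpers implement the standard tf-idf weighting, tf(w, d) = freq(w, d) / word_count(d) and idf(w) = log(doc_count / num_docs_containing(w)). A self-contained illustration with made-up numbers, no Redis required:

import math

freq = 3.0          # occurrences of the word in the document (r.zscore)
word_count = 120.0  # total words in the document (the counts/<doc_id> file)
doc_count = 1000.0  # documents in the corpus (module-level global)
containing = 50.0   # documents containing the word (r.zcard)

tf = freq / word_count                  # 0.025
idf = math.log(doc_count / containing)  # log(20) ~= 3.0
print(tf * idf)                         # ~0.075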

Example 3: Persist

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
class Persist(object):
    """ Sequential writer for Carbon server.
    The story is simple, fetch data from redis, write them, wait, loop.
    This code is supervised by Carbon daemon.
    """

    def __init__(self, path="/tmp/"):
        # METRICS, METRIC_POINTS, METRIC_WRITE, PERIOD and target_to_path are
        # module-level names defined elsewhere in persist.py.
        self.redis = StrictRedis()
        self.path = path
        self.dirs = set()
        self.redis.sadd(METRICS, METRIC_POINTS, METRIC_WRITE)

    def metric(self, name, value):
        "Add some metrics : make your own dogfood, just before lunch."
        timestamp = time.time()
        serialized = struct.pack('!ff', timestamp, value)
        pipe = self.redis.pipeline()
        pipe.zadd(name, timestamp, serialized)
        pipe.publish(name, serialized)
        pipe.execute()

    def run(self):
        while True:
            before = time.time()
            self.handle()
            after = time.time()
            self.metric(METRIC_WRITE, (after - before) * 1000)
            # Sleep for the remainder of the period.
            time.sleep(max(0, PERIOD - (after - before)))

    def handle(self):
        points = 0
        for metric in self.redis.smembers(METRICS):
            values = self.redis.zrange(metric, 0, -1)
            points += len(values)
            f = target_to_path(self.path, metric)
            d = os.path.dirname(f)
            if d not in self.dirs:
                if not os.path.isdir(d):
                    os.makedirs(d)
                self.dirs.add(d)
            if not os.path.exists(f):
                whisper.create(f, [(10, 1000)])  # [FIXME] hardcoded values
            whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
            if len(values):
                self.redis.zrem(metric, *values)
        self.metric(METRIC_POINTS, points)
Author: bearstech, Project: whirlwind-tornado, Lines: 48, Source: persist.py
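
A hedged usage sketch; the whisper directory is hypothetical, and in production the loop runs under the Carbon daemon's supervision:

if __name__ == '__main__':
    persister = Persist(path='/var/lib/carbon/whisper/')
    persister.run()  # blocks forever: drain Redis, write whisper files, sleep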

Example 4: remove

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
def remove(payload, redis_host='localhost', redis_port=6379):
    """
    Remove a package (or packages if the payload matches multiple) from the
    queue
    """
    connection = StrictRedis(host=redis_host, port=redis_port)
    packages = _get_packages_in_queue(connection)

    packages_to_remove = [
        package for package in packages if package.payload == payload]

    if packages_to_remove:
        LOG.debug(
            'Removing the following packages: {}'.format(packages_to_remove))
        entries_to_remove = [
            package.raw_entry for package in packages_to_remove]
        connection.zrem(DEFAULT_REDIS_KEY, *entries_to_remove)
        connection.publish(DEFAULT_REDIS_KEY, 'new_message')
Author: brendanmaguire, Project: kamikaze, Lines: 20, Source: operations.py
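
A hypothetical call, assuming queue entries whose payload is a plain package string:

remove('package-foo-1.2.3', redis_host='redis.example.com', redis_port=6379)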

Example 5: StrictRedis

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
    # redis_pool, db, ChatUser, and_ and disconnect are defined earlier in
    # reaper.py; this fragment is the daemon's main loop.
    redis = StrictRedis(connection_pool=redis_pool)

    while True:

        current_time = int(time.time())

        # Make sure a message is sent every 25 seconds so the long poll requests
        # don't time out.
        # XXX INCREASE THIS TO SEVERAL MINUTES
        for chat_id in redis.zrangebyscore("longpoll_timeout", 0, current_time):
            redis.publish("channel:%s" % chat_id, '{"messages":[]}')
            if redis.hlen("chat:%s:online" % chat_id) != 0:
                redis.zadd("longpoll_timeout", time.time() + 25, chat_id)
            else:
                redis.zrem("longpoll_timeout", chat_id)

        # And do the reaping.
        for dead in redis.zrangebyscore("chats_alive", 0, current_time):
            print(current_time, "Reaping", dead)
            chat_id, session_id = dead.split("/")
            user_id = redis.hget("chat:%s:online" % chat_id, session_id)
            disconnected = disconnect(redis, chat_id, session_id)
            # Only send a timeout message if they were already online.
            if not disconnected:
                print "Not sending timeout message."
                continue
            try:
                dead_chat_user = (
                    db.query(ChatUser)
                    .filter(and_(ChatUser.user_id == user_id, ChatUser.chat_id == chat_id))
Author: thecount92, Project: newparp, Lines: 32, Source: reaper.py
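
For context, a sketch of the other half of this contract (the key names and the 25-second deadline match the loop above; the handler itself and the 60-second reap deadline are assumptions): a request handler bumps both scores whenever a session shows activity, so the reaper leaves live chats alone.

import time
from redis import StrictRedis

redis = StrictRedis()

def touch(chat_id, session_id):
    now = time.time()
    redis.zadd('longpoll_timeout', now + 25, chat_id)                     # keep long polls fed
    redis.zadd('chats_alive', now + 60, '%s/%s' % (chat_id, session_id))  # defer reaping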

Example 6: CacheModule

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
class CacheModule(BaseCacheModule):
    """
    A caching module backed by redis.

    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used instead of a pattern-matched
    'scan', for performance.
    """
    def __init__(self, *args, **kwargs):
        if C.CACHE_PLUGIN_CONNECTION:
            connection = C.CACHE_PLUGIN_CONNECTION.split(':')
        else:
            connection = []

        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = StrictRedis(*connection)
        self._keys_set = 'ansible_cache_keys'

    def _make_key(self, key):
        return self._prefix + key

    def get(self, key):
        value = self._cache.get(self._make_key(key))
        # guard against the key not being removed from the zset;
        # this could happen in cases where the timeout value is changed
        # between invocations
        if value is None:
            self.delete(key)
            raise KeyError
        return json.loads(value)

    def set(self, key, value):
        value2 = json.dumps(value)
        if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
            self._cache.setex(self._make_key(key), int(self._timeout), value2)
        else:
            self._cache.set(self._make_key(key), value2)

        self._cache.zadd(self._keys_set, time.time(), key)

    def _expire_keys(self):
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)

    def keys(self):
        self._expire_keys()
        return self._cache.zrange(self._keys_set, 0, -1)

    def contains(self, key):
        self._expire_keys()
        # zrank returns None for a missing member; comparing None >= 0 raises
        # a TypeError on Python 3, so test for None explicitly.
        return self._cache.zrank(self._keys_set, key) is not None

    def delete(self, key):
        self._cache.delete(self._make_key(key))
        self._cache.zrem(self._keys_set, key)

    def flush(self):
        for key in self.keys():
            self.delete(key)

    def copy(self):
        # TODO: there is probably a better way to do this in redis
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret

    def __getstate__(self):
        return dict()

    def __setstate__(self, data):
        self.__init__()
Author: KMK-ONLINE, Project: ansible, Lines: 77, Source: redis.py
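
The zset bookkeeping the docstring describes can be seen in isolation in this hedged sketch (the key name is borrowed from the plugin; the timeout value is made up). Insertion timestamps are scores, so expiry is a single zremrangebyscore:

import time
from redis import StrictRedis

r = StrictRedis()
r.zadd('ansible_cache_keys', time.time(), 'host1')  # score = insertion time
time.sleep(2)
timeout = 1.0
r.zremrangebyscore('ansible_cache_keys', 0, time.time() - timeout)
print(r.zrange('ansible_cache_keys', 0, -1))        # [] -- host1 has expired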

Example 7: Graph

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]

#......... part of the code omitted here .........
        in_edges = self.redis.smembers('{}:n:{}:ie'.format(self.name, node))
        out_edges = self.redis.smembers('{}:n:{}:oe'.format(self.name, node))
        for e in in_edges | out_edges:
            self.del_edge(e)
        props = node_obj.properties()
        for p in props.keys():
            self._deindex(node_obj.name, p, props[p])
        self.redis.delete('{}:n:{}:p'.format(self.name, node))  # might be unnecessary
        self.redis.srem(self.nodes_key, node)

    def del_edge(self, edge):
        """
            Delete an edge from the graph, by Edge() object or by edge id.
            
            Usage::

                >>> g.del_edge(e) #with Edge() object
                >>> g.del_edge(25) #by edge id
                >>> del e #you should probably do this too
            
            :param edge: rgr.Edge or edge ID to delete.
        """
        if type(edge) is Edge:
            edge_obj = edge
            edge = edge.id
        else:
            edge = str(edge)
            edge_obj = Edge(self, edge)
        if not self.redis.sismember(self.edges_key, edge):
            raise ValueError(edge)
        parent = self.redis.get('{}:e:{}:in'.format(self.name, edge))
        child = self.redis.get('{}:e:{}:on'.format(self.name, edge))
        # Parallel edges are refcounted: decrement the neighbour's score and
        # drop the entry only once the count hits zero.
        if self.redis.zincrby('{}:n:{}:cn'.format(self.name, parent), child, -1) == 0:
            self.redis.zrem('{}:n:{}:cn'.format(self.name, parent), child)
        if self.redis.zincrby('{}:n:{}:pn'.format(self.name, child), parent, -1) == 0:
            self.redis.zrem('{}:n:{}:pn'.format(self.name, child), parent)
        self.redis.srem('{}:n:{}:oe'.format(self.name, parent), edge)
        self.redis.srem('{}:n:{}:ie'.format(self.name, child), edge)
        props = edge_obj.properties()
        for p in props.keys():  # most likely works
            self._deindex(edge_obj.name, p, props[p])
        self.redis.delete(
            '{}:e:{}:in'.format(self.name, edge),
            '{}:e:{}:on'.format(self.name, edge),
            '{}:e:{}:p'.format(self.name, edge)
        )
        self.redis.srem(self.edges_key, edge)

    def get_nodes(self, **kwargs):
        """
            Return a list of nodes that have properties that exactly match all kwargs supplied.
           
            Usage::
 
                >>> johns = g.get_nodes(name='John')
                >>> johnsmiths = g.get_nodes(name='John', lastname='Smith')

            :param kwargs: properties to look up.
        """
        return [Node(self, x) for x in self.redis.sinter(
            ['{}:i:n:{}:{}'.format(self.name, k, kwargs[k]) for k in kwargs]
        )]
    
    def get_edges(self, **kwargs):
        """
            Return a list of edges that have properties that exactly match all kwargs supplied.
Author: la11111, Project: rgr, Lines: 70, Source: rgr.py
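
del_edge refcounts parallel edges in sorted sets: each neighbour's score counts the edges between two nodes, and zrem fires only when a decrement reaches zero. A standalone sketch of that pattern (key names are hypothetical; redis-py 2.x zincrby(name, value, amount) argument order):

from redis import StrictRedis

r = StrictRedis()
r.zincrby('g:n:1:cn', '2')               # first edge 1 -> 2
r.zincrby('g:n:1:cn', '2')               # a second, parallel edge
if r.zincrby('g:n:1:cn', '2', -1) == 0:  # remove one edge
    r.zrem('g:n:1:cn', '2')              # not reached: one edge remains
print(r.zscore('g:n:1:cn', '2'))         # 1.0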

Example 8: RunLogger

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
class RunLogger(object):

    def __init__(self, redis_config, max_logs=1000):
        # donothing (a no-op logger), RunlogHandler and CancelLog are defined
        # elsewhere in loggage.py.
        self._redis = StrictRedis(**redis_config)
        self._lock = threading.Lock()
        self._logger = donothing  # all logger calls are ignored between runs
        self._job_id = None
        self.max_logs = max_logs

    @contextmanager
    def runlog(self, job_id, run_id=None):
        if not self._lock.acquire(False):
            raise Exception("Can't start %s. %s already started."
                            % (job_id, self._job_id))
        if run_id is None:
            run_id = str(datetime.datetime.now()).replace(' ', '-')
        try:
            hdlr = RunlogHandler(self._redis, job_id, run_id)
            self._logger = logging.getLogger("%s|%s" % (job_id, run_id))
            self._job_id = job_id
            self._logger.addHandler(hdlr)
            timestamp = time.time()
            self._redis.zadd('jobs', timestamp, job_id)
            self._redis.zadd('%s|runs' % job_id, timestamp, run_id)
            self._redis.set('%s|%s|start' % (job_id, run_id), timestamp)
            try:
                try:
                    yield self._logger
                finally:
                    self._redis.set('%s|%s|end' % (job_id, run_id),
                                    time.time())
            except CancelLog:
                self.forget_run(job_id, run_id)
            except Exception as ex:
                self._redis.zadd('exceptions',
                                 timestamp,
                                 "%s|%s" % (job_id, run_id))
                self._logger.exception(ex)
                del logging.Logger.manager.loggerDict[
                    "%s|%s" % (job_id, run_id)]
                raise ex  # Don't swallow errors.
        finally:
            self.forget_old_runs(job_id)
            self._job_id = None
            self._logger = donothing
            self._lock.release()

    def forget_run(self, job_id, run_id):
        self._redis.zrem('%s|runs' % job_id, run_id)
        self._redis.delete('%s|%s|start' % (job_id, run_id))
        self._redis.delete('%s|%s|end' % (job_id, run_id))
        self._redis.delete('%s|%s|log' % (job_id, run_id))

    def forget_old_runs(self, job_id):
        for run_id in self._redis.zrange('%s|runs' % job_id,
                                         self.max_logs,
                                         -1):
            self.forget_run(job_id, run_id)

    def debug(self, *a, **kw):
        self._logger.debug(*a, **kw)

    def info(self, *a, **kw):
        self._logger.info(*a, **kw)

    def warn(self, *a, **kw):
        self._logger.warn(*a, **kw)

    def warning(self, *a, **kw):
        self._logger.warning(*a, **kw)

    def error(self, *a, **kw):
        self._logger.error(*a, **kw)

    def critical(self, *a, **kw):
        self._logger.critical(*a, **kw)

    def exception(self, *a, **kw):
        self._logger.exception(*a, **kw)

    def list_jobs(self):
        return self._redis.zrevrange('jobs', 0, -1)

    def list_runs(self, job_id):
        return self._redis.zrange('%s|runs' % job_id, 0, -1)

    def run_times(self, job_id, run_id):
        return (self._redis.get('%s|%s|start' % (job_id, run_id)),
                self._redis.get('%s|%s|end' % (job_id, run_id)))

    def get_log(self, job_id, run_id):
        return self._redis.lrange('%s|%s|log' % (job_id, run_id), 0, -1)
Author: Trapit, Project: runlog, Lines: 94, Source: loggage.py
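
A hedged usage sketch (the connection settings and job name are made up):

logger = RunLogger({'host': 'localhost', 'port': 6379, 'db': 0})
with logger.runlog('nightly-import') as log:
    log.info('starting import')
    # ... do the work; exceptions are recorded in Redis and re-raised ...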

Example 9: Queue

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
class Queue(object):
    """
    Implements a sort-of-queue of crawl tasks.
    """
    def __init__(self, name, url_canonicalizer=None):
        self.name = name
        self.canonicalize_url = url_canonicalizer or (lambda url: url)
        self.redis = StrictRedis(host='localhost', port=6379, db=0)
        # XXX Temporary
        #self.redis.flushall()

    def serialize(self, task):
        return pickle.dumps(task)

    def deserialize(self, s):
        return pickle.loads(s)

    def prefix_redis_key(self, prefix, key):
        return ':'.join([self.name, prefix, key])

    def pick_queue_key(self, high_priority):
        return self.prefix_redis_key('todo', 'hp' if high_priority else 'nn')

    def hash_url(self, url):
        return hashlib.md5(url.encode('utf-8')).hexdigest()  # encode for Python 3

    def record_crawl_timestamp(self, url, now):
        self.redis.set(self.prefix_redis_key('urlts', self.hash_url(url)),
                       '%d' % now)

    def get_crawl_timestamp(self, url):
        s = self.redis.get(self.prefix_redis_key('urlts', self.hash_url(url)))
        if s:
            return int(s)

    def get_existing_task_id(self, url):
        s = self.redis.get(self.prefix_redis_key('taskbyurl',
                                                 self.hash_url(url)))
        if s:
            task_id, high_priority_bool = s.split(',')
            return task_id, high_priority_bool == '1'
        else:
            return None, None

    def get_task(self, task_id):
        s = self.redis.get(task_id)
        return self.deserialize(s)

    def drop_task(self, task_id, high_priority):
        self.redis.zrem(self.pick_queue_key(high_priority), task_id)
        self.redis.delete(task_id)

    def push(self, task):
        """
        Schedule a new crawl task, which is an instance of ``Task``.

        If ``high_priority`` is set, drop all other tasks for the same
        canonicalized URL and enqueue to a separate high priority queue.

        If ``min_age`` is set and this task URL has been crawled before, check
        the last time it was crawled, and reschedule it to a timestamp which is
        ``min_age`` after the last crawl time.

        If a different crawl task is already scheduled for this URL, check the
        scheduled time. If it has an intentionally delayed crawl time and that
        scheduled time is *after* this task, drop it. Otherwise, drop this
        task. Either way, the earlier of the two tasks should be kept.
        """
        # XXX FIXME
        # We really should take a lock on this URL, to prevent race conditions
        # in the conditional logic below.

        existing_task_id, existing_is_hp = self.get_existing_task_id(task.url)

        if task.high_priority:
            # If this task is HP, drop any other task for this URL..
            if existing_task_id:
                self.drop_task(existing_task_id, existing_is_hp)

        elif existing_is_hp:
            # If there's an existing high priority task, drop this one.
            return

        else:
            # If min_age is set, check for previous crawls and further delay
            # this task in order to ensure that min_age is not violated.
            if task.min_age:
                last_timestamp = self.get_crawl_timestamp(task.url)
                if last_timestamp:
                    task.scheduled_timestamp = max(last_timestamp +
                                                   task.min_age,
                                                   task.scheduled_timestamp)

            # If there is an existing task, check if it's scheduled earlier.
            if existing_task_id:
                scheduled_ts = self.redis.zscore(
                    self.pick_queue_key(existing_is_hp),
                    existing_task_id)

                if scheduled_ts > task.scheduled_timestamp:
#......... part of the code omitted here .........
Author: storborg, Project: itsy, Lines: 103, Source: queue.py
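
A hypothetical usage sketch; Task here is a minimal stand-in for the real class, which is defined elsewhere in the project:

import time

class Task(object):
    def __init__(self, url, high_priority=False, min_age=None):
        self.url = url
        self.high_priority = high_priority
        self.min_age = min_age
        self.scheduled_timestamp = time.time()

q = Queue('crawler')
q.push(Task('http://example.com/', high_priority=True))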

Example 10: RedisLRU

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import zrem [as alias]
class RedisLRU(object):
    def __init__(self, redis=None, **kwargs):
        if redis is not None:
            self._redis = redis
        else:
            self._redis = StrictRedis(**kwargs)
        self.namespaces = {
            "default": 10000
        }

    def setup_namespace(self, namespace, size):
        """Set the LRU Size for a namespace.
        """
        self.namespaces[namespace] = int(size)

    def _serialize(self, s):
        # return json.dumps(s)
        return s

    def _unserialize(self, s):
        # s = s.decode("utf-8")
        # return json.loads(s)
        return s

    def _size(self, namespace):
        return self.namespaces[namespace]

    def _hit_store(self, namespace):
        if namespace not in self.namespaces:
            raise KeyError("invalid namespace")
        return "cache_keys_{}".format(namespace)

    def _value_store(self, namespace):
        if namespace not in self.namespaces:
            raise KeyError("invalid namespace")
        return "cache_values_{}".format(namespace)

    def _expire_old(self, namespace):
        hits = self._hit_store(namespace)
        size = self._size(namespace)
        count = self._redis.zcard(hits)
        if count >= size:
            values = self._value_store(namespace)
            items = self._redis.zrange(hits, 0, count-size)
            logger.error(items)  # module-level logger; records the evicted keys
            self._redis.zremrangebyrank(hits, 0, count-size)
            self._redis.hdel(values, *items)

    def clear(self, namespace="default"):
        """Clear the Cache.
        """
        hits = self._hit_store(namespace)
        values = self._value_store(namespace)
        self._redis.delete(hits, values)

    def clearAll(self):
        """Clear all known namespaces.
        """
        for k in self.namespaces:
            self.clear(k)

    def store(self, key, value, namespace="default"):
        """Store a key value pair in cache.
        Existing items are overwritten; older entries are only evicted
        when a new key is inserted.
        """
        values = self._value_store(namespace)
        hits = self._hit_store(namespace)
        if not self._redis.hexists(values, key):
            self._expire_old(namespace)
        self._redis.hset(values, key, self._serialize(value))
        self._redis.zadd(hits, time.time(), key)  # redis-py 2.x: zadd(name, score, member)

    def get(self, key, namespace="default"):
        """Get a value from the cache.
        returns none if the key is not found.
        """
        values = self._value_store(namespace)
        value = self._redis.hget(values, key)
        if value:
            hits = self._hit_store(namespace)
            self._redis.zadd(hits, time.time(), key)
            return self._unserialize(value)
        return None

    def expire(self, key, namespace="default"):
        """Expire (invalidate) a key from the cache.
        """
        values = self._value_store(namespace)
        if self._redis.hexists(values, key):
            hits = self._hit_store(namespace)
            self._redis.hdel(values, key)
            self._redis.zrem(hits, key)
Author: wuttem, Project: pytsdb, Lines: 98, Source: cache.py
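
A hedged usage sketch of the cache (connection settings and values are made up):

cache = RedisLRU(host='localhost', port=6379, db=0)
cache.setup_namespace('pages', 500)          # keep at most ~500 entries
cache.store('home', '<html>...</html>', namespace='pages')
print(cache.get('home', namespace='pages'))  # b'<html>...</html>'
cache.expire('home', namespace='pages')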


Note: The redis.StrictRedis.zrem examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; source code copyright remains with the original authors, and distribution and use are subject to each project's License. Please do not reproduce without permission.