本文整理汇总了Python中redis.StrictRedis.pipeline方法的典型用法代码示例。如果您正苦于以下问题:Python StrictRedis.pipeline方法的具体用法?Python StrictRedis.pipeline怎么用?Python StrictRedis.pipeline使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类redis.StrictRedis
的用法示例。
在下文中一共展示了StrictRedis.pipeline方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: JSONRedisBackend
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class JSONRedisBackend(BaseBackend):
    """Redis-backed storage that JSON-encodes every value it stores."""

    def __init__(self):
        self.redis = StrictRedis()

    def serialize(self, value):
        """Encode *value* as a JSON string."""
        return json.dumps(value)

    def deserialize(self, value):
        """Decode a JSON payload; falsy inputs AND falsy results map to None."""
        if not value:
            return None
        # Preserve the original `value and loads(value) or None` idiom:
        # a falsy decoded payload (e.g. {} or 0) also collapses to None.
        return json.loads(value) or None

    def get(self, key):
        """Fetch and decode the value stored under *key*."""
        return self.deserialize(self.redis.get(key))

    def set(self, key, value):
        """Encode *value* and store it under *key*."""
        return self.redis.set(key, self.serialize(value))

    def rpush(self, key, value):
        """Encode *value* and append it to the list at *key*."""
        return self.redis.rpush(key, self.serialize(value))

    def lpush(self, key, value):
        """Encode *value* and prepend it to the list at *key*."""
        return self.redis.lpush(key, self.serialize(value))

    def lpop(self, key):
        """Pop and decode the head of the list at *key*."""
        return self.deserialize(self.redis.lpop(key))

    def llen(self, key):
        """Length of the list at *key*."""
        return self.redis.llen(key)

    def rpop(self, key):
        """Pop and decode the tail of the list at *key*."""
        return self.deserialize(self.redis.rpop(key))

    def report_steps(self, name, consumers, producers):
        """Register *consumers*/*producers* under *name* and return both full sets.

        All SADDs plus the two SMEMBERS reads go through one pipeline; the
        last two replies are the complete membership sets.
        """
        pipe = self.redis.pipeline()
        consumers_key = ':'.join([name, 'consumers'])
        producers_key = ':'.join([name, 'producers'])
        for member in consumers:
            pipe.sadd(consumers_key, member)
        for member in producers:
            pipe.sadd(producers_key, member)
        pipe.smembers(consumers_key)
        pipe.smembers(producers_key)
        replies = pipe.execute()
        return replies[-2], replies[-1]
示例2: load_data
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def load_data(ids, host, port):
    """Load hash records for every id in *ids* from redis.

    Requires the ids to be unique and non-zero; key ``0`` is fetched as a
    sentinel/header record and returned under ``raw[0]``.

    :param ids: sequence of unique, non-zero hash keys (NOT mutated)
    :param host: redis host
    :param port: redis port
    :raises KeyError: if 0 appears in *ids* (reserved sentinel key)
    :raises ValueError: if *ids* contains duplicates
    :return: dict mapping id -> {field: json-decoded value}
    """
    from redis import StrictRedis
    from json import loads
    from collections import Counter
    db = StrictRedis(host, port, decode_responses=True)
    logging.debug("all: %d", len(ids))
    if 0 in ids:
        raise KeyError(0)
    counts = Counter(ids)
    if len(counts) != len(ids):  # not unique
        raise ValueError(counts.most_common(3))
    pipe = db.pipeline()
    # BUG FIX: previously ids.insert(0, 0) mutated the caller's list;
    # build a local key list with the sentinel instead.
    keys = [0] + list(ids)
    for i in keys:
        pipe.hgetall(i)
    properties = pipe.execute(True)  # DO NOT allow error occurs in redis
    return {i: {k: loads(v) for k, v in p.items()}
            for i, p in zip(keys, properties)}
示例3: RedisProducer
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class RedisProducer(object):
    """Publishes messages to redis-backed topics using optimistic locking."""

    def __init__(self, hostname='localhost', port=6379):
        log.debug("Initializing RedisProducer with hostname of %s and port %s" % (hostname, port))
        self.r = StrictRedis(host=hostname, port=port)

    def send(self, message):
        """Atomically allocate the next index for message.topic and store the payload.

        Retries up to TRIES_LIMIT times on WATCH conflicts.

        :raises RuntimeError: if every attempt hit a WatchError.
        """
        tries = 0
        next_index_key = get_next_index_for_topic_key(message.topic)
        result = None
        log.debug("Sending message on topic %s" % message.topic)
        while result is None and tries < TRIES_LIMIT:
            # BUG FIX: the counter was never incremented, so a persistent
            # WATCH conflict produced an infinite retry loop.
            tries += 1
            next_index = 1
            if self.r.exists(next_index_key):
                # int() instead of py2-only long(); py2 auto-promotes anyway.
                next_index = int(self.r.get(next_index_key)) + 1
            message_key = get_message_key(message.topic, next_index)
            try:
                pl = self.r.pipeline()
                # Guard both keys so a concurrent producer forces a retry.
                pl.watch(next_index_key, message_key)
                pl.multi()
                pl.incr(next_index_key).set(message_key, message.payload)
                result = pl.execute()
            except WatchError:
                # Keys changed under us; loop and try again.
                pass
        if result is None:
            log.error("Could not send message, retry amount exceeded")
            raise RuntimeError("Attempted to send message %s times and failed" % TRIES_LIMIT)
示例4: RedisJobStore
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class RedisJobStore(JobStore):
    """Job store persisting each job as a redis hash under *key_prefix*."""

    def __init__(self, db=0, key_prefix="jobs.", pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        self.jobs = []
        self.pickle_protocol = pickle_protocol
        self.key_prefix = key_prefix
        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not key_prefix:
            raise ValueError('The "key_prefix" parameter must not be empty')
        self.redis = StrictRedis(db=db, **connect_args)

    def add_job(self, job):
        """Assign the job a fresh UUID and persist its pickled state."""
        job.id = str(uuid4())
        job_state = job.__getstate__()
        job_dict = {
            "job_state": pickle.dumps(job_state, self.pickle_protocol),
            "runs": "0",
            # next_run_time is stored separately as ISO-8601 text.
            "next_run_time": job_state.pop("next_run_time").isoformat(),
        }
        self.redis.hmset(self.key_prefix + job.id, job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        """Delete the job's hash and drop it from the in-memory list."""
        self.redis.delete(self.key_prefix + job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        """Restore all jobs from redis; unreadable entries are logged and skipped."""
        jobs = []
        keys = self.redis.keys(self.key_prefix + "*")
        pipeline = self.redis.pipeline()
        for key in keys:
            pipeline.hgetall(key)
        results = pipeline.execute()
        for job_dict in results:
            job_state = {}
            try:
                job = Job.__new__(Job)
                job_state = pickle.loads(job_dict["job_state".encode()])
                # BUG FIX: int() instead of py2-only long() (NameError on py3).
                job_state["runs"] = int(job_dict["runs".encode()])
                dateval = job_dict["next_run_time".encode()].decode()
                job_state["next_run_time"] = datetime.strptime(dateval, "%Y-%m-%dT%H:%M:%S")
                job.__setstate__(job_state)
                jobs.append(job)
            except Exception:
                job_name = job_state.get("name", "(unknown)")
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        """Push the job's next run time and run count back to redis."""
        attrs = {"next_run_time": job.next_run_time.isoformat(), "runs": job.runs}
        self.redis.hmset(self.key_prefix + job.id, attrs)

    def close(self):
        """Tear down the connection pool."""
        self.redis.connection_pool.disconnect()

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
示例5: _setex
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def _setex(self, redis: StrictRedis, name, value):
    """Store *value* under *name* and stamp it with this object's TTL.

    Both commands go through one pipeline; returns the list of replies
    ([set-result, expire-result]).
    """
    batch = redis.pipeline()
    batch.set(name, value)
    batch.expire(name, self.expire_time)
    return batch.execute()
示例6: Simmetrica
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class Simmetrica(object):
    """Time-series event counters stored in redis hashes, one per resolution."""

    DEFAULT_INCREMENT = 1
    DEFAULT_RESOLUTION = '5min'
    DEFAULT_REDIS_HOST = 'localhost'
    DEFAULT_REDIS_PORT = 6379
    DEFAULT_REDIS_DB = 0

    # Resolution name -> bucket width in seconds.
    resolutions = {
        'min': 60,
        '5min': 300,
        '15min': 900,
        'hour': 3600,
        'day': 86400,
        'week': 86400 * 7,
        'month': 86400 * 30,
        'year': 86400 * 365,
    }

    def __init__(self, host=None, port=None, db=None):
        self.backend = StrictRedis(
            host=host or self.DEFAULT_REDIS_HOST,
            port=int(port or self.DEFAULT_REDIS_PORT),
            db=db or self.DEFAULT_REDIS_DB,
        )

    def push(self, event, increment=DEFAULT_INCREMENT, now=None):
        """Bump *event*'s counter bucket at every resolution in one pipeline."""
        batch = self.backend.pipeline()
        for resolution, bucket in self.get_timestamps_for_push(now):
            batch.hincrby(self.get_event_key(event, resolution), bucket, increment)
        return batch.execute()

    def query(self, event, start, end, resolution=DEFAULT_RESOLUTION):
        """Yield (bucket-timestamp, count) pairs between *start* and *end*."""
        key = self.get_event_key(event, resolution)
        stamps = self.get_timestamps_for_query(
            start, end, self.resolutions[resolution])
        counts = self.backend.hmget(key, stamps)
        for stamp, count in zip(stamps, counts):
            # Missing buckets come back as None; report them as 0.
            yield stamp, count or 0

    def get_timestamps_for_query(self, start, end, resolution):
        """Bucket boundaries covering [start, end) at the given step."""
        lo = self.round_time(start, resolution)
        hi = self.round_time(end, resolution)
        return range(lo, hi, resolution)

    def get_timestamps_for_push(self, now):
        """Yield (resolution-name, bucket-timestamp) for every resolution."""
        moment = now or self.get_current_timestamp()
        for name, seconds in self.resolutions.items():
            yield name, self.round_time(moment, seconds)

    def round_time(self, time, resolution):
        """Floor *time* down to a multiple of *resolution* seconds."""
        return int(time - (time % resolution))

    def get_event_key(self, event, resolution):
        """Redis hash key for an (event, resolution) pair."""
        return 'simmetrica:{0}:{1}'.format(event, resolution)

    def get_current_timestamp(self):
        """Current unix time, truncated to whole seconds."""
        return int(time.time())
示例7: set
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def set(cls, name, value):
    """Write *name* = *value* through ``cls._set`` in a pipeline, then bump
    the ``<name>::v`` version counter.

    A first attempt runs the normal path; if redis rejects it with a
    ResponseError (RPUSH bug on some platforms), retry with the fallback flag.
    """
    def _attempt(fallback):
        conn = StrictRedis()
        with conn.pipeline() as pipe:
            if fallback:
                cls._set(pipe, name, value, True)
            else:
                cls._set(pipe, name, value)
            pipe.execute()
        conn.incr(name + '::v')

    try:
        _attempt(False)
    except ResponseError:
        # RPUSH bug in some platform
        _attempt(True)
示例8: persist
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def persist(db_params):
    """Coroutine: receive event dicts and persist userId keyed by time:eventId.

    Each ``send()``-ed *line* is written as ``<unixtime>:<eventId> -> userId``
    into the events database named by ``db_params['events_db_id']``.
    """
    connection = Redis(db=db_params['events_db_id'])
    while True:
        line = (yield)
        key = "{}:{}".format(
            iso8601_to_unixtime(line.get('eventDate')),
            line.get('eventId'))
        batch = connection.pipeline()
        batch.set(key, line.get('userId'))
        batch.execute()
示例9: process_key
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def process_key(k):
    """Re-score the index entries under key *k*.

    Drops documents that no longer exist in ``db`` and re-adds (with their
    previous score) those whose title or description still mentions *k*.
    ResponseErrors from redis (e.g. wrong key type) are ignored.
    """
    r = StrictRedis(db=INDEX_DB)
    try:
        p = r.pipeline()
        needle = k.lower()  # hoisted out of the loop
        for (doc_id, value) in r.zrevrange(k, 0, -1, "WITHSCORES"):
            # BUG FIX: the original referenced an undefined name `key`
            # in the two lines below; the intended variable is `k`.
            if doc_id not in db:
                print("Deleted " + doc_id + " from " + k)
                p.zrem(k, doc_id)
                continue
            doc = db[doc_id]
            if needle in doc['title'].lower() or needle in doc['description'].lower():
                p.zadd(k, value, doc_id)
        # BUG FIX: was p.exceute() (typo) -> AttributeError at runtime.
        print(p.execute())
    except redis.exceptions.ResponseError:
        pass
示例10: do_updates
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def do_updates():
    """Set city='NYC' on a fixed set of hash keys, printing before/after state.

    The HSETs are batched in a pipeline and flushed with a single execute().
    """
    db = StrictRedis()
    db_pipe = db.pipeline()
    update_keys = (260, 271, 285, 300, 310)
    # FIX: py2-only `print x` statements replaced with parenthesized calls,
    # which produce identical output on both Python 2 and 3.
    print('before...')
    for key in update_keys:
        print(db.hgetall(key))
        db_pipe.hset(key, 'city', 'NYC')
    db_pipe.execute()
    print('\nafter...')
    for key in update_keys:
        print(db.hgetall(key))
示例11: Persist
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class Persist(object):
    """ Sequential writer for Carbon server.
    The story is simple, fetch data from redis, write them, wait, loop.
    This code is supervised by Carbon daemon.
    """

    def __init__(self, path="/tmp/"):
        self.redis = Redis()
        self.path = path
        self.dirs = set()  # directories already known to exist on disk
        self.redis.sadd(METRICS, METRIC_POINTS, METRIC_WRITE)

    def metric(self, name, value):
        "Add some metrics : make your own dogfood, just before lunch."
        timestamp = time.time()
        serialized = struct.pack('!ff', timestamp, value)
        pipe = self.redis.pipeline()
        pipe.zadd(name, timestamp, serialized)
        pipe.publish(name, serialized)
        pipe.execute()

    def run(self):
        """Flush pending points every PERIOD seconds, recording write latency."""
        while True:
            before = time.time()
            self.handle()
            after = time.time()
            self.metric(METRIC_WRITE, (after - before) * 1000)
            # BUG FIX: the original slept PERIOD - int(before) + int(after),
            # i.e. PERIOD *plus* the elapsed time, so every cycle drifted
            # later.  Sleep only the remaining part of the period (never a
            # negative amount, which would raise ValueError).
            time.sleep(max(0, PERIOD - (after - before)))

    def handle(self):
        """Drain every metric's sorted set into its whisper file."""
        points = 0
        for metric in self.redis.smembers(METRICS):
            values = self.redis.zrange(metric, 0, -1)
            points += len(values)
            f = target_to_path(self.path, metric)
            d = os.path.dirname(f)
            if d not in self.dirs:
                if not os.path.isdir(d):
                    os.makedirs(d)
                self.dirs.add(d)
            if not os.path.exists(f):
                whisper.create(f, [(10, 1000)])  # [FIXME] hardcoded values
            whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
            if len(values):
                # Remove only the points we actually wrote.
                self.redis.zrem(metric, *values)
        self.metric(METRIC_POINTS, points)
示例12: RedisConsumer
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class RedisConsumer(object):
    """Polls redis-backed topics, tracking a per-group consumer index."""

    def __init__(self, timeout, group_id, hostname='localhost', port=6379):
        self.group_id = group_id
        self.timeout = timeout  # poll budget, in milliseconds
        log.debug("Initializing RedisConsumer with hostname of %s and port %s" % (hostname, port))
        self.r = StrictRedis(host=hostname, port=port)

    def poll(self, topic):
        """Return the next unread message on *topic*, or None if the timeout
        elapses before one becomes available.

        The group's read index is advanced with WATCH/MULTI so that two
        consumers in the same group never claim the same message.
        """
        result = None
        current_index_key = get_next_index_for_topic_key(topic)
        end_millis = time.time() * 1000 + self.timeout
        log.debug("Polling topic %s" % topic)
        while time.time() * 1000 < end_millis:
            if self.r.exists(current_index_key):
                # FIX: int() instead of py2-only long() throughout.
                current_index = int(self.r.get(current_index_key))
                consumer_index_key = get_next_index_for_group_id_key(topic, self.group_id)
                pl = self.r.pipeline()
                pl.watch(consumer_index_key)
                consumer_index = 0
                if self.r.exists(consumer_index_key):
                    consumer_index = int(self.r.get(consumer_index_key))
                if current_index > consumer_index:
                    try:
                        pl.multi()
                        pl.incr(consumer_index_key)
                        incr_result = pl.execute()
                        if incr_result is not None and len(incr_result) > 0:
                            consumer_index = int(incr_result[0])
                            key = get_message_key(topic, consumer_index)
                            if self.r.exists(key):
                                result = self.r.get(key)
                                break
                    except WatchError:
                        # Another consumer advanced the index; try again.
                        log.debug("Redis keys changed for topic %s and group %s, trying again" % (topic, self.group_id))
        return result

    def unsubscribe_from_topic(self, topic):
        """Forget this group's read position for *topic*."""
        self.r.delete(get_next_index_for_group_id_key(topic, self.group_id))
示例13: Storage
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class Storage(object):
    """Thin redis wrapper mapping labels to sets of image URLs."""

    def __init__(self, host=None, port=None, *args, **kwargs):
        # Fall back to the environment, then to sane local defaults.
        resolved_host = host if host is not None else os.environ.get('REDIS_HOST', 'localhost')
        resolved_port = port if port is not None else os.environ.get('REDIS_PORT', '6379')
        self.redis = StrictRedis(resolved_host, resolved_port, *args, **kwargs)

    def add_labels(self, labels):
        """Record every label in the global 'labels' set."""
        self.redis.sadd('labels', *labels)

    def add_image(self, image_url, labels):
        """File *image_url* under each label; the first image stored for a
        label also becomes its representative image (SETNX)."""
        batch = self.redis.pipeline()
        for tag in labels:
            batch.sadd(tag, image_url)
            batch.setnx('repr_img:{}'.format(tag), image_url)
        batch.execute()
示例14: main
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
def main():
    """Worker loop: wait for screenshot jobs and shell out to phantomjs."""
    logging.basicConfig(level=logging.INFO)
    client = StrictRedis()
    while True:
        logging.info("Waiting for a screenshot...")
        # Block until a job is signalled on the notification list.
        client.brpop('pending_screenshot_blocked_list')[1]
        # Claim one pending screenshot URL, if any survived the race.
        url = client.spop('pending_screenshots')
        if not url:
            continue
        job_key = 'pending_screenshot_{}'.format(url)
        batch = client.pipeline()
        batch.get(job_key)
        batch.delete(job_key)
        filename, _ = batch.execute()
        logging.info("Taking screenshot of {} to {}".format(url, filename))
        screenshot_path = os.path.join('/Users/dhumbert/Code/Projects/Misc/clementia/public/user/screenshots', filename)
        subprocess.Popen(['/usr/local/bin/phantomjs', 'screenshot.js', url, screenshot_path], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
示例15: MetricsExplorer
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import pipeline [as 别名]
class MetricsExplorer(Explorer):
    """Explorer page listing metric tag names and their possible values."""

    def __init__(self, config: Configuration):
        super().__init__('RedisCassandra')
        redis_host = config.get('redis.host')
        redis_port = config.get('redis.port')
        redis_pass = config.get('redis.password')
        # TODO: change to Sentinel so we can handle multiple hosts
        self.redis_conn = StrictRedis(host=redis_host, port=redis_port, password=redis_pass)
        self.cache = get_cache()

    def get_template(self):
        """Template used to render this explorer."""
        return 'explorers/metrics.html'

    def get_table_data(self):
        """Return ``[{'tag': name, 'values': [...]}, ...]``, cached under '_tname'.

        Tag names come from the '_tname' set; each tag's values are fetched
        with one pipelined SMEMBERS per tag.
        """
        ret = self.cache.get('_tname')
        if ret is not None:
            return ret
        tag_names = [t.decode() for t in self.redis_conn.smembers('_tname')]
        pipeline = self.redis_conn.pipeline()  # setup a pipeline
        for tag_name in tag_names:
            pipeline.smembers('_t|%s' % tag_name)
        values = pipeline.execute()
        # IDIOM FIX: pair tags with their member sets via zip instead of
        # the index-based range(len(...)) loop.
        ret = [
            {'tag': tag, 'values': [v.decode() for v in members]}
            for tag, members in zip(tag_names, values)
        ]
        self.cache.set('_tname', ret)
        return ret