本文整理汇总了Python中redis.StrictRedis.lrange方法的典型用法代码示例。如果您正苦于以下问题:Python StrictRedis.lrange方法的具体用法?Python StrictRedis.lrange怎么用?Python StrictRedis.lrange使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类redis.StrictRedis
的用法示例。
在下文中一共展示了StrictRedis.lrange方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: show_search_results
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def show_search_results(query):
    """Print pages matching *query* using a character-bigram index in Redis.

    Each whitespace-separated term is split into bigrams; a page matches
    when it is indexed under every bigram (set intersection across all
    "minamo:bi:<bigram>" lists).
    """
    print("""
Search Result: {keyword}
==================================================
""".format(keyword=query))
    p = Persistent("minamo")
    r = StrictRedis(decode_responses=True)
    resultset = None
    for _query in query.split(" "):
        qgram = ngram.ngram(_query, 2)
        # The last n-gram is dropped — presumably a padding artifact of
        # ngram.ngram; TODO confirm against the ngram helper.
        for bi in list(qgram)[:-1]:
            members = set(r.lrange("minamo:bi:{}".format(bi), 0, -1))
            if resultset is None:
                resultset = members
            else:
                resultset &= members
    # Fix: an empty or single-character query yields no bigrams, leaving
    # resultset as None and crashing the iteration below; treat that as
    # "no matches" instead.
    if resultset is None:
        resultset = set()
    for page in (p.load(models.Page, x) for x in resultset):
        if page.title is None:
            continue
        print("*", page.title)
        print(" ", page.url)
示例2: test_group_chord_group_chain
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def test_group_chord_group_chain(self, manager):
    """A group | task | group chain must execute its stages strictly in order.

    Uses a shared Redis list ('redis-echo') as an observable side channel:
    every 'before' task must land before 'connect', and 'connect' before
    any 'after' task.
    """
    from celery.five import bytes_if_py2
    if not manager.app.conf.result_backend.startswith('redis'):
        raise pytest.skip('Requires redis result backend.')
    redis_connection = StrictRedis()
    # Start from a clean slate so leftovers from other tests can't interfere.
    redis_connection.delete('redis-echo')
    before = group(redis_echo.si('before {}'.format(i)) for i in range(3))
    connect = redis_echo.si('connect')
    after = group(redis_echo.si('after {}'.format(i)) for i in range(2))
    result = (before | connect | after).delay()
    result.get(timeout=TIMEOUT)
    # Normalize to bytes on py2 so comparisons below work on both majors.
    redis_messages = list(map(
        bytes_if_py2,
        redis_connection.lrange('redis-echo', 0, -1)
    ))
    before_items = \
        set(map(bytes_if_py2, (b'before 0', b'before 1', b'before 2')))
    after_items = set(map(bytes_if_py2, (b'after 0', b'after 1')))
    # Order *within* a group stage is unspecified (hence set comparison);
    # order *across* stages is what this test pins down.
    assert set(redis_messages[:3]) == before_items
    assert redis_messages[3] == b'connect'
    assert set(redis_messages[4:]) == after_items
    redis_connection.delete('redis-echo')
示例3: get_mail_from_redis
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def get_mail_from_redis(num=10):
    """Pop up to *num* mail entries off the "QQMAIL" Redis list.

    Returns the fetched entries as a set (duplicates collapsed).
    """
    r = StrictRedis('127.0.0.1', 6379)
    key = "QQMAIL"
    # Fix: LRANGE's stop index is inclusive, so (0, num) fetched num + 1
    # items while only num were removed — leaking one entry per call.
    mail_list = r.lrange(key, 0, num - 1)
    # LTRIM drops the fetched prefix in one round trip instead of num LPOPs;
    # the resulting list state is identical (empty list stays empty).
    r.ltrim(key, num, -1)
    return set(mail_list)
示例4: DbRedis
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class DbRedis(Db):
    """Redis-backed implementation of the Db interface.

    Values expire after TTL seconds; lists are used LIFO-style
    (lpush + lpop).
    """

    def __init__(self):
        super().__init__()
        self.redis = StrictRedis(**dbSettings)

    def generateKey(self):
        # Atomic counter: every call yields a fresh unique integer id.
        return self.redis.incr("id")

    def store(self, key, objSerial):
        # setex writes the value with a TTL in one command.
        self.redis.setex(key, TTL, objSerial)

    def retrieve(self, key):
        return self.redis.get(key)

    def lenList(self, name):
        return self.redis.llen(name)

    def listAppend(self, name, val):
        # lpush prepends; paired with listPopLeft this behaves as a stack.
        self.redis.lpush(name, val)

    def listPopLeft(self, name):
        return self.redis.lpop(name)

    def retrieveList(self, name):
        return self.redis.lrange(name, 0, -1)

    def removeFromList(self, name, item):
        # Fix: StrictRedis.lrem's signature is (name, count, value); the
        # arguments were swapped, so the call removed nothing (or raised,
        # depending on redis-py version). count=0 removes all occurrences.
        self.redis.lrem(name, 0, item)

    def keyExists(self, key):
        return self.redis.exists(key)
示例5: get_recent
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def get_recent(**redis_kwargs):
    """ Retrieve recent messages from Redis, in reverse chronological order.

        Two lists are returned: one a single most-recent status message from
        each process, the other a list of numerous messages from each process.

        Each message is a tuple with floating point seconds elapsed, integer
        process ID that created it, and an associated text message such as
        "Got cache lock in 0.001 seconds" or "Started /osm/12/656/1582.png".

        Keyword args are passed directly to redis.StrictRedis().
    """
    # (removed an unused `pid = getpid()` — pid is always reassigned below)
    red = StrictRedis(**redis_kwargs)
    processes = []
    messages = []
    for key in red.keys('pid-*-statuses'):
        try:
            now = time()
            # Key layout: "pid-<pid>-statuses".
            pid = int(key.split('-')[1])
            # Each list entry is "<epoch seconds> <message text>".
            msgs = [msg.split(' ', 1) for msg in red.lrange(key, 0, _keep)]
            msgs = [(now - float(t), pid, msg) for (t, msg) in msgs]
        except Exception:
            # Fix: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); malformed keys or entries are
            # still skipped best-effort.
            continue
        else:
            messages += msgs
            # The list is newest-first, so the first entry is this
            # process's most recent status.
            processes += msgs[:1]
    messages.sort()   # youngest-first (smallest elapsed seconds)
    processes.sort()  # youngest-first
    return processes, messages
示例6: Hipache
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class Hipache(object):
    """Thin wrapper around a Hipache-style Redis routing table.

    Each virtual host lives under a "frontend:<host>" list whose first
    element is an identifier and whose remaining elements are backend URLs.
    """

    def __init__(self, url):
        host, port, password, database = parse_redis_url(url)
        self.r = StrictRedis(host=host, port=port, db=database, password=password)

    def __iter__(self):
        # Yields one dict per vhost — but only for lists of exactly two
        # elements (id + a single URL); multi-backend vhosts are skipped.
        for i, k in enumerate(self.r.keys()):
            vhost = k.split(":")[1]
            xs = self.r.lrange(k, 0, -1)
            if len(xs) == 2:
                id, url = xs
                yield {"id": id, "host": vhost, "url": url}

    def add(self, id, host, ip, port):
        # Register (or extend) the backend list for `host`.
        url = format_url(ip, port)
        vhost = "frontend:{0:s}".format(host)
        # NOTE(review): KEYS scans the whole keyspace on every add; EXISTS
        # would be cheaper — confirm before changing.
        if vhost in self.r.keys():
            members = self.r.lrange(vhost, 0, -1)
            if id in members:
                if url not in members:
                    # Same id, new URL: insert right after the id entry.
                    self.r.linsert(vhost, "after", id, url)
            else:
                self.r.rpush(vhost, id)
                self.r.rpush(vhost, url)
        else:
            self.r.rpush(vhost, id)
            self.r.rpush(vhost, url)

    def delete(self, id, host, ip, port):
        vhost = "frontend:{0:s}".format(host)
        if not ip:
            # NOTE(review): DELETE treats *both* arguments as key names, so
            # this drops the whole vhost key plus a key literally named `id`
            # — looks unintended (LREM of the id entry?); verify with callers.
            self.r.delete(vhost, id)
        else:
            url = format_url(ip, port)
            # Remove every occurrence of this backend URL from the list.
            self.r.lrem(vhost, 0, url)
示例7: get_context_data
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def get_context_data(self, **kwargs):
    """Add the user's projects, reservations and queued action items to the context."""
    projects = (User.objects.filter(pk=self.request.user.id)
                .values_list('projects__id', flat=True))
    kwargs['active_projects'] = Project.active_objects.filter(id__in=projects)
    kwargs['inactive_projects'] = Project.inactive_objects.filter(id__in=projects)
    kwargs['reservations'] = []
    r = StrictRedis(settings.REDIS_HOST,
                    settings.REDIS_PORT,
                    settings.REDIS_DB)
    # SECURITY: pickle.loads on data read back from Redis executes arbitrary
    # code if the store is ever writable by an untrusted party — consider a
    # JSON payload instead.
    kwargs['action_items'] = [
        pickle.loads(item)
        for item in r.lrange(
            'users:{0}:action.items'.format(self.request.user.id), 0, -1)
    ]
    return super(DashboardMixin, self).get_context_data(**kwargs)
示例8: main
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def main(stdscr):
    """Curses dashboard: poll Redis for 'aphids*' keys and render them live."""
    # no delay on user input
    stdscr.nodelay(True)
    # move cursor to 0, 0
    stdscr.leaveok(0)
    # connect to the redis server
    r = StrictRedis(host='127.0.0.1', port=6379, db=0)
    # main loop
    while True:
        # clear screen
        stdscr.erase()
        # print info message
        stdscr.addstr(0, 0, "press 'q' to exit.")
        # get list of aphids keys
        akeys = sorted(r.scan_iter('aphids*'))
        # get and show all aphids keys
        for i, k in enumerate(akeys):
            # get the type of this key
            k_type = r.type(k)
            # Fix: v was left unbound (NameError) for key types other than
            # string/list; default to None so such keys render as "None".
            v = None
            # read differently depending on type
            # NOTE(review): the "string"/"list" comparisons assume str replies
            # (decode_responses); with raw bytes they never match — confirm
            # against the client configuration.
            if k_type == "string":
                v = r.get(k)
            elif k_type == "list":
                v = r.lrange(k, 0, -1)
            opts = A_STANDOUT if k.endswith('seg_rate') else 0
            stdscr.addstr(i + 2, 8, k)
            stdscr.addstr(i + 2, 68, "{0}".format(v), opts)
        # refresh screen
        stdscr.refresh()
        # handle user input
        try:
            c = stdscr.getkey()
            if c == 'q':
                break
        except Exception:
            # Fix: narrowed from a bare except. In nodelay mode getkey raises
            # when no key is pending — just keep polling.
            continue
示例9: test_second_order_replace
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
def test_second_order_replace(self, manager):
    """Nested task replacement must preserve the In/Out nesting order.

    Observes execution order through the shared 'redis-echo' list and
    expects a properly nested In A / In B / In-Out C / Out B / Out A trace.
    """
    from celery.five import bytes_if_py2
    if not manager.app.conf.result_backend.startswith('redis'):
        raise pytest.skip('Requires redis result backend.')
    redis_connection = StrictRedis()
    # Clean slate so prior runs cannot pollute the echo list.
    redis_connection.delete('redis-echo')
    result = second_order_replace1.delay()
    result.get(timeout=TIMEOUT)
    # Normalize to bytes on py2 for the comparison below.
    redis_messages = list(map(
        bytes_if_py2,
        redis_connection.lrange('redis-echo', 0, -1)
    ))
    expected_messages = [b'In A', b'In B', b'In/Out C', b'Out B', b'Out A']
    assert redis_messages == expected_messages
示例10: __init__
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class RedisSeries:
    """Time-series store on Redis: a bounded "recent" list per series plus
    one hash per (series, resolution) time bucket.
    """

    def __init__(self, host=None, port=None, db=None, password=None):
        # NOTE(review): host/port/db/password are accepted but never passed
        # to StrictRedis — the default connection is always used; confirm
        # whether that is intentional.
        self._redis = StrictRedis(decode_responses=True)
        self.log = getLogger('home.ts.redis.RedisSeries')

    def push(self, series, value, dt=None):
        """Record *value* for *series* at *dt* (default: utcnow), batched
        into a single pipeline round trip."""
        pipeline = self._redis.pipeline()
        if dt is None:
            dt = datetime.utcnow()
        timestamp = dt.isoformat()
        # Bounded list of the most recent values (61 entries kept).
        recent_key = self.named_key(series, "recent")
        pipeline.lpush(recent_key, value)
        pipeline.ltrim(recent_key, 0, 60)
        # Global bookkeeping: known series, latest value, last-update time.
        pipeline.sadd("series", series)
        pipeline.hset("latest", series, value)
        pipeline.hset("updated", series, timestamp)
        # One hash per (series, resolution bucket); buckets expire after
        # EXPIRE_AFTER seconds.
        for resolution, (dt_fmt, delta) in RESOLUTIONS.items():
            key = self.event_key(series, dt, dt_fmt)
            pipeline.hset(key, timestamp, value)
            pipeline.expire(key, EXPIRE_AFTER)
        count = len(pipeline)
        with timer("Redis Push (commands: {0})".format(count), self.log):
            pipeline.execute()

    def event_key(self, series, dt, dt_fmt):
        # Bucket key: the datetime formatted at the resolution's granularity.
        return self.named_key(series, dt.strftime(dt_fmt))

    def named_key(self, series, name):
        return "{0}:{1}".format(series, name)

    def query(self, series, start, end, resolution):
        """Return sorted (iso-timestamp, Decimal value) pairs for every
        bucket between *start* and *end* at *resolution*."""
        dt_fmt, delta = RESOLUTIONS[resolution]
        current = start
        pipeline = self._redis.pipeline()
        # One HGETALL per bucket, all sent in a single pipeline.
        while current <= end:
            key = self.event_key(series, current, dt_fmt)
            self.log.debug("HGETALL {0}".format(key))
            pipeline.hgetall(key)
            current += delta
        count = len(pipeline)
        with timer("Redis Query (commands: {0})".format(count), self.log):
            results = pipeline.execute()
        # Flatten the per-bucket hashes; sorting by ISO timestamp string
        # yields chronological order.
        results = sorted(chain(*[result.items() for result in results]))
        return [(r[0], Decimal(r[1])) for r in results]

    def status(self):
        # Returns [latest-hash, updated-hash] — order matches the pipeline.
        pipeline = self._redis.pipeline()
        pipeline.hgetall("latest")
        pipeline.hgetall("updated")
        return pipeline.execute()

    def latest(self, series):
        # Most recent values, newest first (lpush order).
        return self._redis.lrange(self.named_key(series, "recent"), 0, -1)
示例11: DataBaseHandler
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class DataBaseHandler(object):
    """Redis-backed search index mapping urls <-> words <-> occurrence lists.

    NOTE(review): keys are derived from the builtin id() of the Python
    string objects — a memory address that is NOT stable between runs or
    processes, so the persisted index cannot be reused across restarts.
    Verify whether a content hash was intended.
    """

    def __init__(self, truncate=False, db_num=15):
        self.redis = StrictRedis(db=db_num)
        if truncate:
            self.redis.flushdb()

    def get_url(self, url_id):
        return self.redis.hget('url_ids', url_id)

    def get_word(self, word_id):
        return self.redis.hget('word_ids', word_id)

    def add_url(self, url):
        # id(url) as key: see class-level note about run-to-run stability.
        url_id = id(url)
        saved = self.redis.hget('url_ids', url_id)
        if not saved:
            self.redis.hset('url_ids', url_id, url)
        return url_id

    def add_word(self, word):
        word_id = id(word)
        saved = self.redis.hget('word_ids', word_id)
        if not saved:
            self.redis.hset('word_ids', word_id, word)
        return word_id

    def set_word_location(self, url_id, word_id, location):
        # Occurrence positions live in a list keyed by url_have_word(...).
        locations_id = url_have_word(url_id, word_id)
        self.redis.rpush(locations_id, location)
        # url contains words
        self.redis.hset(word_location(url_id), word_id, locations_id)
        # word in urls
        self.redis.hset(url_location(word_id), url_id, locations_id)

    def get_word_location_in_url(self, location_id):
        return [int(el) for el in self.redis.lrange(location_id, 0, -1)]

    def set_link(self, from_url, to_url, via_word):
        from_id = self.add_url(from_url)
        to_id = self.add_url(to_url)
        word_id = self.add_word(via_word)
        self.redis.set(link(from_id, to_id), word_id)
        self.redis.lpush(from_(from_id), to_id)
        self.redis.lpush(_to(to_id), from_id)

    def is_url_saved(self, url):
        # NOTE(review): reads the key id(url) directly rather than the
        # 'url_ids' hash used by add_url — looks inconsistent; confirm intent.
        return self.redis.get(id(url))

    def get_urls_locations_of_(self, word):
        """
        :returns word_id, {url_id:[locations],...}
        """
        word_id = id(word)
        # NOTE(review): .iteritems() is Python 2 only.
        return word_id, dict([(int(k), self.get_word_location_in_url(v)) \
            for k, v in self.redis.hgetall(url_location(word_id)).iteritems()])

    def get_words_locations_in_(self, url):
        '''
        :param url:
        :return: url_id, {word_id:[locations]}
        '''
        url_id = id(url)
        return url_id, dict([(int(k), self.get_word_location_in_url(v)) for k, v in
                             self.redis.hgetall(word_location(url_id)).iteritems()])
示例12: JobsDB
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
#.........这里部分代码省略.........
"""
job = self.get_job(job_id)
if not job:
return (False, 'no such job id')
if job['status'] == 'completed':
return self.get_stored_log(job_id, timestamp=timestamp)
else:
return self.get_logstream(job_id, timestamp=timestamp)
def get_logstream(self, job_id, timestamp=True):
    """
    Returns a generator object to stream all job output
    until the job has completed
    params:
     - timestamp(bool): prefix lines with timestamp. default True.
    """
    # NOTE(review): `key` is computed but never used in this method.
    key = self._key('log', job_id)
    # Block on the job's pub/sub subscription; the publisher signals
    # completion with a literal 'EOF' payload.
    sub = self.subs[job_id]
    for msg in sub.listen():
        if str(msg['data']) == 'EOF':
            break
        else:
            yield self._read_jsonlog(msg['data'], append_ts=timestamp)
def get_stored_log(self, job_id, timestamp=True):
    """
    Return the stored output of a given job id
    params:
        - timestamp(bool): prefix lines with timestamp. default True.
    """
    stored = self.redis.lrange(self._key('log', job_id), 0, -1)
    # Entries were LPUSHed (newest first); reverse for chronological order.
    return [self._read_jsonlog(entry, append_ts=timestamp)
            for entry in reversed(stored)]
def append_job_log(self, job_id, text):
    """
    Append a line of job output to a redis list and
    publish to relevant channel
    """
    key = self._key('log', job_id)
    # Split up multi-line input (carriage returns, newlines) and store
    # each line as its own entry.
    if len(text.splitlines()) > 1:
        for line in text.splitlines():
            self.append_job_log(job_id, line)
    else:
        # Fix: ''.isspace() is False, so empty strings slipped through the
        # old `not text.isspace()` check despite the "don't keep empty
        # lines" intent; strip() rejects both empty and whitespace-only.
        if text.strip():
            logjson = self._jsonlog(text)
            self.redis.publish(key, logjson)
            self.redis.lpush(key, logjson)
@staticmethod
def _read_jsonlog(jsonlog, append_ts=True):
ts,msg = json.loads(jsonlog)
if not append_ts:
return msg
return '[%s] %s' % (ts, msg)
@staticmethod
def _jsonlog(msg):
ts = datetime.utcnow().strftime('%a %b %d %H:%M:%S %Y')
return json.dumps((ts, msg))
示例13: RunLogger
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class RunLogger(object):
    """Serializes job runs and stores their logs/metadata in Redis.

    Only one run may be active at a time (guarded by a non-blocking lock);
    logging calls outside a runlog() context are silently dropped via the
    `donothing` placeholder logger.
    """

    def __init__(self, redis_config, max_logs=1000):
        self._redis = StrictRedis(**redis_config)
        self._lock = threading.Lock()
        self._logger = donothing # All logger calls ignored
        self._job_id = None
        # Number of runs retained per job; older runs are purged.
        self.max_logs = max_logs

    @contextmanager
    def runlog(self, job_id, run_id=None):
        """Context manager yielding a logger bound to (job_id, run_id).

        Records start/end timestamps, indexes the run under the job, logs
        and re-raises exceptions, and always releases the lock.
        """
        # Non-blocking acquire: refuse to nest/overlap runs.
        if not self._lock.acquire(False):
            raise Exception("Can't start %s. %s already started."
                            % (job_id, self._job_id))
        if run_id is None:
            run_id = str(datetime.datetime.now()).replace(' ', '-')
        try:
            hdlr = RunlogHandler(self._redis, job_id, run_id)
            self._logger = logging.getLogger("%s|%s" % (job_id, run_id))
            self._job_id = job_id
            self._logger.addHandler(hdlr)
            timestamp = time.time()
            # Index the job and run by timestamp, and record the start time.
            self._redis.zadd('jobs', timestamp, job_id)
            self._redis.zadd('%s|runs' % job_id, timestamp, run_id)
            self._redis.set('%s|%s|start' % (job_id, run_id), timestamp)
            try:
                try:
                    yield self._logger
                finally:
                    # End time is recorded even when the body raises.
                    self._redis.set('%s|%s|end' % (job_id, run_id),
                                    time.time())
            except CancelLog:
                # Caller asked for this run to be discarded entirely.
                self.forget_run(job_id, run_id)
            except Exception as ex:
                self._redis.zadd('exceptions',
                                 timestamp,
                                 "%s|%s" % (job_id, run_id))
                self._logger.exception(ex)
                # Drop the per-run logger so it is not cached forever.
                del logging.Logger.manager.loggerDict[
                    "%s|%s" % (job_id, run_id)]
                raise ex # Don't swallow errors.
        finally:
            # Always prune old runs, reset state and release the lock.
            self.forget_old_runs(job_id)
            self._job_id = None
            self._logger = donothing
            self._lock.release()

    def forget_run(self, job_id, run_id):
        """Delete every Redis key belonging to one run."""
        self._redis.zrem('%s|runs' % job_id, run_id)
        self._redis.delete('%s|%s|start' % (job_id, run_id))
        self._redis.delete('%s|%s|end' % (job_id, run_id))
        self._redis.delete('%s|%s|log' % (job_id, run_id))

    def forget_old_runs(self, job_id):
        # Keep only the max_logs oldest-ranked entries; purge the rest.
        for run_id in self._redis.zrange('%s|runs' % job_id,
                                         self.max_logs,
                                         -1):
            self.forget_run(job_id, run_id)

    # Delegation shims: forward to the active run's logger (or `donothing`
    # when no run is active).
    def debug(self, *a, **kw):
        self._logger.debug(*a, **kw)

    def info(self, *a, **kw):
        self._logger.info(*a, **kw)

    def warn(self, *a, **kw):
        self._logger.warn(*a, **kw)

    def warning(self, *a, **kw):
        self._logger.warning(*a, **kw)

    def error(self, *a, **kw):
        self._logger.error(*a, **kw)

    def critical(self, *a, **kw):
        self._logger.critical(*a, **kw)

    def exception(self, *a, **kw):
        self._logger.exception(*a, **kw)

    def list_jobs(self):
        # Jobs, most recently started first.
        return self._redis.zrevrange('jobs', 0, -1)

    def list_runs(self, job_id):
        return self._redis.zrange('%s|runs' % job_id, 0, -1)

    def run_times(self, job_id, run_id):
        """Return the (start, end) timestamps stored for a run."""
        return (self._redis.get('%s|%s|start' % (job_id, run_id)),
                self._redis.get('%s|%s|end' % (job_id, run_id)))

    def get_log(self, job_id, run_id):
        return self._redis.lrange('%s|%s|log' % (job_id, run_id), 0, -1)
示例14: Redabas
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class Redabas(object):
"""docstring for redabas"""
__rdb = None
__redis_opt = None
__files = None
__net = None
__proc = None
def __init__(self, redis_opt):
    """Lazily initialise the shared redis/files/net/proc singletons.

    The backing attributes are class-level (shared across instances), so
    each resource is created only on the first construction.
    """
    super().__init__()
    if not self.__rdb:
        self.__rdb = StrictRedis(
            host=redis_opt["host"],
            port=redis_opt["port"],
            db=redis_opt["db"],
            decode_responses=redis_opt["decode_responses"],
        )
        self.__redis_opt = redis_opt
        LOGGER.info("created new redis connection")
    if not self.__files:
        self.__files = Files()
    if not self.__net:
        self.__net = Net()
    if not self.__proc:
        # Background cycle that advances the image every image_timeout secs.
        self.__proc = Cycle(redis_opt["image_timeout"], self.next_image).start()
    pass
def redis_ping(self):
    """Ping redis; return the ping result, or False when unreachable.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — confirm `return False` covers both the no-connection and the
    connection-error paths as written here.
    """
    if self.__rdb:
        try:
            LOGGER.info("redis ping")
            return self.__rdb.ping()
        except RedisConnectionError as ex:
            LOGGER.error("could not ping redis: %s" % (ex))
    return False
def get_ropt(self, field):
    """Look up *field* in the stored redis options; None when absent."""
    options = self.__redis_opt
    if field in options:
        return options[field]
def flush_all(self):
    """Delete every cached image list plus the status feed from redis."""
    keys = [
        "%s:%s" % (self.__redis_opt["image_prefix"], folder)
        for folder in self.__files.get_contentsub(full=False)
    ]
    keys.append("%s:feed" % (self.__redis_opt["status_prefix"]))
    for key in keys:
        self.__rdb.delete(key)
        LOGGER.info("flushed data for %s" % (key))
#
def get_images(self, folder="public"):
    """gets images from redis, rebuilding the cache from disk when empty"""
    result = list()
    rdbfield = "%s:%s" % (self.__redis_opt["image_prefix"], folder)

    def __readin():
        """reloads db: rescan the folder on disk and repopulate the cache"""
        self.__rdb.delete(rdbfield)
        for image in self.__files.find_images(folder=folder):
            self.__rdb.rpush(rdbfield, image)
            result.append(image)
        LOGGER.info("rebuilt redis image cache for %s" % (rdbfield))
        return result

    # Only known content folders are served; unknown names return [].
    if folder in self.__files.get_contentsub():
        result = sorted(self.__rdb.lrange(rdbfield, 0, -1))
    # Empty cache (or unknown folder falling through) triggers a rebuild.
    return result if result else __readin()
def __dblocate_image(self, name):
    """Return the first content folder containing *name*, else None."""
    for candidate in self.__files.get_contentsub(full=False):
        if name in self.get_images(candidate):
            return candidate
def locate_image(self, name):
    """Resolve *name* to a servable image, or the null image when missing."""
    folder = self.__dblocate_image(name)
    if not folder:
        LOGGER.info("requested image %s not found" % (name))
        return self.__files.jinja_nullimg()
    LOGGER.info("found requested image %s in folder %s" % (name, folder))
    image = self.__files.jinja_static_file(name, folder=folder)
    if image:
        return image
    LOGGER.info("requested image %s not found" % (name))
    return self.__files.jinja_nullimg()
def get_imagestats(self):
    """counts images per content folder"""
    return {
        folder: len(self.get_images(folder=folder))
        for folder in self.__files.get_contentsub(full=False)
    }
def get_all_images(self):
"""suppenkasper needs a list of all images"""
result = list()
for folder in self.__files.get_contentsub(full=False):
result += self.get_images(folder=folder)
#.........这里部分代码省略.........
示例15: JSONRedisBackend
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import lrange [as 别名]
class JSONRedisBackend(BaseBackend):
    """Queue backend persisting JSON-encoded values in Redis."""

    def initialize(self):
        conf = env.get_uri("LINEUP_REDIS_URI")
        self.redis = StrictRedis(
            db=conf.username or 0,
            host=conf.host,
            port=conf.port,
            # using `path` as password to support the URI like:
            # redis://[email protected]:port/veryverylongpasswordhash
            password=conf.path,
        )

    def serialize(self, value):
        # NOTE(review): default=bytes is unusual — it invokes bytes() on any
        # non-JSON-serializable object; confirm that is the intended fallback.
        return json.dumps(value, default=bytes)

    def deserialize(self, value):
        # Fix: the old `value and json.loads(value) or None` idiom returned
        # None whenever the *decoded* payload was falsy (0, "", [], {}),
        # silently corrupting round-trips of such values.
        return json.loads(value) if value else None

    # read operations
    @io_operation
    def get(self, key):
        return self.deserialize(self.redis.get(key))

    @io_operation
    def lpop(self, key):
        return self.deserialize(self.redis.lpop(key))

    @io_operation
    def llen(self, key):
        return self.redis.llen(key)

    @io_operation
    def lrange(self, key, start, stop):
        # Fix: return a real list; on Python 3 the previous bare map() gave a
        # one-shot iterator, surprising callers that index or re-iterate.
        return [self.deserialize(item)
                for item in self.redis.lrange(key, start, stop)]

    @io_operation
    def rpop(self, key):
        return self.deserialize(self.redis.rpop(key))

    # Write operations
    @io_operation
    def set(self, key, value):
        return self.redis.set(key, self.serialize(value))

    @io_operation
    def rpush(self, key, value):
        return self.redis.rpush(key, self.serialize(value))

    @io_operation
    def lpush(self, key, value):
        return self.redis.lpush(key, self.serialize(value))

    # Pipeline operations
    @io_operation
    def report_steps(self, name, consumers, producers):
        """Register consumer/producer step names for *name* and return the
        complete membership sets in a single pipeline round trip."""
        pipeline = self.redis.pipeline()
        producers_key = ':'.join([name, 'producers'])
        consumers_key = ':'.join([name, 'consumers'])
        for consumer in consumers:
            pipeline.sadd(consumers_key, consumer)
        for producer in producers:
            pipeline.sadd(producers_key, producer)
        pipeline.smembers(consumers_key)
        pipeline.smembers(producers_key)
        result = pipeline.execute()
        # The two SMEMBERS replies are the last two pipeline results.
        all_consumers = result[-2]
        all_producers = result[-1]
        return all_consumers, all_producers