本文整理汇总了Python中redis.StrictRedis.blpop方法的典型用法代码示例。如果您正苦于以下问题:Python StrictRedis.blpop方法的具体用法?Python StrictRedis.blpop怎么用?Python StrictRedis.blpop使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类redis.StrictRedis
的用法示例。
在下文中一共展示了StrictRedis.blpop方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import blpop [as 别名]
def main(args=None):
    """Entry point for the Kinto deployment worker.

    Blocks on the Redis deploy queue for base64-encoded credentials and
    deploys a Kinto instance to alwaysdata for each entry.  On failure the
    credentials are pushed back onto the queue for a later retry.

    :param args: CLI arguments (defaults to ``sys.argv[1:]``).
    """
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Kinto Deployment Worker")
    parser.add_argument('--ini',
                        help='Application configuration file',
                        dest='ini_file')
    cli_options = vars(parser.parse_args(args))

    logging.basicConfig(level=DEFAULT_LOG_LEVEL, format=DEFAULT_LOG_FORMAT)

    env = bootstrap(cli_options['ini_file'])
    registry = env['registry']
    redis_client = StrictRedis(**registry.redis)

    while True:
        try:
            # timeout=0 blocks forever until a deployment request arrives.
            _queue, b64_credentials = redis_client.blpop(DEPLOY_QUEUE, 0)
        except KeyboardInterrupt:
            print("\rBye bye buddy")
            sys.exit(0)

        user_id = hmac_digest(registry.hmac_secret, b64_credentials)
        # Credentials arrive as base64("login:password").
        credentials = base64.b64decode(b64_credentials).split(':', 1)
        id_alwaysdata = redis_client.get(ID_ALWAYSDATA_KEY.format(user_id))
        settings = {
            'id_alwaysdata': id_alwaysdata,
            'credentials': tuple(credentials),
            'postgresql_host': "postgresql-%s.alwaysdata.net" % id_alwaysdata,
            'ssh_host': "ssh-%s.alwaysdata.net" % id_alwaysdata,
            'ftp_host': "ftp-%s.alwaysdata.net" % id_alwaysdata,
            'prefixed_username': "%s_kinto" % id_alwaysdata,
        }
        status_handler = RedisStatusHandler(redis_client, user_id)
        try:
            deploy_kinto_to_alwaysdata(status_handler, file_root=FILE_ROOT, **settings)
        except Exception as e:
            logger.error(e)
            # Retry later
            redis_client.rpush(DEPLOY_QUEUE, b64_credentials)
示例2: RedisEngine
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import blpop [as 别名]
class RedisEngine(QueueEngine):
    """Queue engine backed by a Redis list (RPUSH to enqueue, BLPOP to dequeue)."""

    def __init__(self):
        super(RedisEngine, self).__init__()
        self.r = None  # StrictRedis client; created in connect()
        self.host = 'localhost'
        self.port = 6379

    def connect(self):
        """Create the Redis client and return whether the server is reachable."""
        self.r = StrictRedis(self.host, self.port, db=0)
        return self.is_available()

    def is_available(self):
        """Return True when a client exists and the server answers PING."""
        print('is redis available')
        if self.r is None:
            return False
        return self.r.ping() is not None

    def enqueue(self, queue, msg, timeout=0):
        """Append *msg* to *queue*.  *timeout* is unused; kept for the interface."""
        self.r.rpush(queue, msg)

    def dequeue(self, queue, timeout):
        """Pop the oldest message from *queue*.

        Blocks for up to *timeout* seconds (0 blocks forever) and returns the
        message payload, or None when the wait times out.

        BUGFIX: *timeout* was previously ignored — blpop was always called
        with timeout=0, blocking forever regardless of the caller's value,
        and a timed-out None result would have raised TypeError on rsp[1].
        """
        rsp = self.r.blpop(queue, timeout=timeout)
        if rsp is None:
            return None
        return rsp[1]
示例3: Zone
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import blpop [as 别名]
#.........这里部分代码省略.........
# Clear any existing tick events
self.redis.ltrim(self.tick_key, 0, 0)
try:
while self.running:
self.process_one_event()
except Exception as e:
log.critical(traceback.format_exc())
except BaseException as e:
pass
finally:
self.save_snapshot()
def stop(self):
    """Signal the event loop to exit after its current iteration."""
    self.running = False

def start_ticker(self):
    """Push a tick marker onto the tick queue every ``tick_interval`` seconds.

    Never returns; meant to run in a dedicated process or thread.
    """
    log.info('Ticking every %ss.' % self.tick_interval)
    is_tock = False
    while True:
        if is_tock:
            log.debug('Tock.')
        else:
            log.debug('Tick.')
        # TODO: timestamp here instead of True, for debugging?
        self.redis.rpush(self.tick_key, True)
        sleep(self.tick_interval)
        is_tock = not is_tock
def send_message(self, entity_id, message):
    """Publish *message* on the entity's pub/sub channel."""
    channel = self.messages_key(entity_id)
    self.redis.publish(channel, message)

def listen(self, entity_id):
    """Yield the payload of each pub/sub message addressed to *entity_id*."""
    for item in self.subscribe(entity_id).listen():
        yield item['data']

# TODO: Leaky abstraction :\
def subscribe(self, entity_id):
    """Return a pub/sub subscription bound to the entity's message channel."""
    pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
    pubsub.subscribe(self.messages_key(entity_id))
    return pubsub
def process_one_event(self):
    """Block for the next queued event and dispatch it.

    An event is either a tick (arriving on ``tick_key``) or an entity
    command (arriving on ``incoming_key`` as ``"<entity_id> <command>"``).
    """
    key, payload = self.redis.blpop([self.tick_key, self.incoming_key])
    if key != self.tick_key:
        entity_id, _, command = payload.partition(' ')
        self.perform_command(int(entity_id), command)
    else:
        self.perform_tick()

def enqueue_command(self, entity_id, command):
    """Queue *command* for *entity_id* on the incoming list."""
    payload = ' '.join([str(entity_id), command])
    self.redis.rpush(self.incoming_key, payload)

def perform_command(self, entity_id, command):
    """Resolve the entity by id and have it perform *command*."""
    entity = self.get(entity_id)
    log.debug('Processing: [%s] %s' % (entity.id, command))
    entity.perform(command)
def perform_tick(self):
    """Run one tick on every ticking component of every ticking entity."""
    for entity in self.ticking_entities:
        # TODO: Somehow iterate over only ticking components
        active = (comp for comp in entity.components if comp.ticking)
        for comp in active:
            comp.tick()
# Entity helpers

def get(self, id):
    """Return the entity registered under *id*, or None when absent."""
    return self.entities.get(id)

def all(self):
    """Return every live entity in this zone."""
    return self.entities.values()

def find(self, component_name):
    """Return the set of entities owning the named component.

    Accepts either a component class or its name string.
    """
    if inspect.isclass(component_name):
        component_name = component_name.__name__
    matches = self.entities_by_component_name.get(component_name, set())
    return matches
def spawn(self, components=None, **kwargs):
    """Create a new Entity, register it with this zone, attach components.

    :param components: optional components to add to the new entity.
    :param kwargs: forwarded to the Entity constructor.
    :return: the newly created entity.

    BUGFIX: the default was a shared mutable list (``components=[]``) —
    a classic Python pitfall.  None is used as the sentinel instead;
    behavior is unchanged because the default was only ever truth-tested.
    """
    entity = Entity(**kwargs)
    self.add(entity)
    if components:
        entity.components.add(components)
    return entity
def clone(self, entity):
    """Duplicate an entity by round-tripping its dict serialization."""
    # TODO FIXME: This is fairly awful
    return Entity.from_dict(entity.to_dict(), self)

def destroy(self, entity):
    """Strip the entity's components and drop it from this zone."""
    entity.components.purge()
    self.remove(entity)

def add(self, entity):
    """Assign a fresh id to *entity* and register it with this zone."""
    entity.id = self.next_id()
    entity.zone = self
    self.entities[entity.id] = entity

def remove(self, entity):
    """Unregister *entity* from this zone and detach it."""
    del self.entities[entity.id]
    entity.zone = None
示例4: Auth
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import blpop [as 别名]
# Qiniu storage bucket that receives the mirrored media files.
BUCKET_NAME = "elfin"
# Local HTTP proxy used for all outbound media downloads.
Proxy = {"http": "http://127.0.0.1:48657"}
# Qiniu auth client; ACCESS_KEY / SECRET_KEY are presumably defined
# earlier in the file — not visible here.
qiniu = Auth(ACCESS_KEY, SECRET_KEY)
def get_name():
    """Return a unique-ish object key: the current unix time, dot stripped."""
    return str(time()).replace(".", "")
def upload_media(media):
    """Upload raw *media* bytes to the qiniu bucket under a fresh key.

    Returns whatever ``put_data`` returns (a (ret, info) pair per qiniu SDK
    usage at the call site).
    """
    upload_key = get_name()
    upload_token = qiniu.upload_token(BUCKET_NAME)
    return put_data(upload_token, upload_key, media)
if __name__ == "__main__":
    # Worker loop: pop media URLs from Redis, fetch them through the proxy,
    # and mirror the content to qiniu.  Failed uploads are re-queued.
    while 1:
        media_url = redis.blpop("media_url")
        if media_url:
            # blpop returns a (key, value) tuple; the URL is the value.
            media = get(url=media_url[1], proxies=Proxy)
            try:
                ret, info = upload_media(media.content)
            except Exception as e:  # BUGFIX: was `except Exception, e` (Python 2 only syntax)
                # BUGFIX: re-queue the URL itself; the original pushed the
                # whole (key, value) tuple, corrupting the queue on retry.
                redis.rpush("media_url", media_url[1])
                logbook.error(e)
            else:
                media_name = ret["key"]
                # NOTE(review): zadd(name, score, value) is the legacy
                # redis-py signature; redis-py >= 3 requires a mapping
                # zadd(name, {value: score}) — confirm installed version.
                redis.zadd("image_name", int(media_name), int(media_name))
            finally:
                logbook.info("work on {}".format(media_url))
示例5: __init__
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import blpop [as 别名]
class BeatmapDaemon:
    """Daemon that pops beatmap ids off a Redis queue, downloads each map
    from osu! and uploads it to every configured download server."""

    def __init__(self):
        self.crawler = BeatmapCrawler(OSU_ACCOUNT['username'], OSU_ACCOUNT['password'])
        # Instantiate and initialise one upload handler per configured server.
        handlers = []
        for SERVER in SERVERS:
            handler_class = SERVER['handler']
            handler = handler_class(DownloadServer.objects.get(pk=SERVER['server_id']), SERVER['config'])
            handler.init()
            handlers.append(handler)
        self.handlers = tuple(handlers)
        self.redis = None  # StrictRedis client; created in run_daemon()
        # :type: logging.Logger
        self.logger = logging.getLogger('osubeatmaps.daemon')

    def ensure_beatmap(self, beatmap_id):
        """Return the Beatmap row for *beatmap_id*, crawling and saving it
        when it does not exist locally yet."""
        try:
            beatmap = Beatmap.objects.get(pk=beatmap_id)
        except Beatmap.DoesNotExist:
            beatmap = self.crawler.crawl_single(beatmap_id)
            beatmap.save()
        return beatmap

    def check_all(self):
        """Reconcile each handler's remote listing with the local Download
        table, saving any download record not yet known locally."""
        for handler in self.handlers:
            downloads = handler.check_all()
            for download in downloads:
                beatmap = self.ensure_beatmap(download.beatmap_id)
                try:
                    Download.objects.get(server_id=handler.server.id, beatmap_id=beatmap.id)
                except Download.DoesNotExist:
                    download.save()

    def process_single(self, beatmap_id):
        """Download one beatmap and upload it to every handler in parallel.

        :param beatmap_id:
        :return: timestamp of finishing downloading
        """
        self.crawler.ensure_login()
        self.crawler.crawl_single(beatmap_id).save()
        p = self.crawler.download_beatmap(beatmap_id)
        # Spool the download to a temp file so each uploader thread can
        # open its own independent read handle.
        fd, tmp_filename = tempfile.mkstemp()
        os.close(fd)
        tmp_file = open(tmp_filename, 'wb')
        shutil.copyfileobj(p[1], tmp_file)
        tmp_file.close()
        p[1].close()
        # record the finishing time.
        ret = time.time()
        threads = []
        """:type : list of Thread """
        # invoke each handler in its own thread.
        # BUGFIX: bind `handler` per iteration via the default-argument
        # trick — the original closure captured the loop variable late, so
        # every thread uploaded through the *last* handler.
        for handler in self.handlers:
            def handle(handler=handler):
                _tmp_file = open(tmp_filename, 'rb')
                handler.upload(p[0], _tmp_file).save()
                _tmp_file.close()
            threads.append(Thread(None, handle))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # remove downloaded file
        os.unlink(tmp_filename)
        # return the timestamp indicating download finishing time.
        return ret

    # noinspection PyBroadException
    def run_daemon(self):
        """Main loop: block on the fetch queue and process ids forever.

        KeyboardInterrupt stops the daemon; any other error is logged and
        the loop continues with the next queued id.
        """
        self.logger.info("Starting beatmap daemon.")
        self.redis = StrictRedis()
        while True:
            try:
                beatmap_id = int(self.redis.blpop(FETCH_QUEUE_KEY)[1])
                self.logger.info('Now processing beatmap #%d.', beatmap_id)
                try:
                    django.db.close_old_connections()
                    stamp = self.process_single(beatmap_id)
                    self.logger.info('Finished processing beatmap #%d.', beatmap_id)
                    # Throttle: keep at least DOWNLOAD_SLEEP_TIME between downloads.
                    delta_time = time.time() - stamp
                    if delta_time < DOWNLOAD_SLEEP_TIME:
                        time.sleep(DOWNLOAD_SLEEP_TIME - delta_time)
                except Exception:
                    self.logger.exception("An exception raised while processing beatmap #%d. Aborting.", beatmap_id)
            except KeyboardInterrupt:
                self.logger.info("stopping beatmap daemon.")
                # NOTE(review): quit() depends on the site module; sys.exit(0)
                # would be the safer choice — confirm before changing.
                quit()
            except Exception:
                self.logger.exception("An exception raised while processing crawling queue.")
                self.logger.critical("Error whiling processing crawling queue.")