本文整理汇总了Python中redis.StrictRedis.type方法的典型用法代码示例。如果您正苦于以下问题:Python StrictRedis.type方法的具体用法?Python StrictRedis.type怎么用?Python StrictRedis.type使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类redis.StrictRedis
的用法示例。
在下文中一共展示了StrictRedis.type方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_preparation
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import type [as 别名]
def test_preparation(self) -> None:
    """
    Verify the state right after initializing RedisQueueAccess.

    Neither the queue key nor the pub/sub channel key should exist yet,
    so Redis reports type b'none' for both.
    """
    access = RedisQueueAccess(self.__config)
    client = StrictRedis(connection_pool=self.__pool)
    self.assertEqual(client.type(access.queue), b'none')
    self.assertEqual(client.type(access.pubsub_channel), b'none')
示例2: test_list_preparation_with_exisiting_list
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import type [as 别名]
def test_list_preparation_with_exisiting_list(self) -> None:
    """
    Verify preparation when the queue key already holds a list.

    A value is pushed onto the configured queue first; after
    RedisQueueAccess runs its preparation, the key must still be a list.
    """
    client = StrictRedis(connection_pool=self.__pool)
    client.lpush(self.__config['queue'], 'test')
    access = RedisQueueAccess(self.__config)
    self.assertEqual(client.type(access.queue), b'list')
示例3: main
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import type [as 别名]
def main(stdscr):
    """
    Curses dashboard: continuously display every Redis key matching
    'aphids*' together with its current value.

    :param stdscr: curses standard screen provided by curses.wrapper
    Press 'q' to exit the loop.
    """
    # non-blocking input so the refresh loop never stalls on getkey()
    stdscr.nodelay(True)
    # keep the cursor at the drawing position after refresh
    stdscr.leaveok(0)
    # connect to the local redis server
    r = StrictRedis(host='127.0.0.1', port=6379, db=0)
    # main loop
    while True:
        stdscr.erase()
        stdscr.addstr(0, 0, "press 'q' to exit.")
        # sorted for a stable, deterministic display order
        akeys = sorted(r.scan_iter('aphids*'))
        for i, k in enumerate(akeys):
            # NOTE(review): under Python 3 redis-py returns bytes for key
            # names and type(); this code compares against str, so it looks
            # written for Python 2 — confirm before porting
            k_type = r.type(k)
            # read differently depending on type
            if k_type == "string":
                v = r.get(k)
            elif k_type == "list":
                v = r.lrange(k, 0, -1)
            else:
                # fix: v was unbound here for any other key type
                # (hash, set, zset), crashing the addstr below
                v = '<unsupported type: {0}>'.format(k_type)
            # highlight the segment-rate keys
            opts = A_STANDOUT if k.endswith('seg_rate') else 0
            stdscr.addstr(i+2, 8, k)
            stdscr.addstr(i+2, 68, "{0}".format(v), opts)
        stdscr.refresh()
        # getkey() raises when no key is pending in nodelay mode; treat
        # that as "no input" — narrowed from a bare except so Ctrl-C
        # (KeyboardInterrupt) still exits
        try:
            c = stdscr.getkey()
            if c == 'q':
                break
        except Exception:
            continue
示例4: sync_cache
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import type [as 别名]
def sync_cache(self):
    '''
    Update the local token and scope cache from cloud storage.

    Pulls the token worksheet and every scope worksheet from the Google
    Spreadsheet identified by config.tokens_spreadsheet_id, then
    reconciles the local Redis cache against that live state: stale
    tokens and stale scope members are deleted, current ones are
    (re)written, and the change-log index key is reset to the last
    log entry.
    '''
    # Collect token management info from the cloud
    client = gdata.spreadsheet.service.SpreadsheetsService()
    client.ClientLogin(config.tokens_google_client_login, config.tokens_google_client_password)
    worksheets_feed = client.GetWorksheetsFeed(config.tokens_spreadsheet_id).entry
    # Map worksheet title -> worksheet id (last path segment of the id URL)
    worksheet_ids = {}
    for worksheet in worksheets_feed:
        worksheet_ids[worksheet.title.text] = worksheet.id.text.split('/')[-1]
        self.log.debug('sync_cache() found sheets: ' + worksheet.title.text)
    # Connect to the local token cache
    cache = StrictRedis(db=config.tokens_cache_redis_db)
    # Fetch token list; cells arrive row-major with 3 columns per token row
    query = gdata.spreadsheet.service.CellQuery()
    query.min_row = '1'
    cells = client.GetCellsFeed(config.tokens_spreadsheet_id, wksht_id=worksheet_ids['tokens'], query=query).entry
    cols = 3
    # fix: floor division keeps `rows` an int under Python 3
    # (true division would hand a float to range() below)
    rows = len(cells) // cols
    cloud_tokens = []
    token_ent_by_token = {}
    for row in range(0, rows):
        # each entry is (user, token, datetime)
        token_ent = [str(cell.content.text) for cell in cells[row * cols : (row + 1) * cols]]
        (user, token, datetime) = token_ent
        token_ent_by_token[token] = token_ent
        cloud_tokens.append(token_ent)
    self.log.debug('sync_cache() fetched tokens from cloud: ' + str(rows))
    # Add our admin token for maintaining caches
    admin_token_hash = auth.calc_hash(config.admin_token)
    admin_ent = ('[email protected]', admin_token_hash, '')
    token_ent_by_token[admin_token_hash] = admin_ent
    cloud_tokens.append(admin_ent)
    # Fetch scopes: every worksheet that is not 'tokens' or 'change log'
    # is treated as a scope member list
    cloud_scopes = {}
    for worksheet_title in worksheet_ids.keys():
        if worksheet_title not in ['tokens', 'change log']:  # must then be a scope list
            self.log.debug('sync_cache() fetching scope: ' + str(worksheet_title))
            cloud_scopes[worksheet_title] = []
            cells = client.GetCellsFeed(config.tokens_spreadsheet_id, wksht_id=worksheet_ids[worksheet_title], query=query).entry
            for cell in cells:
                cloud_scopes[worksheet_title].append(cell.content.text)
            self.log.debug('sync_cache() fetched scope: ' + str(worksheet_title) + ', with number of items: ' + str(len(cells)))
    # At this point the current live state of all tokens and scopes has been retrieved from cloud storage
    #cache.flushdb() # Remove all keys from the current database -- ok in dev, evil in practice
    # Eliminate keys in local cache which are not in the cloud cache
    for token in cache.keys():
        # fix: `<>` was removed in Python 3; `!=` is equivalent on Python 2
        if cache.type(token) != 'string':  # This is a scope set, and not a token
            continue
        if len(token) < 60 or len(token) > 70:  # The cache item is the wrong size to be a token
            continue
        if token not in token_ent_by_token:
            self.log.info('sync_cache() eliminating stale token: ' + token)
            cache.delete(token)
    # Update keys in the local cache
    # Build user and general cache (token -> user)
    for token_ent in cloud_tokens:
        (user, token, datetime) = token_ent
        self.log.debug('sync_cache() adding to local cache, token: ' + token + ', user: ' + str(user))
        cache.set(token, user)
    self.log.debug('sync_cache() updated tokens, count: ' + str(len(cloud_tokens)))
    # Eliminate scope members in the local cache which are not in the cloud cache
    for (cloud_scope_name, cloud_scope_list) in cloud_scopes.items():
        for user in cache.smembers(cloud_scope_name):
            if user not in cloud_scope_list:
                cache.srem(cloud_scope_name, user)
                self.log.info('sync_cache() eliminated user: ' + user + ' from scope: ' + cloud_scope_name)
    # TODO: Eliminate deactivated scopes
    # Build scopes as Redis sets (one set per scope, members are users)
    for (scope_name, scope_list) in cloud_scopes.items():
        for user in scope_list:
            cache.sadd(scope_name, user)
        self.log.debug('sync_cache() added scope: ' + scope_name + ', with member count: ' + str(len(scope_list)))
    # Reset the cache log index to the last entry
    num_log_entries = len(client.GetListFeed(config.tokens_spreadsheet_id, wksht_id=worksheet_ids['change log']).entry) + 1
    cache.set('log_index', num_log_entries)  # Reset the cache log index to the start
    self.log.debug('sync_cache() set log_index to: ' + str(num_log_entries))