本文整理汇总了Python中redis.StrictRedis.hget方法的典型用法代码示例。如果您正苦于以下问题:Python StrictRedis.hget方法的具体用法?Python StrictRedis.hget怎么用?Python StrictRedis.hget使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类redis.StrictRedis
的用法示例。
在下文中一共展示了StrictRedis.hget方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sync_get
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
def sync_get(self, hash_id, identity, *args, **kwargs):
    """
    Fetch data from the Redis cache, falling back to the sync function.

    :param hash_id: Unique hash key for the data
    :param identity: Unique integer for the data
    :param args: Positional args forwarded to the sync function (Default: None)
    :param kwargs: Keyword args forwarded to the sync function
    :return: the cached or freshly-synced data, or None when empty/missing
    """
    redis = StrictRedis(connection_pool=self.redis_pool)
    hash_key = key_generator(self.hash_key, hash_id)
    key = key_generator(self.key, identity)
    try:
        if redis.hexists(hash_key, key):
            # Cache hit: decode the stored payload.
            data = self.get_func(redis.hget(hash_key, key))
        else:
            # Cache miss: fetch from the source and store it.
            data = self.sync_func(identity, *args, **kwargs)
            redis.hset(hash_key, key, self.set_func(data))
        # BUG FIX: the original used "or", which is always true (a value
        # cannot simultaneously be None and ""), so the None fallback was
        # unreachable. "and" makes empty results return None as intended.
        if data is not None and data != "":
            return data
        return None
    except RedisError as re:
        self.log.error("[REDIS] %s", str(re))
        # BUG FIX: forward *args/**kwargs the same way as the call above;
        # the original passed the args tuple as one positional argument.
        data = self.sync_func(identity, *args, **kwargs)
        return data
    finally:
        # Drop the client reference; the shared pool keeps connections alive.
        del redis
示例2: load
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
def load(obj, namespace=None, silent=True, key=None):
    """
    Read a single key or a whole namespace from redis into "settings".

    :param obj: the settings instance
    :param namespace: settings namespace default='DYNACONF'
    :param silent: if errors should raise
    :param key: if defined load a single key, else load all in namespace
    :return: None
    """
    redis = StrictRedis(**obj.REDIS_FOR_DYNACONF)
    holder = ("DYNACONF_%s" % (namespace or obj.DYNACONF_NAMESPACE)).upper()
    try:
        if key:
            # Single-key mode: fetch one hash field and set it if non-empty.
            single = parse_conf_data(redis.hget(holder, key))
            if single:
                obj.set(key, single)
        else:
            # Bulk mode: parse every field of the namespace hash.
            loaded = {}
            for field, raw in redis.hgetall(holder).items():
                loaded[field] = parse_conf_data(raw)
            if loaded:
                obj.update(loaded, loader_identifier=IDENTIFIER)
    except Exception as e:
        e.message = 'Unable to load config from redis (%s)' % e.message
        if silent:
            obj.logger.error(e.message)
            return False
        raise
示例3: MispRedisConnector
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class MispRedisConnector(object):
    """Read-only connector querying MISP attribute hashes stored in Redis."""

    def __init__(self):
        # Connect through the unix socket configured at module level.
        self.r = StrictRedis(unix_socket_path=redis_socket)

    def search(self, authkey, values=None, hash_values=None, return_eid=False, quiet=False):
        # Normalise the query into a list of SHA256 digests.
        if isinstance(values, list):
            hash_values = [SHA256.new(item.lower()).hexdigest() for item in values]
        elif values:
            hash_values = [SHA256.new(values.lower()).hexdigest()]
        elif not isinstance(hash_values, list):
            hash_values = [hash_values]
        if not hash_values:
            raise Exception('No value to search.')
        org = self.__get_org_by_auth(authkey)
        if not org:
            raise Exception('Invalid authkey')
        if quiet:
            # Existence check only: global key or org-scoped key.
            flags = []
            for digest in hash_values:
                flags.append(self.r.exists(digest) or self.r.exists(org + ':' + digest))
            return flags
        # Union of global and org-scoped event UUID sets per digest.
        uuid_sets = []
        for digest in hash_values:
            uuid_sets.append(self.r.smembers(digest).union(self.r.smembers(org + ':' + digest)))
        if not return_eid:
            return uuid_sets
        # Resolve each UUID to its event id via the 'uuid_id' hash.
        resolved = []
        for uuids in uuid_sets:
            resolved.append([self.r.hget('uuid_id', u) for u in uuids])
        return resolved

    def __get_org_by_auth(self, authkey):
        # The authkey itself is a plain key mapping to the org name.
        return self.r.get(authkey)
示例4: post_updates
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
def post_updates(self, updates, log_index):
    '''
    Apply a batch of CRUD token changes to the local Redis cache.
    '''
    cache = StrictRedis(db=config.tokens_cache_redis_db)
    self.log.info('post_updates(): posting updates to local storage')
    # TODO: could re-add the Redis "Pipelines" feature to combine Redis requests for better performance when available
    for user, token, date, action in updates:
        if action == 'add':
            cache.hset('general', token, user)  # future method - user-by-token -- really just existence of a token
            cache.hset('users', user, token)  # future-method - token-by-user: allow lookup of previous token on token changes
            cache.set(token, user)  # Current method
            self.log.info('post_updates(): added token for user: ' + user)
        elif action == 'delete':
            cache.hdel('general', token)  # future method - disables the ability to authenticate
            cache.hdel('users', user)  # future method - removes history of token
            cache.delete(token)
            self.log.info('post_updates(): deleted token for user: ' + user)
        elif action == 'update':
            old_token = cache.hget('users', user)
            cache.hdel('general', old_token)  # future method - disables the ability to authenticate with previous token
            cache.hset('general', token, user)  # future method - set the new token for the user
            cache.hset('users', user, token)  # future method - set the user as possessing the new token
            cache.set(token, user)
            self.log.info('post_updates(): updated token for user: ' + user)
        else:
            self.log.critical('post_updates(): unexpected change type: ' + action)
    # don't set if there is nothing to do and also don't set if there are errors
    if updates:
        cache.set('log_index', log_index)
示例5: RedisBackend
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisBackend(BaseBackend):
    """Thumbnail metadata backend persisted in Redis hashes.

    Sources live under ``<prefix>sources:<name>`` and thumbnails under
    ``<prefix>thumbnails:<name>`` (one field per size).
    """

    def __init__(self):
        from redis import StrictRedis
        # All connection settings come from the project METADATA config.
        options = conf.METADATA
        self.prefix = options.get('PREFIX', 'djthumbs') + ":"
        self.redis = StrictRedis(
            host=options.get('host', 'localhost'),
            port=options.get('port', 6379),
            password=options.get('password', None),
            db=options.get('db', 0),
        )

    def get_source_key(self, name):
        return "%ssources:%s" % (self.prefix, name)

    def get_thumbnail_key(self, name):
        return "%sthumbnails:%s" % (self.prefix, name)

    def add_source(self, name):
        self.redis.hset(self.get_source_key(name), name, name)
        return name

    def get_source(self, name):
        return compat.as_text(self.redis.hget(self.get_source_key(name), name))

    def delete_source(self, name):
        return self.redis.hdel(self.get_source_key(name), name)

    def get_thumbnails(self, name):
        # One ImageMeta per stored size -> thumbnail-name field.
        stored = self.redis.hgetall(self.get_thumbnail_key(name))
        thumbs = []
        for size, thumbnail_name in stored.items():
            thumbs.append(ImageMeta(name, thumbnail_name, size))
        return thumbs

    def get_thumbnail(self, source_name, size):
        thumbnail_name = compat.as_text(self.redis.hget(self.get_thumbnail_key(source_name), size))
        if not thumbnail_name:
            return None
        return ImageMeta(source_name, thumbnail_name, size)

    def add_thumbnail(self, source_name, size, name):
        self.redis.hset(self.get_thumbnail_key(source_name), size, name)
        return ImageMeta(source_name, name, size)

    def delete_thumbnail(self, source_name, size):
        self.redis.hdel(self.get_thumbnail_key(source_name), size)
示例6: __init__
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class WordList:
    """Redis-backed cache of recently looked-up words, mirrored to ~/.words.txt."""
    def __init__(self):
        # NOTE(review): uses the legacy ``Redis`` client class; the
        # zadd/zincrby argument orders below match pre-3.0 redis-py --
        # confirm against the installed client version.
        self.conn = Redis()
        self.CACHE_SIZE = 50              # maximum number of cached words
        self.CACHE_KEYS = "words-keys"    # sorted set: word -> usage score
        self.CACHE_STORE = "words-store"  # hash: word -> stored value
        self.WORD_FILE = os.path.join(os.path.expanduser("~"), '.words.txt')
    def _reorganize(self):
        # Evict the lowest-scored entries so the cache stays within CACHE_SIZE.
        pop_n = self.conn.zcard(self.CACHE_KEYS) - self.CACHE_SIZE
        if pop_n >= 0:
            to_pop = self.conn.zrange(self.CACHE_KEYS, 0, pop_n)
            #print pop_n, to_pop
            self.conn.zremrangebyrank(self.CACHE_KEYS, 0, pop_n)
            for k in to_pop:
                self.conn.hdel(self.CACHE_STORE, k)
    def _add_word(self, key, value):
        # Bump the score of a known word; otherwise make room and insert it
        # with an initial score of 1.
        result = self.conn.hget(self.CACHE_STORE, key)
        if result:
            self.conn.zincrby(self.CACHE_KEYS, key, 1.0)
        else:
            self._reorganize()
            self.conn.hset(self.CACHE_STORE, key, value)
            self.conn.zadd(self.CACHE_KEYS, 1, key)
    def _get_words(self):
        # Return (word, score) pairs, most-used first; None if Redis is down.
        try:
            words = self.conn.zrevrange(self.CACHE_KEYS, 0, -1, True)
            #hashs = self.conn.hgetall(self.CACHE_STORE)
            #print words
            #print hashs
            return words
        except redis.exceptions.ConnectionError:
            return None
    def dump_console(self):
        # Print the word file verbatim (Python 2 print statement).
        if os.path.isfile(self.WORD_FILE):
            with open(self.WORD_FILE, 'r') as f:
                print f.read()
    def write_file(self):
        # Rewrite the word file as "<rank>. <word>\t <count>" lines.
        words = self._get_words()
        if words is None:
            return
        content = '\n'.join(["%d. %s\t %d"%(i, x[0], int(x[1])) for i, x in enumerate(words)])
        with open(self.WORD_FILE, 'w+') as f:
            f.write(content)
            f.write('\n')
    def add_word(self, key):
        # Public entry point: record one word and refresh the file.
        # Best-effort: a Redis outage is silently ignored.
        try:
            self._add_word(key,key)
            self.write_file()
        except redis.exceptions.ConnectionError:
            return
示例7: RedisDB
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisDB(object):
    """Redis facade for a train-ticket notifier: queues notification
    requests in sorted sets and caches station lookups in hashes."""
    def __init__(self):
        # Lazily create one connection pool shared by all instances.
        if not hasattr(RedisDB, 'pool'):
            RedisDB.createPool()
        self.r = StrictRedis(connection_pool = RedisDB.pool)
    @staticmethod
    def createPool():
        RedisDB.pool = redis.ConnectionPool(
            host = '127.0.0.1',
            port = 6379,
            db = 0
        )
    def saveToRedis(self, receiver, to_station_ab, to_station_name, from_station_ab, from_station_name, querydate, purpose_code, noticetime, publishtime):
        '''Store a ticket-query request in Redis for later crawling.'''
        uid = self.r.incr('uid')  # monotonically increasing request id
        tickets_info = {'uid':uid, 'receiver':receiver, 'to_station_ab':to_station_ab, 'to_station_name':to_station_name, 'from_station_ab':from_station_ab,'from_station_name':from_station_name, 'querydate':querydate, 'purpose_code':purpose_code, 'noticetime':noticetime, 'publishtime': publishtime}
        # NOTE(review): zadd(name, score, member) is the pre-3.0 redis-py
        # call signature -- confirm the installed client version.
        self.r.zadd('email_que_set_all', uid, str(tickets_info))
        # Also index the request by its notification time slot.
        if noticetime == '9am':
            self.r.zadd('email_que_set_9am', uid, str(tickets_info))
        elif noticetime == '11am':
            self.r.zadd('email_que_set_11am', uid, str(tickets_info))
        elif noticetime == '3pm':
            self.r.zadd('email_que_set_3pm', uid, str(tickets_info))
        elif noticetime == '5pm':
            self.r.zadd('email_que_set_5pm', uid, str(tickets_info))
        self.r.save()  # force an RDB snapshot after every insert
    def getStation(self, set, name):
        # Look up one field of a station hash (param "set" shadows the builtin).
        return self.r.hget(set, name)
    def zrevrange(self, set, begin, end):
        return self.r.zrevrange(set, begin, end)
    def zremrangebyscore(self, queue, uid):
        # Scores are unique uids, so this removes exactly one entry.
        return self.r.zremrangebyscore(queue, uid, uid)
    def station_validate(self, form, field):
        '''
        Form validator for the tostation/fromstation fields.
        '''
        # NOTE(review): broken as written -- StrictRedis has no getStation()
        # method (it is a method of this class, and it takes two arguments).
        # This raises AttributeError; probably meant
        # self.getStation(<station-hash-name>, field.data). Confirm intent.
        if not self.r.getStation(field.data):
            raise ValidationError(u'木有这个站')
    def saveJSONToSet(self, setName, json):
        # Seed a hash from a JSON dict, only if it does not exist yet
        # (param "json" shadows the json module).
        if not self.r.exists(setName):
            for i, name in enumerate(json):
                self.r.hset(setName, name, json[name])
                print 'insert'+name
            self.r.save()
        else:
            pass
示例8: RedisDict
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisDict(MutableMapping):
    """A dictionary-like view over a single Redis hash.

    Keys and values are pickled before storage, so arbitrary picklable
    Python objects work on both sides of the mapping.
    """

    def __init__(self, namespace, collection_name='redis_dict_data',
                 connection=None):
        """
        The actual key name on the redis server will be
        ``namespace``:``collection_name``.

        :param namespace: namespace to use
        :param collection_name: name of the hash map stored in redis
                                (default: redis_dict_data)
        :param connection: ``redis.StrictRedis`` instance; when ``None``
                           (default) a new connection with default options
                           is created
        """
        self.connection = Redis() if connection is None else connection
        self._self_key = ':'.join([namespace, collection_name])

    def __getitem__(self, key):
        stored = self.connection.hget(self._self_key, pickle.dumps(key))
        if stored is None:
            raise KeyError
        return pickle.loads(bytes(stored))

    def __setitem__(self, key, item):
        self.connection.hset(
            self._self_key, pickle.dumps(key), pickle.dumps(item))

    def __delitem__(self, key):
        removed = self.connection.hdel(self._self_key, pickle.dumps(key))
        if not removed:
            raise KeyError

    def __len__(self):
        return self.connection.hlen(self._self_key)

    def __iter__(self):
        # Hash fields are pickled keys; decode each one as we go.
        for raw_key in self.connection.hkeys(self._self_key):
            yield pickle.loads(bytes(raw_key))

    def clear(self):
        # Dropping the backing hash empties the whole mapping at once.
        self.connection.delete(self._self_key)

    def __str__(self):
        return str(dict(self.items()))
示例9: RedisManager
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisManager(NoSqlManager):
    """Beaker namespace manager backed by Redis hashes.

    Each namespace key maps to a hash with 'data', 'accessed' and
    'created' fields; the 'data' field holds the pickled beaker value.
    """
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        # Optional default TTL (seconds) applied to every stored key.
        self.expiretime = params.pop('expiretime', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)
    def open_connection(self, host, port, **params):
        self.db_conn = StrictRedis(host=host, port=int(port), **params)
    def __getitem__(self, key):
        # NOTE(review): a missing key makes hget return None and
        # pickle.loads raise TypeError rather than KeyError -- confirm
        # callers always check __contains__ first.
        return pickle.loads(self.db_conn.hget(self._format_key(key), 'data'))
    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))
    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)
        #
        # beaker.container.Value.set_value calls NamespaceManager.set_value
        # however it (until version 1.6.4) never sets expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise
        # because Manager class can be instantiated outside container.py (See: session.py)
        #
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]
        self.db_conn.hset(key, 'data', pickle.dumps(value))
        # 'accessed'/'created' are stored as stringified datetimes by the client.
        self.db_conn.hset(key, 'accessed', datetime.now())
        self.db_conn.hsetnx(key, 'created', datetime.now())
        if expiretime or self.expiretime:
            self.db_conn.expire(key, expiretime or self.expiretime)
    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))
    def _format_key(self, key):
        # '\302\267' is the UTF-8 middle dot; spaces are replaced so the
        # key stays a single token in redis key patterns.
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))
    def do_remove(self):
        # NOTE: flushes the whole redis database, not only this namespace.
        self.db_conn.flushdb()
    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
示例10: RedisStorage
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisStorage(BaseSharedStorage):
    """Shared storage keeping each filename as a single-entry Redis hash."""

    def __init__(self, db_num):
        self._redis = StrictRedis(db=db_num)

    def save(self, filename, key, data):
        # A filename maps to exactly one key/data pair, so drop any
        # previous entries before writing the new one.
        self._purge_duplicates(filename)
        self._redis.hset(filename, key, data)

    def load(self, filename):
        stored_keys = self._redis.hkeys(filename)
        first_key = stored_keys[0]  # Should never be more than one key
        return first_key, self._redis.hget(filename, first_key)

    def _purge_duplicates(self, dict_key):
        """Remove identical files from server to be replaced by new files."""
        for field in self._redis.hkeys(dict_key):
            self._redis.hdel(dict_key, field)
示例11: process
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
def process(event, r_host):
    """
    Queue a newly created file for plugin processing, skipping files
    that are already in the rq queue.

    event.event_type
        'modified' | 'created' | 'moved' | 'deleted'
    event.is_directory
        True | False
    event.src_path
        path/to/observed/file

    :param r_host: hostname of the Redis server backing the rq queue
    """
    uid = str(uuid.uuid4())
    hostname = os.environ.get("VENT_HOST")
    if not hostname:
        hostname = ""
    try:
        # let jobs run for up to one day
        q = Queue(connection=Redis(host=r_host), default_timeout=86400)
        # TODO should directories be treated as bulk paths to send to a plugin?
        if event.event_type == "created" and not event.is_directory:
            # check if the file was already queued and ignore
            time.sleep(15)
            exists = False
            print(uid+" started " + event.src_path)
            r = StrictRedis(host=r_host, port=6379, db=0)
            jobs = r.keys(pattern="rq:job*")
            for job in jobs:
                print(uid+" ***")
                description = r.hget(job, 'description')
                # NOTE(review): assumes every rq job description contains
                # the file_queue marker; an unrelated job would make the
                # split() below raise IndexError -- confirm queue contents.
                print(uid+" "+description)
                print(uid+" "+description.split("file_watch.file_queue('"+hostname+"_")[1][:-2])
                print(uid+" "+event.src_path)
                if description.split("file_watch.file_queue('"+hostname+"_")[1][:-2] == event.src_path:
                    print(uid+" true")
                    exists = True
                print(uid+" ***")
            if not exists:
                # !! TODO this should be a configuration option in the vent.template
                print(uid+" let's queue it "+event.src_path)
                # let jobs be queued for up to 30 days
                # (the returned Job object was unused, so it is not bound)
                q.enqueue('file_watch.file_queue', hostname+"_"+event.src_path, ttl=2592000)
            print(uid+" end "+event.src_path)
    except Exception as e:  # pragma: no cover
        print(str(e))
示例12: response
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
def response(self):
    """Exchange a scanned QR ticket for a temporary auth code and reply
    with the demo showroom URL.

    NOTE(review): this is a generator-style coroutine (it yields futures
    and raises ``gen.Return``); it relies on a ``@gen.coroutine``-style
    decorator not visible in this chunk -- confirm at the call site.
    """
    db = StrictRedis(DB_HOST, port=DB_PORT, password=DB_PASSWORD)
    # Map the QR ticket to the host (exhibitor) that generated it.
    host_id = db.hget('LIMIT_QR', self.ticket)
    #print 'LIMIT_QR {}, {}'.format(self.event_key, host_id)
    oid = str(uuid4())  # correlation id for the DB-server round trip
    ws_conn = yield websocket_connect(DB_SERVER)
    msg = enc('DB_TEMP_AUTHCODE', {'ID': self.source,
                                   'OID': oid,
                                   'HID': host_id,
                                   'TYPE': DEMO_SHOWROOM})
    ws_conn.write_message(msg)
    resp = yield ws_conn.read_message()
    event, args = dec(resp)
    # Only accept the reply matching our correlation id; anything else
    # (error or stale message) yields an empty response.
    if event == 'DB_OK' and args['OID'] == oid:
        auth_code = args['AUTHCODE']
        raise gen.Return(build_response('text', source=self.target,
                                        target=self.source, format='xml',
                                        content=demo_url.format(auth_code)))
    else:
        raise gen.Return('')
示例13: WikiController
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class WikiController(object):
    """Serves wiki pages, using Redis as a shared cache for page
    modification times and aliases on top of the filesystem store."""

    def __init__(self, settings):
        """Initialize the controler and preload basic metadata"""
        self.redis = Redis(host=settings.redis.bind_address, port=settings.redis.port)
        self.store = Store(settings.content.path)
        self.get_all_pages()    # page modification times
        self.get_all_aliases()  # page aliases

    def get_page(self, path):
        """Returns a single page"""
        if path not in self.store.pages:
            raise KeyError
        return self.store.get_page(path)

    def resolve_alias(self, path):
        """Attempts to resolve an alias to a page"""
        # Check locally first, to save overhead
        if path in self.store.aliases:
            return self.store.aliases[path]
        # Otherwise another process may have registered it in Redis
        alias = self.redis.hget(META_ALIASES, path)
        if not alias:
            return None
        self.store.aliases[path] = alias
        return alias

    def get_all_pages(self):
        """Returns a hash of all known pages and mtimes"""
        if len(self.store.pages):
            return self.store.pages
        if self.redis.exists(META_PAGES):
            self.store.pages = self.redis.hgetall(META_PAGES)
        else:
            # force filesystem scan and alias generation
            pages = self.store.get_all_pages()
            log.debug(pages)
            self.redis.hmset(META_PAGES,self.store.get_all_pages())
        return self.store.pages

    def get_all_aliases(self):
        """Returns a hash of all known page aliases"""
        if len(self.store.aliases):
            return self.store.aliases
        if self.redis.exists(META_ALIASES):
            self.store.aliases = self.redis.hgetall(META_ALIASES)
        else:
            # force filesystem scan and alias generation
            self.store.get_all_pages()
            self.redis.hmset(META_ALIASES, self.store.aliases)
        return self.store.aliases

    def get_close_matches_for_page(self, path):
        """Get a list of close matches for a given page/path"""
        known = self.get_all_pages()
        return get_close_matches(path, known.keys())
示例14: RedisJobStore
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's StrictRedis.
    Plugin alias: ``redis``
    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """
    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()
        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')
        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)
    def lookup_job(self, job_id):
        # Job state lives in a hash keyed by job id; None -> unknown job.
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None
    def get_due_jobs(self, now):
        # Run times are stored as UTC-timestamp scores in a sorted set.
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []
    def get_next_run_time(self):
        # Lowest score in the sorted set = the soonest scheduled run.
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])
    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        # Paused jobs (no next_run_time) sort last via a far-future sentinel.
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)
        # Pipeline keeps the hash write and the run-time index in sync.
        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                # NOTE(review): zadd(name, score, member) is the pre-3.0
                # redis-py signature -- confirm the pinned client version.
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()
    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)
        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                # Paused job: make sure it is absent from the run-time index.
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()
    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)
        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()
    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()
    def shutdown(self):
        self.redis.connection_pool.disconnect()
    def _reconstitute_job(self, job_state):
        # Rebuild a Job instance from its pickled state without __init__.
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job
#.........这里部分代码省略.........
示例15: KVDB
# 需要导入模块: from redis import StrictRedis [as 别名]
# 或者: from redis.StrictRedis import hget [as 别名]
class KVDB(object):
    """ A wrapper around the Zato's key-value database.
    """
    def __init__(self, conn=None, config=None, decrypt_func=None):
        self.conn = conn                  # redis client, set by init()
        self.config = config              # bunch-like config mapping
        self.decrypt_func = decrypt_func  # decrypts stored passwords
    def init(self):
        """Build the StrictRedis connection from self.config."""
        config = {}
        if self.config.get('host'):
            config['host'] = self.config.host
        if self.config.get('port'):
            config['port'] = int(self.config.port)
        if self.config.get('db'):
            config['db'] = int(self.config.db)
        if self.config.get('password'):
            config['password'] = self.decrypt_func(self.config.password)
        if self.config.get('socket_timeout'):
            config['socket_timeout'] = float(self.config.socket_timeout)
        if self.config.get('connection_pool'):
            split = self.config.connection_pool.split('.')
            # BUG FIX: import_module requires a dotted module *string*;
            # the original passed the list split[:-1], which raises
            # TypeError. Join the module path back into a string.
            module, class_name = '.'.join(split[:-1]), split[-1]
            mod = import_module(module)
            config['connection_pool'] = getattr(mod, class_name)
        if self.config.get('charset'):
            config['charset'] = self.config.charset
        if self.config.get('errors'):
            config['errors'] = self.config.errors
        if self.config.get('unix_socket_path'):
            config['unix_socket_path'] = self.config.unix_socket_path
        self.conn = StrictRedis(**config)
    def pubsub(self):
        return self.conn.pubsub()
    def publish(self, *args, **kwargs):
        return self.conn.publish(*args, **kwargs)
    def subscribe(self, *args, **kwargs):
        # NOTE(review): StrictRedis exposes subscribe on the pubsub()
        # object, not on the client -- confirm callers use this at all.
        return self.conn.subscribe(*args, **kwargs)
    def translate(self, system1, key1, value1, system2, key2, default=''):
        # Translations are stored in one hash per (system, key, value) tuple.
        return self.conn.hget(
            _KVDB.SEPARATOR.join(
                (_KVDB.TRANSLATION, system1, key1, value1, system2, key2)), 'value2') or default
    def copy(self):
        """ Returns an KVDB with the configuration copied over from self. Note that
        the object returned isn't initialized, in particular, the connection to the
        database won't have been initialized.
        """
        kvdb = KVDB()
        kvdb.config = self.config
        kvdb.decrypt_func = self.decrypt_func
        return kvdb
    def close(self):
        self.conn.connection_pool.disconnect()
# ##############################################################################
# OAuth
    def add_oauth_nonce(self, username, nonce, max_nonce_log):
        """ Adds an OAuth to the set containing last N used ones for a given username.
        """
        key = NONCE_STORE.KEY_PATTERN.format('oauth', username)
        # This lets us trim the set to top (last) N nonces
        score = timegm(gmtime())
        self.conn.zadd(key, score, nonce)
        self.conn.zremrangebyrank(key, 0, -max_nonce_log)
    def has_oauth_nonce(self, username, nonce):
        """ Returns a boolean flag indicating if there's an OAuth nonce for a given
        username stored in KVDB.
        """
        return self.conn.zscore(NONCE_STORE.KEY_PATTERN.format('oauth', username), nonce)