本文整理汇总了Python中redis.from_url函数的典型用法代码示例。如果您正苦于以下问题:Python from_url函数的具体用法?Python from_url怎么用?Python from_url使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了from_url函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: logs_new_guild
def logs_new_guild(guild_name, guild_server, guild_region):
    """Fetch a guild's Warcraft Logs reports, keep zone-8 ones, and cache them.

    Queries the Warcraft Logs v1 API for all reports of the guild, filters
    to zone 8, runs analyze_guild_logs() on the result, and stores the guild
    dict in redis under the hash key "<name>_<server>_<region>".

    :param guild_name: guild name as used by Warcraft Logs
    :param guild_server: realm/server name
    :param guild_region: region code (e.g. "US", "EU")
    :return: the analyzed guild dict that was written to redis
    """
    # Prefer the hosted redis URL from the environment; fall back to the
    # local config module. (Original used a bare `except:` around from_url
    # with a possibly-None URL to achieve the same fallback.)
    redis_url = os.environ.get('REDISTOGO_URL')
    if redis_url:
        r = redis.from_url(redis_url)
    else:
        import config
        r = redis.from_url(config.REDISTOGO_URL)
    guild = {
        "guild_name": guild_name,
        "guild_server": guild_server,
        "guild_region": guild_region,
        "logs": [],
    }
    start_time = 1438387200000  # ms epoch; lower bound for report listing
    # NOTE(review): the API key is hard-coded here; it should be moved to
    # config/environment rather than committed in source.
    url = ("https://www.warcraftlogs.com:443/v1/reports/guild/"
           + guild_name + "/" + guild_server + "/" + guild_region
           + "?start=" + str(start_time)
           + "&api_key=9457bbf774422ab14b5625efb2b35e36")
    response = json.loads(requests.get(url).text)
    for log in response:
        if log["zone"] == 8:
            guild["logs"].append({
                "log_id": log["id"],
                "title": log["title"],
                "start": log["start"]/1000,  # API reports ms; keep seconds
                "date": datetime.date.fromtimestamp(log["start"]/1000),
                "owner": log["owner"],
            })
    guild["last_checked"] = int(time.time())
    guild["last_checked_dt"] = datetime.datetime.fromtimestamp(guild["last_checked"])
    guild_id_string = guild_name + "_" + guild_server + "_" + guild_region
    guild = analyze_guild_logs(guild, r)
    r.hmset(guild_id_string, guild)
    return guild
示例2: last_log_from_guild
def last_log_from_guild(guild_name, guild_server, guild_region, r):
    """Return an analyze() report for the guild's most recent parseable log.

    Lists the guild's reports from the Warcraft Logs v1 API and tries to
    analyze the newest one; if that fails, walks the listing in reverse and
    returns the first report that analyzes successfully.

    :param guild_name: guild name as used by Warcraft Logs
    :param guild_server: realm/server name
    :param guild_region: region code (e.g. "US", "EU")
    :param r: redis connection — accepted for interface compatibility; the
        original body never used it (it rebuilt and then ignored its own
        connection), so it is intentionally unused here.
    :return: the analyze() result, or False when nothing could be fetched
        or analyzed.
    """
    # Dead code removed from the original: an unused `guild` dict, an
    # unused `end_time`, and a redis connection that was built but never
    # referenced.
    start_time = 1435734000000  # ms epoch; lower bound for report listing
    # NOTE(review): hard-coded API key should live in config/environment.
    url = ("https://www.warcraftlogs.com:443/v1/reports/guild/"
           + guild_name + "/" + guild_server + "/" + guild_region
           + "?start=" + str(start_time)
           + "&api_key=9457bbf774422ab14b5625efb2b35e36")
    try:
        response = json.loads(requests.get(url).text)
        try:
            report = analyze(response[0]["id"])
        except Exception:
            # Newest report failed to analyze — try the rest, oldest first
            # (same reversed order as the original), keeping the first hit.
            report = False
            for log_dict in response[::-1]:
                try:
                    report = analyze(log_dict["id"])
                    break
                except Exception:
                    continue
    except Exception:
        # Network error, bad JSON, or an empty listing: signal failure.
        report = False
    return report
示例3: image_upload
def image_upload():
    """
    The image upload route.

    Reads image URLs from the JSON request body, fetches stored credentials
    from redis, and enqueues a background RQ job to process the upload.
    Returns a JSON body with either the job id (202) or an error status.
    """
    urls = request.json['urls']
    # get credentials from the redis server
    rd = redis.from_url(current_app.config['REDIS_URL'])
    try:
        credentials = rd.get("credentials")
    except Exception:  # was a bare except; narrow to Exception
        credentials = None
    # Fix: the original only errored on a redis exception, but silently
    # continued with credentials=None when the key was simply absent —
    # which is exactly the condition the error message describes.
    if credentials is None:
        response_object = {'status': "error, no access token is found, without it you can no longer upload image files."}
        return jsonify(response_object)
    # create new Task
    tsk = Task()
    tsk.initialize(urls, credentials)
    # create and add Job
    with Connection(redis.from_url(current_app.config['REDIS_URL'])):
        q = Queue()
        task = q.enqueue('project.server.main.works.long_work', tsk)
    if task:
        response_object = {'jobId': task.get_id()}
    else:
        # Fix: "could start job" -> "could not start job" in the message.
        response_object = {
            'status': "error, could not start job, this may mean that there are no workers running, or the redis server is down"}
    return jsonify(response_object), 202
示例4: __get_connection
def __get_connection(self) -> redis.Redis:
    """
    Build and return a Redis connection for this instance.

    Uses a unix-socket URL when socket mode is configured, otherwise a
    TCP URL; authenticates afterwards if a password is configured.

    :return: Redis connection instance
    :rtype: redis.Redis
    """
    if self.__redis_use_socket:
        url = 'unix://{:s}?db={:d}'.format(self.__redis_host, self.__redis_db)
    else:
        url = 'redis://{:s}:{:d}/{:d}'.format(
            self.__redis_host, self.__redis_port, self.__redis_db)
    connection = redis.from_url(url)
    auth_token = BlackRed.Settings.REDIS_AUTH
    if auth_token is not None:
        # Authenticate explicitly rather than embedding the password in the URL.
        connection.execute_command('AUTH {:s}'.format(auth_token))
    return connection
示例5: from_settings
def from_settings(settings):
    """Build a redis client from a settings object.

    Prefers a full REDIS_URL; otherwise connects with host/port. Module-level
    REDIS_HOST / REDIS_PORT / REDIS_URL serve as defaults.

    Bug fix: the original constructed the client but never returned it,
    so callers always received None.

    :param settings: mapping-like object with a .get(key, default) method
    :return: a redis client instance
    """
    host = settings.get('REDIS_HOST', REDIS_HOST)
    port = settings.get('REDIS_PORT', REDIS_PORT)
    redis_url = settings.get('REDIS_URL', REDIS_URL)
    if redis_url:
        return redis.from_url(redis_url)
    return redis.Redis(host=host, port=int(port))
示例6: setup_rq_connection
def setup_rq_connection():
    """Resolve a redis connection and push it onto RQ's connection stack."""
    # 'RQ_DASHBOARD_REDIS_URL' environmental variable takes priority;
    # otherwise, we look at the Flask app's config for the redis information.
    env_url = os.environ.get('RQ_DASHBOARD_REDIS_URL', None)
    if env_url:
        redis_conn = from_url(env_url)
    else:
        conf = current_app.config
        if conf.get('REDIS_URL'):
            redis_conn = from_url(conf.get('REDIS_URL'))
        else:
            redis_conn = Redis(
                host=conf.get('REDIS_HOST', 'localhost'),
                port=conf.get('REDIS_PORT', 6379),
                password=conf.get('REDIS_PASSWORD', None),
                db=conf.get('REDIS_DB', 0),
            )
    push_connection(redis_conn)
示例7: handle
def handle(self, *args, **options):
    """Run statistics collection for every cached state, county, and MSA.

    Reads pre-cached census area lists ("states", "counties", and per-state
    "msa,<state>" keys) out of redis and calls get_statistics_for_area for
    each one, raising CommandError if any expected key is missing.
    """
    redis_conn = redis.from_url(os.getenv("REDISTOGO_URL", "redis://localhost:6379"))

    raw_states = redis_conn.get("states")
    if raw_states is None:
        raise CommandError('"states" are not in redis cache!')
    states = loads(raw_states)
    for state_data in states:
        self.stdout.write("Processing state: %s\n" % state_data["NAME"])
        get_statistics_for_area(None, state_data["state"])

    raw_counties = redis_conn.get("counties")
    if raw_counties is None:
        raise CommandError('"counties" are not in redis cache!')
    for county_data in loads(raw_counties):
        self.stdout.write("Processing county: %s\n" % county_data["NAME"])
        get_statistics_for_area(
            None, "%s,%s" % (county_data["state"], county_data["county"]))

    # MSAs are cached per state, so walk the state list a second time.
    for state_data in states:
        raw_msas = redis_conn.get("msa,%s" % state_data["state"])
        if raw_msas is None:
            raise CommandError('"MSAs" for %s are not in redis cache!' % state_data["state"])
        for msa_data in loads(raw_msas):
            self.stdout.write("Processing msa: %s\n" % msa_data["NAME"])
            get_statistics_for_area(
                None,
                "%s,%s,"
                % (msa_data["state"], msa_data["metropolitan statistical area/micropolitan statistical area"]),
            )
示例8: settings_update
def settings_update(request, setting_to_set, new_value=None):
    """Create or update a per-user AccountSetting and mirror it into redis.

    POST requests take the value from request.POST["new_value"]; otherwise
    the new_value argument is used. Short-circuits with a "no change"
    response when an existing setting already holds the value.

    :param request: Django request (must be authenticated)
    :param setting_to_set: setting name to create/update
    :param new_value: value to store when the request is not a POST
    :return: JSON HttpResponse
    """
    if request.method == "POST":
        new_value = request.POST["new_value"]
    # only except post? they have to be logged in anyway though...
    try:  # this could get a get_or_create but that limits us and would just make us write code if we wanted slightly different functionality
        setting = AccountSetting.objects.get(user=request.user, setting_name=setting_to_set)
        if setting.setting_value == new_value:
            return HttpResponse(json.dumps({"msg": "no change"}), content_type='application/json', status=200)
    except Exception as e:
        # NOTE(review): broad catch — AccountSetting.DoesNotExist is the
        # intended case here; anything else is silently treated the same.
        # Fix: message typo "didn't exit yes" -> "didn't exist yet".
        print("Hoping it just didn't exist yet, just in case :: {}".format(e))
        setting = AccountSetting()
        setting.user = request.user
        setting.setting_name = setting_to_set
    print(setting_to_set, new_value)
    # Fix: original wrote `if equal: pass else: ...`; inverted condition.
    if setting.setting_value != new_value:
        setting.setting_value = new_value
        setting.save()
    # now that it's saved in the DB lets save it in the cache! someday...
    try:
        r = redis.from_url(os.getenv('REDISTOGO_URL', 'redis://localhost:6379'))
        r.hset("user.settings.{}.hash".format(request.user.id), setting_to_set, new_value)
    except Exception as e:
        # Cache write is best-effort; the DB row is already saved.
        print(e)
    return HttpResponse(json.dumps({"msg": "I'm not a useful return..."}), content_type='application/json', status=200)
示例9: main
def main():
    """Tail the MySQL binlog and mirror row changes into a local cache.

    Resumes from the binlog file/position persisted in redis, streams
    events, saves changed rows via rcache, and triggers dump commands when
    the cache grows too large. Module-level config (redis_url, cache_url,
    server_id, mysql_settings, events, tables, schemas, blocking,
    binlog_max_latency, cache_max_rows, logger, ...) is assumed in scope.
    """
    rclient = redis.from_url(redis_url)
    cache = rcache.Rcache(cache_url, server_id)
    # Resume point persisted by a previous run (None on first start).
    log_file = rclient.get("log_file")
    log_pos = rclient.get("log_pos")
    log_pos = int(log_pos) if log_pos else None
    only_events = _trans_events(events)
    # RotateEvent must always be watched so we can track binlog file changes.
    only_events.append(RotateEvent)
    stream = BinLogStreamReader(
        connection_settings=mysql_settings,
        server_id=server_id,
        blocking=blocking,
        only_events=only_events,
        only_tables=tables,
        only_schemas=schemas,
        resume_stream=True,  # for resuming
        freeze_schema=False,  # do not support alter table event for faster
        log_file=log_file,
        log_pos=log_pos)
    row_count = 0
    for binlogevent in stream:
        # Warn when replication lags behind the source by too much.
        if int(time.time()) - binlogevent.timestamp > binlog_max_latency:
            logger.warn("latency[{}] too large".format(
                int(time.time()) - binlogevent.timestamp))
        logger.debug("catch {}".format(binlogevent.__class__.__name__))
        if isinstance(binlogevent, RotateEvent):  # listen log_file changed event
            # Persist the new resume point on binlog rotation.
            rclient.set("log_file", binlogevent.next_binlog)
            rclient.set("log_pos", binlogevent.position)
            logger.info("log_file:{}, log_position:{}".format(
                binlogevent.next_binlog, binlogevent.position))
        else:
            row_count += 1
            table = "%s.%s" % (binlogevent.schema, binlogevent.table)
            vals_lst = _get_row_values(binlogevent)
            if not binlogevent.primary_key:
                # NOTE(review): the .get() result is discarded — this looks
                # like it was meant to record/handle tables lacking a
                # primary key; confirm intent.
                tables_without_primary_key.get(table, None)
            try:
                cache.save(table, binlogevent.primary_key, vals_lst)
                logger.debug("save {} {} rows to cache".format(
                    table, len(vals_lst)))
            except rcache.SaveIgnore as err:
                # Row deliberately ignored by the cache layer; log and move on.
                logger.warning(str(err))
            except rcache.FullError as err:
                # Cache out of memory: dump, then retry the same save once.
                logger.info("cache OOM occured: {}.trigger dump command".format(
                    str(err)))
                dump_code = _trigger_dumping()  # NOTE(review): result unused
                cache.save(table, binlogevent.primary_key, vals_lst)
            # Proactive dump once the cache crosses the configured row limit.
            if cache_max_rows and cache.size > cache_max_rows:
                logger.info("cache size:{} >= {}, trigger dumping".format(
                    cache.size, cache_max_rows))
                _trigger_dumping()
            # Persist position after each row event so restarts do not replay it.
            rclient.set("log_pos", binlogevent.packet.log_pos)
        if row_count % 1000 == 0:
            logger.info("save {} changed rows".format(row_count))
    stream.close()
示例10: main
def main():
    """Run the Maslahat.uz Telegram bot loop.

    Connects to Telegram and redis, records the latest processed update id
    in the module-level LAST_UPDATE_ID, then polls forever handling bot
    updates, Facebook checks, and announcements.
    """
    global LAST_UPDATE_ID
    telegram_token = os.environ.get("TELEGRAM_TOKEN")
    logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logger = logging.getLogger("Maslahat.uz")
    logger.setLevel(logging.DEBUG)
    # logger.debug("Initalizing bot ...")
    try:
        bot = telegram.Bot(telegram_token)
        # logger.debug("Connected to Telegram API")
    except telegram.error.TelegramError:
        # NOTE(review): error is swallowed; if Bot() raises, `bot` is never
        # bound and bot.getUpdates() below fails with NameError.
        pass
        # logger.warning("Cannot connect to Telegram server!")
    redis_url = os.environ.get("REDIS_URL")
    redis_conn = redis.from_url(redis_url)
    # logger.debug("Connected to Redis")
    # logger.debug("Receiving updates ...")
    try:
        # Resume from the most recent pending update, if any.
        LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
        # logger.debug("Updates received")
    except IndexError:
        # logger.warning("No update received")
        LAST_UPDATE_ID = None  # no pending updates yet
    # logger.debug("Starting heartbeat ...")
    # NOTE(review): `stat` is not defined in this function or visibly in
    # this file — presumably a module-level object; verify it exists.
    heart_beat(logger, stat)
    # logger.debug("Waiting for updates ...")
    while True:
        bot_worker(redis_conn, bot, logger)
        check_facebook(redis_conn, bot, logger)
        check_announcements(redis_conn, bot, logger)
示例11: test_get_website_from_redis
def test_get_website_from_redis(self):
    """Verify get_website() reads from the main redis and from the backup.

    Stores a prepared Website in the main cache, checks retrieval, then
    moves it to the backup cache (wiping the main one) and checks
    retrieval again, including a non-model attribute surviving the cache
    round trip.

    Bug fix: the original named its local client `redis`, shadowing the
    `redis` module — so the later `redis.from_url(REDIS_URL)` was called
    on a client instance instead of the module. The local is renamed.
    """
    # clear redis
    redis_client = get_redis()
    redis_client.flushall()
    # store known website in redis
    website = Website.objects.all()[0]
    logging.debug('website from DB: %s' % website)
    website.prepare()
    website._special_attribute = 'special'
    cache_website(website, get_website_redis_key(website.token), redis_client)
    # retrieve via normal method
    website_from_method = get_website(website.token)
    self.assertEqual(website, website_from_method)
    self.assertTrue(hasattr(website_from_method, '_special_attribute') and website_from_method._special_attribute == 'special')
    # clear redis and store website in backup...
    redis_client.flushall()
    redis_client = switch_redis()
    website._special_attribute = 'special backup'
    cache_website(website, get_website_redis_key(website.token), redis_client)
    # be very clear that it's in the backup: wipe the main db directly
    main_redis = redis.from_url(REDIS_URL)  # Redis(**REDIS_DBS['default'])
    main_redis.flushdb()
    # retrieve via normal method
    website_from_method = get_website(website.token)
    self.assertEqual(website, website_from_method)
    self.assertTrue(hasattr(website_from_method, '_special_attribute') and website_from_method._special_attribute == 'special backup')
示例12: __init__
def __init__(self, hot_key='hot', url="redis://127.0.0.1", db=None, **kwargs):
    """Connect to redis and verify the connection with a ping.

    :param hot_key: name of the hot-set key (stored by callers elsewhere;
        not referenced in this constructor — presumably used by other
        methods of the class)
    :param url: redis URL passed to redis.from_url
    :param db: database number (passed through to from_url)
    :param kwargs: extra keyword arguments for redis.from_url
    :raises ConnectionError: when the initial ping fails

    Fix: the original used the Python-2-only `except E, err:` syntax;
    `except E as err:` works on both Python 2.6+ and Python 3.
    """
    # Bind _conn first so partially-constructed instances have the attribute.
    self._conn = None
    self._conn = redis.from_url(url, db=db, **kwargs)
    try:
        self._conn.ping()
    except redis.ConnectionError as err:
        # Re-raise as the project's own ConnectionError type.
        raise ConnectionError(str(err))
示例13: get_redis_connection
def get_redis_connection(config):
    """
    Returns a redis connection from a connection config
    """
    # A full URL wins over everything else.
    if 'URL' in config:
        return redis.from_url(config['URL'], db=config['DB'])
    if 'USE_REDIS_CACHE' in config.keys():
        from django.core.cache import get_cache
        cache = get_cache(config['USE_REDIS_CACHE'])
        if not hasattr(cache, 'client'):
            # We're using django-redis-cache
            return cache._client
        # We're using django-redis. The cache's `client` attribute
        # is a pluggable backend that return its Redis connection as
        # its `client`
        try:
            return cache.client.client
        except NotImplementedError:
            pass
    # Fall back to explicit host/port settings.
    return redis.Redis(host=config['HOST'],
                       port=config['PORT'], db=config['DB'],
                       password=config.get('PASSWORD', None))
示例14: particle
def particle(hydrodataset, part, model):
    """Force a single particle through the model and publish its status.

    Wires a redis log handler onto the paegan logger, runs a BaseForcer for
    the given particle, and publishes a COMPLETED or FAILED JSON message
    (keyed by the particle uid) on the model's redis results channel.

    :param hydrodataset: path/URL of the hydrodynamic dataset
    :param part: particle object (must expose .uid)
    :param model: run configuration (redis channels/url, times, paths, ...)
    """
    from paegan.logger import logger
    from paegan.logger.redis_handler import RedisHandler
    rhandler = RedisHandler(model.redis_log_channel, model.redis_url)
    rhandler.setLevel(logging.PROGRESS)
    logger.addHandler(rhandler)
    # Fix: create the connection BEFORE the try block. In the original it
    # was the first statement inside the try, so a from_url failure made the
    # except handler crash with NameError on redis_connection.
    redis_connection = redis.from_url(model.redis_url)
    try:
        forcer = BaseForcer(hydrodataset,
                            particle=part,
                            common_variables=model.common_variables,
                            times=model.times,
                            start_time=model.start,
                            models=model._models,
                            release_location_centroid=model.reference_location.point,
                            usebathy=model._use_bathymetry,
                            useshore=model._use_shoreline,
                            usesurface=model._use_seasurface,
                            reverse_distance=model.reverse_distance,
                            bathy_path=model.bathy_path,
                            shoreline_path=model.shoreline_path,
                            shoreline_feature=model.shoreline_feature,
                            time_method=model.time_method,
                            redis_url=model.redis_url,
                            redis_results_channel=model.redis_results_channel,
                            shoreline_index_buffer=model.shoreline_index_buffer
                            )
        forcer.run()
    except Exception:
        redis_connection.publish(model.redis_results_channel, json.dumps({"status" : "FAILED", "uid" : part.uid }))
    else:
        redis_connection.publish(model.redis_results_channel, json.dumps({"status" : "COMPLETED", "uid" : part.uid }))
示例15: __init__
def __init__(self):
    """Connect to redis and load all cached "small" recarea entries.

    Builds small_recarea_dict from every redis key containing '_small',
    mapping the key's leading '_'-separated segment to its JSON-decoded
    value; recarea_list starts empty.
    """
    self.redis_instance = redis.from_url(REDIS_URL)
    self.recarea_list = []
    # assumes keys() yields str-like keys (decode_responses) — TODO confirm
    small_keys = [k for k in self.redis_instance.keys() if k.find('_small') != -1]
    self.small_recarea_dict = {}
    for key in small_keys:
        self.small_recarea_dict[key.split('_')[0]] = json.loads(self.redis_instance.get(key))