本文整理汇总了Python中r2.lib.lock.make_lock_factory函数的典型用法代码示例。如果您正苦于以下问题:Python make_lock_factory函数的具体用法?Python make_lock_factory怎么用?Python make_lock_factory使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了make_lock_factory函数的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setup
def setup(self):
    """Initialize app-global state: caches, ZooKeeper live config,
    stats, the distributed-lock factory, and Cassandra pools.

    NOTE(review): truncated by the source page — the tail of the method
    (from the rendercache construction onward) is missing, so the final
    expression below is intentionally unterminated.
    """
    # heavy load mode is read only mode with a different infobar
    if self.heavy_load_mode:
        self.read_only_mode = True

    if hasattr(signal, 'SIGUSR1'):
        # not all platforms have user signals
        signal.signal(signal.SIGUSR1, thread_dump)

    # initialize caches. Any cache-chains built here must be added
    # to cache_chains (closed around by reset_caches) so that they
    # can properly reset their local components
    localcache_cls = (SelfEmptyingCache if self.running_as_script
                      else LocalCache)
    num_mc_clients = self.num_mc_clients

    self.cache_chains = {}

    # for now, zookeeper will be an optional part of the stack.
    # if it's not configured, we will grab the expected config from the
    # [live_config] section of the ini file
    zk_hosts = self.config.get("zookeeper_connection_string")
    if zk_hosts:
        from r2.lib.zookeeper import (connect_to_zookeeper,
                                      LiveConfig, LiveList)
        zk_username = self.config["zookeeper_username"]
        zk_password = self.config["zookeeper_password"]
        self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                         zk_password))
        self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
        self.throttles = LiveList(self.zookeeper, "/throttles",
                                  map_fn=ipaddress.ip_network,
                                  reduce_fn=ipaddress.collapse_addresses)
    else:
        self.zookeeper = None
        parser = ConfigParser.RawConfigParser()
        parser.read([self.config["__file__"]])
        self.live_config = extract_live_config(parser, self.plugins)
        self.throttles = tuple()  # immutable since it's not real

    # main memcache pool plus a dedicated pool for distributed locks
    self.memcache = CMemcache(self.memcaches, num_clients = num_mc_clients)
    self.lock_cache = CMemcache(self.lockcaches, num_clients=num_mc_clients)

    self.stats = Stats(self.config.get('statsd_addr'),
                       self.config.get('statsd_sample_rate'))

    # hook SQLAlchemy cursor execution so query timings flow into stats
    event.listens_for(engine.Engine, 'before_cursor_execute')(
        self.stats.pg_before_cursor_execute)
    event.listens_for(engine.Engine, 'after_cursor_execute')(
        self.stats.pg_after_cursor_execute)

    # lock factory backed by the dedicated lock memcache pool
    self.make_lock = make_lock_factory(self.lock_cache, self.stats)

    if not self.cassandra_seeds:
        raise ValueError("cassandra_seeds not set in the .ini")

    keyspace = "reddit"
    self.cassandra_pools = {
        "main":
            StatsCollectingConnectionPool(
                keyspace,
                stats=self.stats,
                logging_name="main",
                server_list=self.cassandra_seeds,
                pool_size=self.cassandra_pool_size,
                timeout=2,
                max_retries=3,
                prefill=False
            ),
    }

    perma_memcache = (CMemcache(self.permacache_memcaches, num_clients = num_mc_clients)
                      if self.permacache_memcaches
                      else None)
    self.permacache = CassandraCacheChain(localcache_cls(),
                                          CassandraCache('permacache',
                                                         self.cassandra_pools[self.cassandra_default_pool],
                                                         read_consistency_level = self.cassandra_rcl,
                                                         write_consistency_level = self.cassandra_wcl),
                                          memcache = perma_memcache,
                                          lock_factory = self.make_lock)
    self.cache_chains.update(permacache=self.permacache)

    # hardcache is done after the db info is loaded, and then the
    # chains are reset to use the appropriate initial entries
    if self.stalecaches:
        self.cache = StaleCacheChain(localcache_cls(),
                                     CMemcache(self.stalecaches, num_clients=num_mc_clients),
                                     self.memcache)
    else:
        self.cache = MemcacheChain((localcache_cls(), self.memcache))
    self.cache_chains.update(cache=self.cache)

    self.rendercache = MemcacheChain((localcache_cls(),
                                      CMemcache(self.rendercaches,
                                                noreply=True, no_block=True,
    # ......... rest of the code omitted by the source page .........
示例2: setup
# ......... preceding code omitted by the source page .........
# NOTE(review): fragment of a Globals.setup() method — 'self' is the
# app-globals object; CMemcache/make_lock_factory come from the enclosing
# module. Indentation reconstructed; confirm against the full source.
num_mc_clients = self.num_mc_clients

# the main memcache pool. used for most everything.
self.memcache = CMemcache(
    self.memcaches,
    min_compress_len=50 * 1024,
    num_clients=num_mc_clients,
)

# a pool just used for @memoize results
memoizecaches = CMemcache(
    self.memoizecaches,
    min_compress_len=50 * 1024,
    num_clients=num_mc_clients,
)

# a pool just for srmember rels
srmembercaches = CMemcache(
    self.srmembercaches,
    min_compress_len=96,
    num_clients=num_mc_clients,
)

# a pool for rate-limit counters
ratelimitcaches = CMemcache(
    self.ratelimitcaches,
    min_compress_len=96,
    num_clients=num_mc_clients,
)

# a smaller pool of caches used only for distributed locks.
# TODO: move this to ZooKeeper
self.lock_cache = CMemcache(self.lockcaches,
                            num_clients=num_mc_clients)
self.make_lock = make_lock_factory(self.lock_cache, self.stats)

# memcaches used in front of the permacache CF in cassandra.
# XXX: this is a legacy thing; permacache was made when C* didn't have
# a row cache.
if self.permacache_memcaches:
    permacache_memcaches = CMemcache(self.permacache_memcaches,
                                     min_compress_len=50 * 1024,
                                     num_clients=num_mc_clients)
else:
    permacache_memcaches = None

# the stalecache is a memcached local to the current app server used
# for data that's frequently fetched but doesn't need to be fresh.
if self.stalecaches:
    stalecaches = CMemcache(self.stalecaches,
                            num_clients=num_mc_clients)
else:
    stalecaches = None

# rendercache holds rendered partial templates.
rendercaches = CMemcache(
    self.rendercaches,
    noreply=True,
    no_block=True,
    num_clients=num_mc_clients,
    min_compress_len=480,
)

# pagecaches hold fully rendered pages
pagecaches = CMemcache(
    self.pagecaches,
    noreply=True,
# ......... rest of the code omitted by the source page .........
示例3: setup
def setup(self, global_conf):
    """Initialize app-global state from ``global_conf``: caches, the
    lock factory, Cassandra, database params, and cache-reset hooks.

    NOTE(review): truncated by the source page — the tail of the method
    is missing after the origin-URL setup.
    """
    # heavy load mode is read only mode with a different infobar
    if self.heavy_load_mode:
        self.read_only_mode = True

    if hasattr(signal, 'SIGUSR1'):
        # not all platforms have user signals
        signal.signal(signal.SIGUSR1, thread_dump)

    # initialize caches. Any cache-chains built here must be added
    # to cache_chains (closed around by reset_caches) so that they
    # can properly reset their local components
    localcache_cls = (SelfEmptyingCache if self.running_as_script
                      else LocalCache)
    num_mc_clients = self.num_mc_clients

    self.cache_chains = []

    self.memcache = CMemcache(self.memcaches, num_clients = num_mc_clients)
    # older variant: locks share the main memcache pool (no stats arg)
    self.make_lock = make_lock_factory(self.memcache)

    if not self.cassandra_seeds:
        raise ValueError("cassandra_seeds not set in the .ini")
    self.cassandra = PycassaConnectionPool('reddit',
                                           server_list = self.cassandra_seeds,
                                           pool_size = len(self.cassandra_seeds),
                                           # TODO: .ini setting
                                           timeout=15, max_retries=3,
                                           prefill=False)

    perma_memcache = (CMemcache(self.permacache_memcaches, num_clients = num_mc_clients)
                      if self.permacache_memcaches
                      else None)
    self.permacache = CassandraCacheChain(localcache_cls(),
                                          CassandraCache('permacache',
                                                         self.cassandra,
                                                         read_consistency_level = self.cassandra_rcl,
                                                         write_consistency_level = self.cassandra_wcl),
                                          memcache = perma_memcache,
                                          lock_factory = self.make_lock)
    self.cache_chains.append(self.permacache)

    # hardcache is done after the db info is loaded, and then the
    # chains are reset to use the appropriate initial entries
    if self.stalecaches:
        self.cache = StaleCacheChain(localcache_cls(),
                                     CMemcache(self.stalecaches, num_clients=num_mc_clients),
                                     self.memcache)
    else:
        self.cache = MemcacheChain((localcache_cls(), self.memcache))
    self.cache_chains.append(self.cache)

    self.rendercache = MemcacheChain((localcache_cls(),
                                      CMemcache(self.rendercaches,
                                                noreply=True, no_block=True,
                                                num_clients = num_mc_clients)))
    self.cache_chains.append(self.rendercache)

    self.servicecache = MemcacheChain((localcache_cls(),
                                       CMemcache(self.servicecaches,
                                                 num_clients = num_mc_clients)))
    self.cache_chains.append(self.servicecache)

    self.thing_cache = CacheChain((localcache_cls(),))
    self.cache_chains.append(self.thing_cache)

    # load the database info
    self.dbm = self.load_db_params(global_conf)

    # can't do this until load_db_params() has been called
    self.hardcache = HardcacheChain((localcache_cls(),
                                     self.memcache,
                                     HardCache(self)),
                                    cache_negative_results = True)
    self.cache_chains.append(self.hardcache)

    # I know this sucks, but we need non-request-threads to be
    # able to reset the caches, so we need them be able to close
    # around 'cache_chains' without being able to call getattr on
    # 'g'
    cache_chains = self.cache_chains[::]

    def reset_caches():
        for chain in cache_chains:
            chain.reset()

    self.reset_caches = reset_caches
    self.reset_caches()

    # make a query cache
    self.stats_collector = QueryStats()

    # set the modwindow
    self.MODWINDOW = timedelta(self.MODWINDOW)

    self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))

    origin_prefix = self.domain_prefix + "." if self.domain_prefix else ""
    self.origin = "http://" + origin_prefix + self.domain
    # ......... rest of the code omitted by the source page .........
示例4: setup
# ......... preceding code omitted by the source page .........
# NOTE(review): fragment of a Globals.setup() method; 'self' is the
# app-globals object. Indentation reconstructed from syntax.

# for now, zookeeper will be an optional part of the stack.
# if it's not configured, we will grab the expected config from the
# [live_config] section of the ini file
zk_hosts = self.config.get("zookeeper_connection_string")
if zk_hosts:
    from r2.lib.zookeeper import (connect_to_zookeeper,
                                  LiveConfig, LiveList)
    zk_username = self.config["zookeeper_username"]
    zk_password = self.config["zookeeper_password"]
    self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username,
                                                     zk_password))
    self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
    self.throttles = LiveList(self.zookeeper, "/throttles",
                              map_fn=ipaddress.ip_network,
                              reduce_fn=ipaddress.collapse_addresses)
else:
    self.zookeeper = None
    parser = ConfigParser.RawConfigParser()
    parser.read([self.config["__file__"]])
    self.live_config = extract_live_config(parser, self.plugins)
    self.throttles = tuple()  # immutable since it's not real

self.startup_timer.intermediate("zookeeper")

################# MEMCACHE
num_mc_clients = self.num_mc_clients

# the main memcache pool. used for most everything.
self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)

# a smaller pool of caches used only for distributed locks.
# TODO: move this to ZooKeeper
self.lock_cache = CMemcache(self.lockcaches,
                            num_clients=num_mc_clients)
self.make_lock = make_lock_factory(self.lock_cache, self.stats)

# memcaches used in front of the permacache CF in cassandra.
# XXX: this is a legacy thing; permacache was made when C* didn't have
# a row cache.
if self.permacache_memcaches:
    permacache_memcaches = CMemcache(self.permacache_memcaches,
                                     num_clients=num_mc_clients)
else:
    permacache_memcaches = None

# the stalecache is a memcached local to the current app server used
# for data that's frequently fetched but doesn't need to be fresh.
if self.stalecaches:
    stalecaches = CMemcache(self.stalecaches,
                            num_clients=num_mc_clients)
else:
    stalecaches = None

# rendercache holds rendered partial templates.
rendercaches = CMemcache(
    self.rendercaches,
    noreply=True,
    no_block=True,
    num_clients=num_mc_clients,
)

# pagecaches hold fully rendered pages
pagecaches = CMemcache(
    self.pagecaches,
    noreply=True,
    no_block=True,
    num_clients=num_mc_clients,
# ......... rest of the code omitted by the source page .........
示例5: setup
# ......... preceding code omitted by the source page .........
# NOTE(review): fragment of a Globals.setup() method. This newer variant
# gives each CMemcache pool a name and a list of validators.

# a pool just for srmember rels
srmembercaches = CMemcache(
    "srmember",
    self.srmembercaches,
    min_compress_len=96,
    num_clients=num_mc_clients,
    validators=[validate_size_error],
)

# a pool just for rels
relcaches = CMemcache(
    "rel",
    self.relcaches,
    min_compress_len=96,
    num_clients=num_mc_clients,
    validators=[validate_size_error],
)

# a pool for rate-limit counters
ratelimitcaches = CMemcache(
    "ratelimit",
    self.ratelimitcaches,
    min_compress_len=96,
    num_clients=num_mc_clients,
    validators=[validate_size_error],
)

# a smaller pool of caches used only for distributed locks.
self.lock_cache = CMemcache(
    "lock",
    self.lockcaches,
    num_clients=num_mc_clients,
    validators=[validate_size_error],
)
self.make_lock = make_lock_factory(self.lock_cache, self.stats)

# memcaches used in front of the permacache CF in cassandra.
# XXX: this is a legacy thing; permacache was made when C* didn't have
# a row cache.
permacache_memcaches = CMemcache("perma",
                                 self.permacache_memcaches,
                                 min_compress_len=1400,
                                 num_clients=num_mc_clients,
                                 validators=[],)

# the stalecache is a memcached local to the current app server used
# for data that's frequently fetched but doesn't need to be fresh.
if self.stalecaches:
    stalecaches = CMemcache(
        "stale",
        self.stalecaches,
        num_clients=num_mc_clients,
        validators=[validate_size_error],
    )
else:
    stalecaches = None

# hardcache memcache pool
hardcache_memcaches = CMemcache(
    "hardcache",
    self.hardcache_memcaches,
    binary=True,
    min_compress_len=1400,
    num_clients=num_mc_clients,
    validators=[validate_size_error],
)
# ......... rest of the code omitted by the source page .........
示例6: __init__
def __init__(self, global_conf, app_conf, paths, **extra):
    """
    Globals acts as a container for objects available throughout
    the life of the application.

    One instance of Globals is created by Pylons during
    application initialization and is available during requests
    via the 'g' variable.

    ``global_conf``
        The same variable used throughout ``config/middleware.py``
        namely, the variables from the ``[DEFAULT]`` section of the
        configuration file.

    ``app_conf``
        The same ``kw`` dictionary used throughout
        ``config/middleware.py`` namely, the variables from the
        section in the config file for your application.

    ``extra``
        The configuration returned from ``load_config`` in
        ``config/middleware.py`` which may be of use in the setup of
        your global variables.

    NOTE(review): truncated by the source page — the tail of the
    constructor (after logger setup) is missing.
    """
    # config-value coercion helpers
    def to_bool(x):
        return (x.lower() == 'true') if x else None

    def to_iter(name, delim = ','):
        return (x.strip() for x in global_conf.get(name, '').split(delim))

    # slop over all variables to start with
    for k, v in global_conf.iteritems():
        if not k.startswith("_") and not hasattr(self, k):
            if k in self.int_props:
                v = int(v)
            elif k in self.bool_props:
                v = to_bool(v)
            elif k in self.tuple_props:
                v = tuple(to_iter(k))
            setattr(self, k, v)

    # initialize caches
    mc = Memcache(self.memcaches)
    self.cache = CacheChain((LocalCache(), mc))
    self.permacache = Memcache(self.permacaches)
    self.rendercache = Memcache(self.rendercaches)
    # locks are backed by the main memcache client here
    self.make_lock = make_lock_factory(mc)
    self.rec_cache = Memcache(self.rec_cache)

    # set default time zone if one is not set
    self.tz = pytz.timezone(global_conf.get('timezone'))

    # make a query cache
    self.stats_collector = QueryStats()

    # set the modwindow
    self.MODWINDOW = timedelta(self.MODWINDOW)

    self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))

    # turn on for language support
    self.languages, self.lang_name = _get_languages()

    all_languages = self.lang_name.keys()
    all_languages.sort()
    self.all_languages = all_languages

    # load the md5 hashes of files under static
    static_files = os.path.join(paths.get('static_files'), 'static')
    self.static_md5 = {}
    if os.path.exists(static_files):
        for f in os.listdir(static_files):
            if f.endswith('.md5'):
                key = f[0:-4]
                f = os.path.join(static_files, f)
                with open(f, 'r') as handle:
                    md5 = handle.read().strip('\n')
                self.static_md5[key] = md5

    # set up the logging directory
    log_path = self.log_path
    process_iden = global_conf.get('scgi_port', 'default')
    if log_path:
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        # remove stale logs left behind by a previous process on this port
        for fname in os.listdir(log_path):
            if fname.startswith(process_iden):
                full_name = os.path.join(log_path, fname)
                os.remove(full_name)

    # setup the logger
    self.log = logging.getLogger('reddit')
    self.log.addHandler(logging.StreamHandler())
    if self.debug:
        self.log.setLevel(logging.DEBUG)
    # ......... rest of the code omitted by the source page .........
示例7: __init__
def __init__(self, global_conf, app_conf, paths, **extra):
    """
    Globals acts as a container for objects available throughout
    the life of the application.

    One instance of Globals is created by Pylons during
    application initialization and is available during requests
    via the 'g' variable.

    ``global_conf``
        The same variable used throughout ``config/middleware.py``
        namely, the variables from the ``[DEFAULT]`` section of the
        configuration file.

    ``app_conf``
        The same ``kw`` dictionary used throughout
        ``config/middleware.py`` namely, the variables from the
        section in the config file for your application.

    ``extra``
        The configuration returned from ``load_config`` in
        ``config/middleware.py`` which may be of use in the setup of
        your global variables.

    NOTE(review): truncated by the source page — the tail of the
    constructor is missing, so the final expression below is
    intentionally unterminated.
    """
    # slop over all variables to start with
    for k, v in global_conf.iteritems():
        if not k.startswith("_") and not hasattr(self, k):
            if k in self.int_props:
                v = int(v)
            elif k in self.float_props:
                v = float(v)
            elif k in self.bool_props:
                v = self.to_bool(v)
            elif k in self.tuple_props:
                v = tuple(self.to_iter(v))
            elif k in self.choice_props:
                if v not in self.choice_props[k]:
                    raise ValueError("Unknown option for %r: %r not in %r"
                                     % (k, v, self.choice_props[k]))
                v = self.choice_props[k][v]
            setattr(self, k, v)

    self.running_as_script = global_conf.get('running_as_script', False)

    # initialize caches. Any cache-chains built here must be added
    # to cache_chains (closed around by reset_caches) so that they
    # can properly reset their local components
    localcache_cls = (SelfEmptyingCache if self.running_as_script
                      else LocalCache)
    num_mc_clients = self.num_mc_clients

    self.cache_chains = []

    self.memcache = CMemcache(self.memcaches, num_clients = num_mc_clients)
    # older variant: locks share the main memcache pool (no stats arg)
    self.make_lock = make_lock_factory(self.memcache)

    if not self.cassandra_seeds:
        raise ValueError("cassandra_seeds not set in the .ini")
    if not self.url_seeds:
        raise ValueError("url_seeds not set in the .ini")

    # shuffle the seed lists so load is spread across the clusters
    self.cassandra_seeds = list(self.cassandra_seeds)
    random.shuffle(self.cassandra_seeds)
    self.cassandra = pycassa.connect_thread_local(self.cassandra_seeds)

    perma_memcache = (CMemcache(self.permacache_memcaches, num_clients = num_mc_clients)
                      if self.permacache_memcaches
                      else None)
    self.permacache = self.init_cass_cache('permacache', 'permacache',
                                           self.cassandra,
                                           self.make_lock,
                                           memcache = perma_memcache,
                                           read_consistency_level = self.cassandra_rcl,
                                           write_consistency_level = self.cassandra_wcl,
                                           localcache_cls = localcache_cls)
    self.cache_chains.append(self.permacache)

    self.url_seeds = list(self.url_seeds)
    random.shuffle(self.url_seeds)
    self.url_cassandra = pycassa.connect_thread_local(self.url_seeds)
    self.urlcache = self.init_cass_cache('urls', 'urls',
                                         self.url_cassandra,
                                         self.make_lock,
                                         # until we've merged this
                                         # with the regular
                                         # cluster, this will
                                         # always be CL_ONE
                                         read_consistency_level = CL_ONE,
                                         write_consistency_level = CL_ONE,
                                         localcache_cls = localcache_cls)
    self.cache_chains.append(self.urlcache)

    # hardcache is done after the db info is loaded, and then the
    # chains are reset to use the appropriate initial entries
    self.cache = MemcacheChain((localcache_cls(), self.memcache))
    self.cache_chains.append(self.cache)

    self.rendercache = MemcacheChain((localcache_cls(),
    # ......... rest of the code omitted by the source page .........
示例8: setup
def setup(self, global_conf):
    """Initialize app-global state: caches, stats, the lock factory,
    and Cassandra pools (with a no-retry variant).

    NOTE(review): truncated by the source page — the hardcache
    construction at the end is intentionally unterminated.
    """
    # heavy load mode is read only mode with a different infobar
    if self.heavy_load_mode:
        self.read_only_mode = True

    if hasattr(signal, 'SIGUSR1'):
        # not all platforms have user signals
        signal.signal(signal.SIGUSR1, thread_dump)

    # initialize caches. Any cache-chains built here must be added
    # to cache_chains (closed around by reset_caches) so that they
    # can properly reset their local components
    localcache_cls = (SelfEmptyingCache if self.running_as_script
                      else LocalCache)
    num_mc_clients = self.num_mc_clients

    self.cache_chains = {}

    self.memcache = CMemcache(self.memcaches, num_clients = num_mc_clients)
    # older variant: locks share the main memcache pool (no stats arg)
    self.make_lock = make_lock_factory(self.memcache)

    self.stats = Stats(global_conf.get('statsd_addr'),
                       global_conf.get('statsd_sample_rate'))

    # hook SQLAlchemy cursor execution so query timings flow into stats
    event.listens_for(engine.Engine, 'before_cursor_execute')(
        self.stats.pg_before_cursor_execute)
    event.listens_for(engine.Engine, 'after_cursor_execute')(
        self.stats.pg_after_cursor_execute)

    if not self.cassandra_seeds:
        raise ValueError("cassandra_seeds not set in the .ini")

    keyspace = "reddit"
    self.cassandra_pools = {
        "main":
            StatsCollectingConnectionPool(
                keyspace,
                stats=self.stats,
                logging_name="main",
                server_list=self.cassandra_seeds,
                pool_size=self.cassandra_pool_size,
                timeout=2,
                max_retries=3,
                prefill=False
            ),
        "noretries":
            StatsCollectingConnectionPool(
                keyspace,
                stats=self.stats,
                logging_name="noretries",
                server_list=self.cassandra_seeds,
                pool_size=len(self.cassandra_seeds),
                timeout=2,
                max_retries=0,
                prefill=False
            ),
    }

    perma_memcache = (CMemcache(self.permacache_memcaches, num_clients = num_mc_clients)
                      if self.permacache_memcaches
                      else None)
    self.permacache = CassandraCacheChain(localcache_cls(),
                                          CassandraCache('permacache',
                                                         self.cassandra_pools[self.cassandra_default_pool],
                                                         read_consistency_level = self.cassandra_rcl,
                                                         write_consistency_level = self.cassandra_wcl),
                                          memcache = perma_memcache,
                                          lock_factory = self.make_lock)
    self.cache_chains.update(permacache=self.permacache)

    # hardcache is done after the db info is loaded, and then the
    # chains are reset to use the appropriate initial entries
    if self.stalecaches:
        self.cache = StaleCacheChain(localcache_cls(),
                                     CMemcache(self.stalecaches, num_clients=num_mc_clients),
                                     self.memcache)
    else:
        self.cache = MemcacheChain((localcache_cls(), self.memcache))
    self.cache_chains.update(cache=self.cache)

    self.rendercache = MemcacheChain((localcache_cls(),
                                      CMemcache(self.rendercaches,
                                                noreply=True, no_block=True,
                                                num_clients = num_mc_clients)))
    self.cache_chains.update(rendercache=self.rendercache)

    self.thing_cache = CacheChain((localcache_cls(),))
    self.cache_chains.update(thing_cache=self.thing_cache)

    # load the database info
    self.dbm = self.load_db_params(global_conf)

    # can't do this until load_db_params() has been called
    self.hardcache = HardcacheChain((localcache_cls(),
                                     self.memcache,
                                     HardCache(self)),
    # ......... rest of the code omitted by the source page .........
示例9: setup
# ......... preceding code omitted by the source page .........
# NOTE(review): fragment of a Globals.setup() method; this variant also
# maintains a ZooKeeper-backed banned-domains LiveDict.

# for now, zookeeper will be an optional part of the stack.
# if it's not configured, we will grab the expected config from the
# [live_config] section of the ini file
zk_hosts = self.config.get("zookeeper_connection_string")
if zk_hosts:
    from r2.lib.zookeeper import connect_to_zookeeper, LiveConfig, LiveList
    zk_username = self.config["zookeeper_username"]
    zk_password = self.config["zookeeper_password"]
    self.zookeeper = connect_to_zookeeper(zk_hosts, (zk_username, zk_password))
    self.live_config = LiveConfig(self.zookeeper, LIVE_CONFIG_NODE)
    self.throttles = LiveList(
        self.zookeeper, "/throttles", map_fn=ipaddress.ip_network, reduce_fn=ipaddress.collapse_addresses
    )
    self.banned_domains = LiveDict(self.zookeeper, "/banned-domains", watch=True)
else:
    self.zookeeper = None
    parser = ConfigParser.RawConfigParser()
    parser.read([self.config["__file__"]])
    self.live_config = extract_live_config(parser, self.plugins)
    self.throttles = tuple()  # immutable since it's not real
    self.banned_domains = dict()
self.startup_timer.intermediate("zookeeper")

################# MEMCACHE
num_mc_clients = self.num_mc_clients

# the main memcache pool. used for most everything.
self.memcache = CMemcache(self.memcaches, num_clients=num_mc_clients)

# a smaller pool of caches used only for distributed locks.
# TODO: move this to ZooKeeper
self.lock_cache = CMemcache(self.lockcaches, num_clients=num_mc_clients)
self.make_lock = make_lock_factory(self.lock_cache, self.stats)

# memcaches used in front of the permacache CF in cassandra.
# XXX: this is a legacy thing; permacache was made when C* didn't have
# a row cache.
if self.permacache_memcaches:
    permacache_memcaches = CMemcache(self.permacache_memcaches, num_clients=num_mc_clients)
else:
    permacache_memcaches = None

# the stalecache is a memcached local to the current app server used
# for data that's frequently fetched but doesn't need to be fresh.
if self.stalecaches:
    stalecaches = CMemcache(self.stalecaches, num_clients=num_mc_clients)
else:
    stalecaches = None

# rendercache holds rendered partial templates.
rendercaches = CMemcache(self.rendercaches, noreply=True, no_block=True, num_clients=num_mc_clients)

# pagecaches hold fully rendered pages
pagecaches = CMemcache(self.pagecaches, noreply=True, no_block=True, num_clients=num_mc_clients)

self.startup_timer.intermediate("memcache")

################# CASSANDRA
keyspace = "reddit"
self.cassandra_pools = {
    "main": StatsCollectingConnectionPool(
        keyspace,
        stats=self.stats,
        logging_name="main",
        server_list=self.cassandra_seeds,
# ......... rest of the code omitted by the source page .........
示例10: __init__
def __init__(self, global_conf, app_conf, paths, **extra):
    """
    Globals acts as a container for objects available throughout
    the life of the application.

    One instance of Globals is created by Pylons during
    application initialization and is available during requests
    via the 'g' variable.

    ``global_conf``
        The same variable used throughout ``config/middleware.py``
        namely, the variables from the ``[DEFAULT]`` section of the
        configuration file.

    ``app_conf``
        The same ``kw`` dictionary used throughout
        ``config/middleware.py`` namely, the variables from the
        section in the config file for your application.

    ``extra``
        The configuration returned from ``load_config`` in
        ``config/middleware.py`` which may be of use in the setup of
        your global variables.

    NOTE(review): truncated by the source page — the tail of the
    constructor (after logger setup) is missing.
    """
    # slop over all variables to start with
    for k, v in global_conf.iteritems():
        if not k.startswith("_") and not hasattr(self, k):
            if k in self.int_props:
                v = int(v)
            elif k in self.float_props:
                v = float(v)
            elif k in self.bool_props:
                v = self.to_bool(v)
            elif k in self.tuple_props:
                v = tuple(self.to_iter(v))
            setattr(self, k, v)

    self.paid_sponsors = set(x.lower() for x in self.paid_sponsors)

    # initialize caches
    mc = Memcache(self.memcaches, pickleProtocol=1)
    self.memcache = mc
    self.cache = CacheChain((LocalCache(), mc))
    self.permacache = Memcache(self.permacaches, pickleProtocol=1)
    self.rendercache = Memcache(self.rendercaches, pickleProtocol=1)
    # locks are backed by the main memcache client here
    self.make_lock = make_lock_factory(mc)
    self.rec_cache = Memcache(self.rec_cache, pickleProtocol=1)

    # set default time zone if one is not set
    tz = global_conf.get("timezone")
    dtz = global_conf.get("display_timezone", tz)
    self.tz = pytz.timezone(tz)
    self.display_tz = pytz.timezone(dtz)

    # load the database info
    self.dbm = self.load_db_params(global_conf)

    # make a query cache
    self.stats_collector = QueryStats()

    # set the modwindow
    self.MODWINDOW = timedelta(self.MODWINDOW)

    self.REDDIT_MAIN = bool(os.environ.get("REDDIT_MAIN"))

    # turn on for language support
    self.languages, self.lang_name = get_active_langs(default_lang=self.lang)

    all_languages = self.lang_name.keys()
    all_languages.sort()
    self.all_languages = all_languages

    # load the md5 hashes of files under static
    static_files = os.path.join(paths.get("static_files"), "static")
    self.static_md5 = {}
    if os.path.exists(static_files):
        for f in os.listdir(static_files):
            if f.endswith(".md5"):
                # NOTE(review): str.strip(".md5") strips *characters*, not
                # a suffix — it would mangle a name like "d5.md5"; other
                # examples on this page use f[0:-4]. Confirm upstream.
                key = f.strip(".md5")
                f = os.path.join(static_files, f)
                with open(f, "r") as handle:
                    md5 = handle.read().strip("\n")
                self.static_md5[key] = md5

    # set up the logging directory
    log_path = self.log_path
    process_iden = global_conf.get("scgi_port", "default")
    if log_path:
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        # remove stale logs left behind by a previous process on this port
        for fname in os.listdir(log_path):
            if fname.startswith(process_iden):
                full_name = os.path.join(log_path, fname)
                os.remove(full_name)

    # setup the logger
    # ......... rest of the code omitted by the source page .........