本文整理汇总了Python中tornado.httpclient.AsyncHTTPClient.cache方法的典型用法代码示例。如果您正苦于以下问题:Python AsyncHTTPClient.cache方法的具体用法?Python AsyncHTTPClient.cache怎么用?Python AsyncHTTPClient.cache使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 tornado.httpclient.AsyncHTTPClient 的用法示例。
在下文中一共展示了AsyncHTTPClient.cache方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。(注意:以下示例均不完整,部分代码已被省略,且缩进在摘录时丢失,不能直接运行。)
示例1: make_app
# 需要导入模块: from tornado.httpclient import AsyncHTTPClient [as 别名]
# 或者: from tornado.httpclient.AsyncHTTPClient import cache [as 别名]
def make_app():
# NBConvert config
config = Config()
config.NbconvertApp.fileext = "html"
config.CSSHTMLHeaderTransformer.enabled = False
# don't strip the files prefix - we use it for redirects
# config.Exporter.filters = {'strip_files_prefix': lambda s: s}
# DEBUG env implies both autoreload and log-level
if os.environ.get("DEBUG"):
options.debug = True
logging.getLogger().setLevel(logging.DEBUG)
# setup memcache
mc_pool = ThreadPoolExecutor(options.mc_threads)
# setup formats
formats = configure_formats(options, config, log.app_log)
if options.processes:
pool = ProcessPoolExecutor(options.processes)
else:
pool = ThreadPoolExecutor(options.threads)
memcache_urls = os.environ.get("MEMCACHIER_SERVERS", os.environ.get("MEMCACHE_SERVERS"))
# Handle linked Docker containers
if os.environ.get("NBCACHE_PORT"):
tcp_memcache = os.environ.get("NBCACHE_PORT")
memcache_urls = tcp_memcache.split("tcp://")[1]
if os.environ.get("NBINDEX_PORT"):
log.app_log.info("Indexing notebooks")
tcp_index = os.environ.get("NBINDEX_PORT")
index_url = tcp_index.split("tcp://")[1]
index_host, index_port = index_url.split(":")
indexer = ElasticSearch(index_host, index_port)
else:
log.app_log.info("Not indexing notebooks")
indexer = NoSearch()
if options.no_cache:
log.app_log.info("Not using cache")
cache = MockCache()
elif pylibmc and memcache_urls:
kwargs = dict(pool=mc_pool)
username = os.environ.get("MEMCACHIER_USERNAME", "")
password = os.environ.get("MEMCACHIER_PASSWORD", "")
if username and password:
kwargs["binary"] = True
kwargs["username"] = username
kwargs["password"] = password
log.app_log.info("Using SASL memcache")
else:
log.app_log.info("Using plain memecache")
cache = AsyncMultipartMemcache(memcache_urls.split(","), **kwargs)
else:
log.app_log.info("Using in-memory cache")
cache = DummyAsyncCache()
# setup tornado handlers and settings
template_paths = pjoin(here, "templates")
if options.template_path is not None:
log.app_log.info("Using custom template path {}".format(options.template_path))
template_paths = [options.template_path, template_paths]
static_path = pjoin(here, "static")
env = Environment(loader=FileSystemLoader(template_paths), autoescape=True)
env.filters["markdown"] = markdown.markdown
try:
git_data = git_info(here)
except Exception as e:
app_log.error("Failed to get git info: %s", e)
git_data = {}
else:
git_data["msg"] = escape(git_data["msg"])
if options.no_cache:
# force jinja to recompile template every time
env.globals.update(cache_size=0)
env.globals.update(nrhead=nrhead, nrfoot=nrfoot, git_data=git_data, jupyter_info=jupyter_info(), len=len)
AsyncHTTPClient.configure(HTTPClientClass)
client = AsyncHTTPClient()
client.cache = cache
# load frontpage sections
with io.open(options.frontpage, "r") as f:
frontpage_sections = json.load(f)
# cache frontpage links for the maximum allowed time
max_cache_uris = {""}
for section in frontpage_sections:
for link in section["links"]:
max_cache_uris.add("/" + link["target"])
fetch_kwargs = dict(connect_timeout=10)
if options.proxy_host:
#.........这里部分代码省略.........
示例2: make_app
# 需要导入模块: from tornado.httpclient import AsyncHTTPClient [as 别名]
# 或者: from tornado.httpclient.AsyncHTTPClient import cache [as 别名]
def make_app():
# NBConvert config
config = Config()
config.NbconvertApp.fileext = 'html'
config.CSSHTMLHeaderTransformer.enabled = False
# don't strip the files prefix - we use it for redirects
# config.Exporter.filters = {'strip_files_prefix': lambda s: s}
# DEBUG env implies both autoreload and log-level
if os.environ.get("DEBUG"):
options.debug = True
logging.getLogger().setLevel(logging.DEBUG)
# setup memcache
mc_pool = ThreadPoolExecutor(options.mc_threads)
# setup formats
formats = configure_formats(options, config, log.app_log)
if options.processes:
pool = ProcessPoolExecutor(options.processes)
else:
pool = ThreadPoolExecutor(options.threads)
memcache_urls = os.environ.get('MEMCACHIER_SERVERS',
os.environ.get('MEMCACHE_SERVERS')
)
# Handle linked Docker containers
if(os.environ.get('NBCACHE_PORT')):
tcp_memcache = os.environ.get('NBCACHE_PORT')
memcache_urls = tcp_memcache.split('tcp://')[1]
if(os.environ.get('NBINDEX_PORT')):
log.app_log.info("Indexing notebooks")
tcp_index = os.environ.get('NBINDEX_PORT')
index_url = tcp_index.split('tcp://')[1]
index_host, index_port = index_url.split(":")
indexer = ElasticSearch(index_host, index_port)
else:
log.app_log.info("Not indexing notebooks")
indexer = NoSearch()
if options.no_cache:
log.app_log.info("Not using cache")
cache = MockCache()
elif pylibmc and memcache_urls:
kwargs = dict(pool=mc_pool)
username = os.environ.get('MEMCACHIER_USERNAME', '')
password = os.environ.get('MEMCACHIER_PASSWORD', '')
if username and password:
kwargs['binary'] = True
kwargs['username'] = username
kwargs['password'] = password
log.app_log.info("Using SASL memcache")
else:
log.app_log.info("Using plain memecache")
cache = AsyncMultipartMemcache(memcache_urls.split(','), **kwargs)
else:
log.app_log.info("Using in-memory cache")
cache = DummyAsyncCache()
# setup tornado handlers and settings
template_paths = pjoin(here, 'templates')
if options.template_path is not None:
log.app_log.info("Using custom template path {}".format(
options.template_path)
)
template_paths = [options.template_path, template_paths]
static_path = pjoin(here, 'static')
env = Environment(
loader=FileSystemLoader(template_paths),
autoescape=True
)
env.filters['markdown'] = markdown.markdown
try:
git_data = git_info(here)
except Exception as e:
app_log.error("Failed to get git info: %s", e)
git_data = {}
else:
git_data['msg'] = escape(git_data['msg'])
if options.no_cache:
# force jinja to recompile template every time
env.globals.update(cache_size=0)
env.globals.update(nrhead=nrhead, nrfoot=nrfoot, git_data=git_data,
jupyter_info=jupyter_info(), len=len,
)
AsyncHTTPClient.configure(HTTPClientClass)
client = AsyncHTTPClient()
client.cache = cache
# load frontpage sections
with io.open(options.frontpage, 'r') as f:
#.........这里部分代码省略.........