本文整理汇总了Python中cache.CacheHelper.active_queries方法的典型用法代码示例。如果您正苦于以下问题:Python CacheHelper.active_queries方法的具体用法?Python CacheHelper.active_queries怎么用?Python CacheHelper.active_queries使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cache.CacheHelper
的用法示例。
在下文中一共展示了CacheHelper.active_queries方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: queryConsumer
# 需要导入模块: from cache import CacheHelper [as 别名]
# 或者: from cache.CacheHelper import active_queries [as 别名]
def queryConsumer(queryQueue="query_default"):
    """Consume one queued query-workload message and make it the active workload.

    Pops a JSON message from *queryQueue* (if any is waiting), wraps it in a
    QueryWorkload, deactivates the previously active workload, and activates
    the new one so the queryRunner task picks it up.  If the message carries an
    'rcq' reply-queue name, an acknowledgement is posted there.
    """
    rabbitHelper = queryConsumer.rabbitHelper
    pendingMsgs = rabbitHelper.qsize(queryQueue)

    # Only a single query workload can be active at a time (multi-query is not
    # supported here), so remember the currently active one — if any — so it
    # can be deactivated when a replacement arrives.
    all_queries = CacheHelper.active_queries()
    active_query = all_queries[0] if len(all_queries) > 0 else None

    if pendingMsgs > 0:
        # Set up a new query workload from the queued message.
        queryMsg = rabbitHelper.getJsonMsg(queryQueue)
        logger.error(queryMsg)

        try:
            queryWorkload = QueryWorkload(queryMsg)

            # Retire the old workload before promoting the new one, which
            # the queryRunner task will detect via its `active` flag.
            if active_query is not None:
                active_query.active = False
            queryWorkload.active = True

            # Acknowledge on the caller's reply queue when one was provided.
            if 'rcq' in queryMsg:
                rabbitHelper.putMsg(queryMsg['rcq'],
                                    "Started Querying: %s/%s" %
                                    (queryWorkload.ddoc, queryWorkload.view))
        except KeyError:
            # Message was missing a field QueryWorkload requires.
            logger.info("Invalid query workload message: %s" % queryMsg)
示例2: queryRunner
# 需要导入模块: from cache import CacheHelper [as 别名]
# 或者: from cache.CacheHelper import active_queries [as 别名]
def queryRunner():
    """Dispatch one round of view queries for every active query workload.

    Resolves the current cluster hosts, then for each active workload fires an
    async stats update plus a batch of `qps` view queries built from the
    workload's filter and key-range settings.
    """
    # Resolve cluster hosts from the cached cluster status, when available.
    hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus:
        hosts = clusterStatus.get_all_hosts()

    # Fan out one query batch per active workload.
    for workload in CacheHelper.active_queries():

        # Refresh the workload object asynchronously.
        updateQueryWorkload.apply_async(args=[workload])

        batch_size = int(workload.qps)

        # Effective filters = include set minus exclude set.
        effective_filters = list(set(workload.include_filters) -
                                 set(workload.exclude_filters))

        view_params = generateQueryParams(workload.indexed_key,
                                          workload.bucket,
                                          effective_filters,
                                          workload.limit,
                                          workload.startkey,
                                          workload.endkey,
                                          workload.startkey_docid,
                                          workload.endkey_docid)

        multi_query.delay(batch_size,
                          workload.ddoc,
                          workload.view,
                          view_params,
                          workload.bucket,
                          workload.password,
                          hosts=hosts)
示例3: queryRunner
# 需要导入模块: from cache import CacheHelper [as 别名]
# 或者: from cache.CacheHelper import active_queries [as 别名]
def queryRunner():
    """Fire one batch of stale-tolerant view queries per active query workload.

    Each workload gets `qps` queries against its ddoc/view using the
    `stale=update_after` consistency mode (index may lag the data).
    """
    for workload in CacheHelper.active_queries():
        batch_size = int(workload.qps)
        # Allow stale results; the index catches up after the read.
        view_params = {"stale": "update_after"}
        multi_query.delay(batch_size,
                          workload.ddoc,
                          workload.view,
                          view_params,
                          workload.bucket,
                          workload.password)
示例4: query_ops_manager
# 需要导入模块: from cache import CacheHelper [as 别名]
# 或者: from cache.CacheHelper import active_queries [as 别名]
def query_ops_manager(max_msgs=10, isovercommited=False):
    """Throttle query workloads whose task queues have backed up.

    For every active query workload whose task queue holds more than
    *max_msgs* pending tasks (or unconditionally when *isovercommited* is
    set), purge the backlog and cut the workload's query rate by 10%, but
    never below a floor of 10 queries/sec.
    """
    rabbitHelper = query_ops_manager.rabbitHelper

    for workload in CacheHelper.active_queries():
        overloaded = rabbitHelper.qsize(workload.task_queue) > max_msgs
        if overloaded or isovercommited:
            # Drop the waiting tasks; they would only add to the overload.
            rabbitHelper.purge(workload.task_queue)

            # Throttle the rate down by 10%, respecting the 10 qps floor.
            throttled_qps = workload.qps * 0.90
            if throttled_qps > 10:
                workload.qps = throttled_qps
                logger.error("Cluster Overcommited: reduced queries/sec to (%s)" % workload.qps)
示例5: queryRunner
# 需要导入模块: from cache import CacheHelper [as 别名]
# 或者: from cache.CacheHelper import active_queries [as 别名]
def queryRunner(max_msgs=10):
    """Dispatch query batches for active workloads, backing off when saturated.

    If the shared multi-query request queue already holds more than *max_msgs*
    pending HTTP requests, the backlog is purged and query_ops_manager is told
    to throttle the workloads instead of dispatching more work.  Otherwise one
    batch of `qps` queries is fired per active workload.
    """
    rabbitHelper = queryRunner.rabbitHelper

    # Queue holding not-yet-issued HTTP view requests.
    pending_http_requests = "query_multi_" + cfg.CB_CLUSTER_TAG

    if rabbitHelper.qsize(pending_http_requests) > max_msgs:
        # Saturated: drop the backlog and throttle rates instead.
        rabbitHelper.purge(pending_http_requests)
        query_ops_manager(max_msgs, True)
        return

    # Resolve cluster hosts from the cached cluster status, when available.
    hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")
    if clusterStatus:
        hosts = clusterStatus.get_all_hosts()

    for workload in CacheHelper.active_queries():
        # Refresh the workload object asynchronously.
        updateQueryWorkload.apply_async(args=[workload])

        batch_size = int(workload.qps)

        # Effective filters = include set minus exclude set.
        effective_filters = list(set(workload.include_filters) - set(workload.exclude_filters))

        view_params = generateQueryParams(
            workload.indexed_key,
            workload.bucket,
            effective_filters,
            workload.limit,
            workload.startkey,
            workload.endkey,
            workload.startkey_docid,
            workload.endkey_docid,
        )

        multi_query.delay(batch_size, workload.ddoc, workload.view, view_params,
                          workload.bucket, workload.password, hosts=hosts)