本文整理汇总了Python中lib.membase.api.rest_client.RestConnection.get_aggregate_event_processing_stats方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_aggregate_event_processing_stats方法的具体用法?Python RestConnection.get_aggregate_event_processing_stats怎么用?Python RestConnection.get_aggregate_event_processing_stats使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lib.membase.api.rest_client.RestConnection的用法示例。
在下文中一共展示了RestConnection.get_aggregate_event_processing_stats方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: EventingBaseTest
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import get_aggregate_event_processing_stats [as 别名]
#.........这里部分代码省略.........
result = self.rest.get_deployed_eventing_apps()
if count == 20:
raise Exception(
'Eventing took lot of time to come out of bootstrap state or did not successfully bootstrap')
def wait_for_undeployment(self, name):
    """Block until the eventing function `name` is undeployed.

    Polls the eventing REST endpoint for the list of deployed apps,
    sleeping 30 seconds between checks, for at most 20 attempts
    (~10 minutes total).

    :param name: name of the eventing function being undeployed
    :raises Exception: if the function is still listed as deployed
        after all polling attempts are exhausted
    """
    result = self.rest.get_deployed_eventing_apps()
    count = 0
    while name in result and count < 20:
        self.sleep(30, message="Waiting for undeployment of function...")
        count += 1
        result = self.rest.get_deployed_eventing_apps()
    # Check the final poll result rather than the attempt counter:
    # the original `count == 20` check raised even when the very last
    # re-fetch showed the function had successfully undeployed.
    if name in result:
        raise Exception(
            'Eventing took lot of time to undeploy')
def verify_eventing_results(self, name, expected_dcp_mutations, doc_timer_events=False, on_delete=False,
skip_stats_validation=False, bucket=None, timeout=600):
# This resets the rest server as the previously used rest server might be out of cluster due to rebalance
num_nodes = self.refresh_rest_server()
eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
if bucket is None:
bucket=self.dst_bucket_name
if not skip_stats_validation:
# we can't rely on DCP_MUTATION stats when doc timers events are set.
# TODO : add this back when getEventProcessingStats works reliably for doc timer events as well
if not doc_timer_events:
count = 0
if num_nodes <= 1:
stats = self.rest.get_event_processing_stats(name)
else:
stats = self.rest.get_aggregate_event_processing_stats(name)
if on_delete:
mutation_type = "DCP_DELETION"
else:
mutation_type = "DCP_MUTATION"
actual_dcp_mutations = stats[mutation_type]
# This is required when binary data is involved where DCP_MUTATION will have process DCP_MUTATIONS
# but ignore it
# wait for eventing node to process dcp mutations
log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
while actual_dcp_mutations != expected_dcp_mutations and count < 20:
self.sleep(timeout/20, message="Waiting for eventing to process all dcp mutations...")
count += 1
if num_nodes <= 1:
stats = self.rest.get_event_processing_stats(name)
else:
stats = self.rest.get_aggregate_event_processing_stats(name)
actual_dcp_mutations = stats[mutation_type]
log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
if count == 20:
raise Exception(
"Eventing has not processed all the {0}. Current : {1} Expected : {2}".format(mutation_type,
actual_dcp_mutations,
expected_dcp_mutations
))
# wait for bucket operations to complete and verify it went through successfully
count = 0
stats_dst = self.rest.get_bucket_stats(bucket)
while stats_dst["curr_items"] != expected_dcp_mutations and count < 20:
self.sleep(timeout/20, message="Waiting for handler code to complete all bucket operations...")
count += 1
stats_dst = self.rest.get_bucket_stats(bucket)
if stats_dst["curr_items"] != expected_dcp_mutations: