

Python RestConnection.get_bucket_stats Method Code Examples

This article collects typical usage examples of the Python method lib.membase.api.rest_client.RestConnection.get_bucket_stats. If you are wondering what RestConnection.get_bucket_stats does or how to use it, the selected code examples below may help. You can also explore further usage examples of the enclosing class, lib.membase.api.rest_client.RestConnection.


Two code examples of the RestConnection.get_bucket_stats method are shown below, sorted by popularity by default.
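Before the full test cases, here is a minimal sketch of the method's typical call pattern, distilled from the examples below. The server object and bucket name are placeholders; the sketch assumes a reachable Couchbase/membase node and that, as in the examples, the returned dict exposes a "curr_items" count:

from lib.membase.api.rest_client import RestConnection

# "server" is a hypothetical node descriptor (ip, port, REST credentials),
# analogous to the self.servers[i] objects used in the tests below.
rest = RestConnection(server)
stats = rest.get_bucket_stats("default")  # bucket name is an assumption
print("Documents in bucket: {0}".format(stats["curr_items"]))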

Example 1: test_xdcr_and_indexing_with_eventing

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_bucket_stats [as alias]
 def test_xdcr_and_indexing_with_eventing(self):
     rest_src = RestConnection(self.servers[0])
     rest_dst = RestConnection(self.servers[2])
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log, input=self.input,
                                   master=self.master,
                                   use_rest=True
                                   )
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
     try:
         rest_src.remove_all_replications()
         rest_src.remove_all_remote_clusters()
         rest_src.add_remote_cluster(self.servers[2].ip, self.servers[2].port, self.servers[0].rest_username,
                                     self.servers[0].rest_password, "C2")
         rest_dst.create_bucket(bucket=self.src_bucket_name, ramQuotaMB=100)
         # set up the XDCR relationship
         repl_id = rest_src.start_replication('continuous', self.src_bucket_name, "C2")
         if repl_id is not None:
             self.log.info("Replication created successfully")
         self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size)
         body = self.create_save_function_body(self.function_name, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
         # deploy function
         self.deploy_function(body)
         # Wait for eventing to catch up with all the update mutations and verify results
         self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(actual_count))
         if stats_xdcr_dst["curr_items"] != self.docs_per_day * 2016:
             self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                 stats_xdcr_dst["curr_items"], self.docs_per_day * 2016))
         if actual_count != self.docs_per_day * 2016:
             self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(
                 actual_count, self.docs_per_day * 2016))
         # delete all documents
         self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size, op_type='delete')
         # Wait for eventing to catch up with all the delete mutations and verify results
         self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(actual_count))
         if stats_xdcr_dst["curr_items"] != 0:
             self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                 stats_xdcr_dst["curr_items"], 0))
         if actual_count != 0:
             self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(actual_count, 0))
         self.undeploy_and_delete_function(body)
     finally:
         self.n1ql_helper.drop_primary_index(using_gsi=True, server=self.n1ql_node)
         # clean up the bucket created on the destination cluster
         rest_dst.delete_bucket(bucket=self.src_bucket_name)
Developer: membase, Project: testrunner, Lines of code: 64, Source file: eventing_concurrency.py
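Example 1 verifies correctness by cross-checking two independent counters against the expected document count: "curr_items" from get_bucket_stats on the XDCR destination bucket, and the row count from a primary GSI index. A minimal, hedged distillation of that check (the helper name is hypothetical; rest is a RestConnection as above):

def assert_bucket_doc_count(rest, bucket, expected):
    # get_bucket_stats returns a dict of bucket stats; "curr_items" is the
    # current number of documents in the bucket.
    stats = rest.get_bucket_stats(bucket)
    if stats["curr_items"] != expected:
        raise AssertionError("bucket {0}: expected {1} docs, found {2}".format(
            bucket, expected, stats["curr_items"]))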

Example 2: EventingBaseTest

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_bucket_stats [as alias]

#......... part of the code is omitted here .........
            # TODO : add this back when getEventProcessingStats works reliably for doc timer events as well
            if not doc_timer_events:
                count = 0
                if num_nodes <= 1:
                    stats = self.rest.get_event_processing_stats(name)
                else:
                    stats = self.rest.get_aggregate_event_processing_stats(name)
                if on_delete:
                    mutation_type = "DCP_DELETION"
                else:
                    mutation_type = "DCP_MUTATION"
                actual_dcp_mutations = stats[mutation_type]
                # This check is needed when binary data is involved: eventing
                # receives DCP_MUTATION events for binary documents but ignores
                # them, so the stat may not match the expected count exactly
                # wait for eventing node to process dcp mutations
                log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
                while actual_dcp_mutations != expected_dcp_mutations and count < 20:
                    self.sleep(timeout/20, message="Waiting for eventing to process all dcp mutations...")
                    count += 1
                    if num_nodes <= 1:
                        stats = self.rest.get_event_processing_stats(name)
                    else:
                        stats = self.rest.get_aggregate_event_processing_stats(name)
                    actual_dcp_mutations = stats[mutation_type]
                    log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
                if count == 20:
                    raise Exception(
                        "Eventing has not processed all the {0}. Current : {1} Expected : {2}".format(mutation_type,
                                                                                                      actual_dcp_mutations,
                                                                                                      expected_dcp_mutations
                                                                                                      ))
        # wait for bucket operations to complete and verify it went through successfully
        count = 0
        stats_dst = self.rest.get_bucket_stats(bucket)
        while stats_dst["curr_items"] != expected_dcp_mutations and count < 20:
            self.sleep(timeout/20, message="Waiting for handler code to complete all bucket operations...")
            count += 1
            stats_dst = self.rest.get_bucket_stats(bucket)
        if stats_dst["curr_items"] != expected_dcp_mutations:
            total_dcp_backlog = 0
            timers_in_past = 0
            # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
            for eventing_node in eventing_nodes:
                rest_conn = RestConnection(eventing_node)
                out = rest_conn.get_all_eventing_stats()
                total_dcp_backlog += out[0]["events_remaining"]["dcp_backlog"]
                if "TIMERS_IN_PAST" in out[0]["event_processing_stats"]:
                    timers_in_past += out[0]["event_processing_stats"]["TIMERS_IN_PAST"]
                full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
                log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                          indent=4)))
                log.debug("Full Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(full_out,
                                                                                                sort_keys=True,
                                                                                                indent=4)))
            raise Exception(
                "Bucket operations from handler code took lot of time to complete or didn't go through. Current : {0} "
                "Expected : {1}  dcp_backlog : {2}  TIMERS_IN_PAST : {3}".format(stats_dst["curr_items"],
                                                                                 expected_dcp_mutations,
                                                                                 total_dcp_backlog,
                                                                                 timers_in_past))
        # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
        # print all stats from all eventing nodes
        # These are the stats that will be used by ns_server and UI
        for eventing_node in eventing_nodes:
            rest_conn = RestConnection(eventing_node)
            out = rest_conn.get_all_eventing_stats()
Developer: membase, Project: testrunner, Lines of code: 70, Source file: eventing_base.py
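The core pattern in Example 2 is polling get_bucket_stats until "curr_items" converges on the expected mutation count, giving up after a fixed retry budget. A minimal, hedged sketch of that loop outside the test framework (the function name, retry budget, and interval are assumptions; the example itself uses self.sleep and a count capped at 20):

import time

def wait_for_bucket_item_count(rest, bucket, expected, retries=20, interval=30):
    # Poll bucket stats until curr_items reaches the expected count,
    # or raise once the retry budget is exhausted.
    for _ in range(retries):
        stats = rest.get_bucket_stats(bucket)
        if stats["curr_items"] == expected:
            return stats
        time.sleep(interval)
    raise Exception("curr_items is {0}, expected {1} after {2} retries".format(
        stats["curr_items"], expected, retries))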


Note: The lib.membase.api.rest_client.RestConnection.get_bucket_stats method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright remains with them; for distribution and use, refer to the corresponding project's License. Do not reproduce without permission.