

Python RestConnection.get_all_eventing_stats Method Code Examples

This article collects typical code examples of the Python method lib.membase.api.rest_client.RestConnection.get_all_eventing_stats. If you are wondering what RestConnection.get_all_eventing_stats does, how to use it, or where to find examples of it, the curated samples below should help. You can also explore further usage examples of the class it belongs to, lib.membase.api.rest_client.RestConnection.


The following presents 6 code examples of the RestConnection.get_all_eventing_stats method, sorted by popularity by default.
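Before the numbered examples, here is a minimal sketch of the pattern they all share: open a RestConnection to each eventing node and read counters out of the stats payload. The helper get_nodes_from_services_map and the stats layout (a list with one entry per deployed function, read at index 0) are taken from the examples that follow; treat this as an illustrative sketch inside a testrunner-style test class, not a standalone script.

from lib.membase.api.rest_client import RestConnection

def sum_exec_timeouts(self):
    # Collect every eventing node currently in the cluster
    # (helper provided by the surrounding testrunner test class).
    eventing_nodes = self.get_nodes_from_services_map(service_type="eventing",
                                                      get_all_nodes=True)
    total = 0
    for node in eventing_nodes:
        # get_all_eventing_stats() returns a list with one entry per
        # deployed function; the examples below read entry 0.
        out = RestConnection(node).get_all_eventing_stats()
        total += out[0]["failure_stats"]["timeout_count"]
    return total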

Example 1: test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]
 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     # Note to self: never use SDKs unless you really have to. It is difficult to upgrade or maintain the
     # correct SDK versions on the slaves, and scripts become notoriously unreliable on jenkins slaves.
     num_docs = 10
     values = ['1', '10']
     # create 10 non json docs on source bucket
     gen_load_non_json = JSONNonDocGenerator('non_json_docs', values, start=0, end=num_docs)
     self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json, self.buckets[0].kvs[1],
                                'create', compression=self.sdk_compression)
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # Intentional wait so that eventing has time to attempt the mutations before we verify that none went through
     self.sleep(60)
     # No docs should be present in dst_bucket as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check that the number of timed-out executions equals the number of docs created
     if exec_timeout_count != num_docs:
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(num_docs,
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
Developer: arod1987, Project: testrunner, Lines: 32, Source: eventing_negative.py

Example 2: test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]
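# Note: this example additionally assumes the Couchbase Python SDK's Bucket class for the
# upserts below; in the 2.x SDK that is likely: from couchbase.bucket import Bucket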
 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     keys = ['customer123', 'customer1234', 'customer12345']
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(url, username="cbadminbucket", password="password")
     for doc_id in keys:
         bucket.upsert(doc_id, {'name' : doc_id})
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # Intentional wait so that eventing has time to attempt the mutations before we verify that none went through
     self.sleep(60)
     # No docs should be present in dst_bucket as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check that the number of timed-out executions equals the number of docs created
     if exec_timeout_count != len(keys):
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(len(keys),
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
Developer: membase, Project: testrunner, Lines: 29, Source: eventing_negative.py

Example 3: print_eventing_stats_from_all_eventing_nodes

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]
 def print_eventing_stats_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                   indent=4)))
Developer: arod1987, Project: testrunner, Lines: 9, Source: eventing_base.py

Example 4: verify_eventing_results_of_all_functions

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]
 def verify_eventing_results_of_all_functions(self, docs_expected, verify_results=True):
     if verify_results:
         # Verify the results of all the buckets
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True)
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True,
                                      bucket=self.dst_bucket_name1)
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True,
                                      bucket=self.dst_bucket_name2)
     else:
         # Just print the stats after sleeping for 10 mins. Required to get the latest stats.
         self.sleep(600)
         eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
         for eventing_node in eventing_nodes:
             rest_conn = RestConnection(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                         indent=4)))
         for bucket in [self.dst_bucket_name, self.dst_bucket_name1, self.dst_bucket_name2]:
             stats_dst = self.rest.get_bucket_stats(bucket)
             log.info("Number of docs in {0} bucket actual : {1} expected : {2} ".format(bucket,
                                                                                         stats_dst["curr_items"],
                                                                                         docs_expected))
Developer: membase, Project: testrunner, Lines: 24, Source: eventing_volume.py

Example 5: EventingBaseTest

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]

#......... part of the code is omitted here .........
                actual_dcp_mutations = stats[mutation_type]
                # This is required when binary data is involved: DCP_MUTATION counts mutations that eventing
                # processed but ignored
                # wait for the eventing node to process dcp mutations
                log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
                while actual_dcp_mutations != expected_dcp_mutations and count < 20:
                    self.sleep(timeout/20, message="Waiting for eventing to process all dcp mutations...")
                    count += 1
                    if num_nodes <= 1:
                        stats = self.rest.get_event_processing_stats(name)
                    else:
                        stats = self.rest.get_aggregate_event_processing_stats(name)
                    actual_dcp_mutations = stats[mutation_type]
                    log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
                if count == 20:
                    raise Exception(
                        "Eventing has not processed all the {0}. Current : {1} Expected : {2}".format(mutation_type,
                                                                                                      actual_dcp_mutations,
                                                                                                      expected_dcp_mutations
                                                                                                      ))
        # wait for bucket operations to complete and verify it went through successfully
        count = 0
        stats_dst = self.rest.get_bucket_stats(bucket)
        while stats_dst["curr_items"] != expected_dcp_mutations and count < 20:
            self.sleep(timeout/20, message="Waiting for handler code to complete all bucket operations...")
            count += 1
            stats_dst = self.rest.get_bucket_stats(bucket)
        if stats_dst["curr_items"] != expected_dcp_mutations:
            total_dcp_backlog = 0
            timers_in_past = 0
            # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
            for eventing_node in eventing_nodes:
                rest_conn = RestConnection(eventing_node)
                out = rest_conn.get_all_eventing_stats()
                total_dcp_backlog += out[0]["events_remaining"]["dcp_backlog"]
                if "TIMERS_IN_PAST" in out[0]["event_processing_stats"]:
                    timers_in_past += out[0]["event_processing_stats"]["TIMERS_IN_PAST"]
                full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
                log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                          indent=4)))
                log.debug("Full Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(full_out,
                                                                                                sort_keys=True,
                                                                                                indent=4)))
            raise Exception(
                "Bucket operations from handler code took lot of time to complete or didn't go through. Current : {0} "
                "Expected : {1}  dcp_backlog : {2}  TIMERS_IN_PAST : {3}".format(stats_dst["curr_items"],
                                                                                 expected_dcp_mutations,
                                                                                 total_dcp_backlog,
                                                                                 timers_in_past))
        # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
        # print all stats from all eventing nodes
        # These are the stats that will be used by ns_server and UI
        for eventing_node in eventing_nodes:
            rest_conn = RestConnection(eventing_node)
            out = rest_conn.get_all_eventing_stats()
            full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
            log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                      indent=4)))
            log.debug("Full Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(full_out, sort_keys=True,
                                                                                            indent=4)))

    def eventing_stats(self):
        self.sleep(30)
        # get_all_eventing_stats() already returns parsed JSON (see the direct
        # indexing in the examples above), so no extra json.loads() is needed
        js = self.rest.get_all_eventing_stats()
        log.info("execution stats: {0}".format(js))
Developer: membase, Project: testrunner, Lines: 70, Source: eventing_base.py

Example 6: verify_eventing_results

# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import get_all_eventing_stats [as alias]
 def verify_eventing_results(self, name, expected_dcp_mutations, doc_timer_events=False, on_delete=False,
                             skip_stats_validation=False, bucket=None, timeout=600):
     # This resets the rest server as the previously used rest server might be out of cluster due to rebalance
     num_nodes = self.refresh_rest_server()
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     if bucket is None:
         bucket=self.dst_bucket_name
     if not skip_stats_validation:
         # we can't rely on DCP_MUTATION stats when doc timer events are set.
         # TODO : add this back when getEventProcessingStats works reliably for doc timer events as well
         if not doc_timer_events:
             count = 0
             if num_nodes <= 1:
                 stats = self.rest.get_event_processing_stats(name)
             else:
                 stats = self.rest.get_aggregate_event_processing_stats(name)
             if on_delete:
                 mutation_type = "DCP_DELETION"
             else:
                 mutation_type = "DCP_MUTATION"
             actual_dcp_mutations = stats[mutation_type]
             # This is required when binary data is involved: DCP_MUTATION counts mutations that eventing
             # processed but ignored
             # wait for the eventing node to process dcp mutations
             log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
             while actual_dcp_mutations != expected_dcp_mutations and count < 20:
                 self.sleep(timeout/20, message="Waiting for eventing to process all dcp mutations...")
                 count += 1
                 if num_nodes <= 1:
                     stats = self.rest.get_event_processing_stats(name)
                 else:
                     stats = self.rest.get_aggregate_event_processing_stats(name)
                 actual_dcp_mutations = stats[mutation_type]
                 log.info("Number of {0} processed till now : {1}".format(mutation_type, actual_dcp_mutations))
             if count == 20:
                 raise Exception(
                     "Eventing has not processed all the {0}. Current : {1} Expected : {2}".format(mutation_type,
                                                                                                   actual_dcp_mutations,
                                                                                                   expected_dcp_mutations
                                                                                                   ))
     # wait for bucket operations to complete and verify it went through successfully
     count = 0
     stats_dst = self.rest.get_bucket_stats(bucket)
     while stats_dst["curr_items"] != expected_dcp_mutations and count < 20:
         self.sleep(timeout/20, message="Waiting for handler code to complete all bucket operations...")
         count += 1
         stats_dst = self.rest.get_bucket_stats(bucket)
     if stats_dst["curr_items"] != expected_dcp_mutations:
         total_dcp_backlog = 0
         timers_in_past = 0
         # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
         for eventing_node in eventing_nodes:
             rest_conn = RestConnection(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             total_dcp_backlog += out[0]["events_remaining"]["dcp_backlog"]
             if "TIMERS_IN_PAST" in out[0]["event_processing_stats"]:
                 timers_in_past += out[0]["event_processing_stats"]["TIMERS_IN_PAST"]
             full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
             log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                       indent=4)))
             log.debug("Full Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(full_out,
                                                                                             sort_keys=True,
                                                                                             indent=4)))
         raise Exception(
             "Bucket operations from handler code took lot of time to complete or didn't go through. Current : {0} "
             "Expected : {1}  dcp_backlog : {2}  TIMERS_IN_PAST : {3}".format(stats_dst["curr_items"],
                                                                              expected_dcp_mutations,
                                                                              total_dcp_backlog,
                                                                              timers_in_past))
     # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
     # print all stats from all eventing nodes
     # These are the stats that will be used by ns_server and UI
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
         log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                   indent=4)))
         log.debug("Full Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(full_out, sort_keys=True,
                                                                                         indent=4)))
Developer: membase, Project: testrunner, Lines: 82, Source: eventing_base.py


Note: The lib.membase.api.rest_client.RestConnection.get_all_eventing_stats examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub/MSDocs and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.