当前位置: 首页>>代码示例>>Python>>正文


Python RestConnection.get_bucket_stats方法代码示例

本文整理汇总了Python中membase.api.rest_client.RestConnection.get_bucket_stats方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_bucket_stats方法的具体用法?Python RestConnection.get_bucket_stats怎么用?Python RestConnection.get_bucket_stats使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在membase.api.rest_client.RestConnection的用法示例。


在下文中一共展示了RestConnection.get_bucket_stats方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: replication_verification

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def replication_verification(master, bucket, replica, inserted_count, test):
        """Wait for replication to settle, then assert the bucket holds exactly
        the number of items that were inserted.

        master         -- server whose REST endpoint drives the checks
        bucket         -- bucket name to verify
        replica        -- configured replica count for the bucket
        inserted_count -- expected value of the "curr_items" stat
        test           -- the unittest instance (supplies log + assertions)
        """
        rest = RestConnection(master)
        nodes = rest.node_statuses()

        # Verification only makes sense when the cluster is large enough to
        # host at least one full copy-set (active + replicas).
        if len(nodes) / (1 + replica) < 1:
            return
        replication_state = RestHelper(rest).wait_for_replication(900)
        test.log.info("replication state after waiting for up to 15 minutes : {0}".format(replication_state))
        # in windows, we need to set timeout_in_seconds to 15+ minutes
        test.assertTrue(
            RebalanceHelper.wait_till_total_numbers_match(master=master,
                                                          bucket=bucket,
                                                          timeout_in_seconds=1200),
            msg="replication was completed but sum(curr_items) dont match the curr_items_total")

        # Poll curr_items for up to two minutes before the final assertion.
        deadline = time.time() + 120
        stats = rest.get_bucket_stats()
        while time.time() < deadline and stats["curr_items"] != inserted_count:
            test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            time.sleep(5)
            stats = rest.get_bucket_stats()
        RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
        test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
        stats = rest.get_bucket_stats()
        test.assertEquals(stats["curr_items"], inserted_count,
                          msg="curr_items : {0} is not equal to actual # of keys inserted : {1}".format(
                              stats["curr_items"], inserted_count))

示例2: wait_for_stats_no_timeout

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def wait_for_stats_no_timeout(master, bucket, stat_key, stat_value, timeout_in_seconds=-1, verbose=True):
        """Block until the bucket stat *stat_key* equals *stat_value*, polling
        every 5 seconds.  Always returns True.

        NOTE: *timeout_in_seconds* is accepted for interface parity with the
        timed variants but is not used — this helper polls forever.
        """
        log.info("waiting for bucket {0} stat : {1} to match {2} on {3}".format(
            bucket, stat_key, stat_value, master.ip))
        rest = RestConnection(master)
        current = rest.get_bucket_stats(bucket)

        # Missing key is treated as -1, which keeps polling until it appears.
        while current.get(stat_key, -1) != stat_value:
            current = rest.get_bucket_stats(bucket)
            if verbose:
                log.info("{0} : {1}".format(stat_key, current.get(stat_key, -1)))
            time.sleep(5)
        return True
开发者ID:jchris,项目名称:testrunner,代码行数:14,代码来源:rebalance_helper.py

示例3: verify_items_count

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def verify_items_count(master, bucket):
        """Verify that the per-node sum of "curr_items", multiplied by the
        number of copies (replica_factor + 1), matches the master's
        "curr_items_tot".

        Returns True when the counts are consistent (exactly, or within a
        0.005 missing fraction when replica_factor > 1), else False.
        Raises StatsUnavailableException when no node returned stats.
        """
        # get the number of replicas for the bucket from REST
        rest = RestConnection(master)
        bucket_info = rest.get_bucket(bucket)
        replica_factor = bucket_info.numReplicas
        # print out vb_pending_num, vb_active_num, vb_replica_num as well
        master_stats = rest.get_bucket_stats(bucket)
        vbucket_active_sum = 0
        vbucket_replica_sum = 0
        vbucket_pending_sum = 0
        all_server_stats = []
        stats_received = 0
        nodes = rest.get_nodes()
        for server in nodes:
            # collect the per-node stats; a node may legitimately return none
            server_stats = rest.get_bucket_stats_for_node(bucket, server)
            if not server_stats:
                log.info("unable to get stats from {0}:{1}".format(server.ip, server.port))
            else:
                stats_received += 1
            all_server_stats.append((server, server_stats))
        if not stats_received:
            raise StatsUnavailableException()
        # renamed from "sum" to avoid shadowing the builtin
        total_curr_items = 0
        for server, single_stats in all_server_stats:
            if not single_stats or "curr_items" not in single_stats:
                continue
            total_curr_items += single_stats["curr_items"]
            log.info("curr_items from {0}:{1} : {2}".format(server.ip, server.port, single_stats["curr_items"]))
            if 'vb_pending_num' in single_stats:
                vbucket_pending_sum += single_stats['vb_pending_num']
                log.info(
                    "vb_pending_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_pending_num"]))
            if 'vb_active_num' in single_stats:
                vbucket_active_sum += single_stats['vb_active_num']
                log.info(
                    "vb_active_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_active_num"]))
            if 'vb_replica_num' in single_stats:
                vbucket_replica_sum += single_stats['vb_replica_num']
                log.info(
                    "vb_replica_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_replica_num"]))

        msg = "summation of vb_active_num : {0} vb_pending_num : {1} vb_replica_num : {2}"
        log.info(msg.format(vbucket_active_sum, vbucket_pending_sum, vbucket_replica_sum))
        msg = 'sum : {0} and sum * replica_factor ({1}) : {2}'
        log.info(msg.format(total_curr_items, replica_factor, (total_curr_items * (replica_factor + 1))))
        log.info('master_stats : {0}'.format(master_stats["curr_items_tot"]))
        delta = abs(total_curr_items * (replica_factor + 1) - master_stats["curr_items_tot"])
        if total_curr_items > 0:
            # BUG FIX: the original computed delta * 1.0 / sum * (replica_factor + 1),
            # which by precedence is (delta / sum) * (rf + 1) — the fraction of
            # missing items must be relative to the *total* expected copies.
            missing_percentage = delta * 1.0 / (total_curr_items * (replica_factor + 1))
        else:
            missing_percentage = 100
        log.info("delta : {0} missing_percentage : {1} replica_factor : {2}".format(delta, missing_percentage, replica_factor))
        if replica_factor > 1:
            # with multiple replicas tolerate a tiny (<0.5%) discrepancy
            return delta == 0 or missing_percentage < 0.005
        return (total_curr_items * (replica_factor + 1)) == master_stats["curr_items_tot"]
开发者ID:vmx,项目名称:testrunner,代码行数:62,代码来源:rebalance_helper.py

示例4: wait_for_stats_int_value

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
 def wait_for_stats_int_value(
     master, bucket, stat_key, stat_value, option="==", timeout_in_seconds=120, verbose=True
 ):
     """Poll the bucket stat *stat_key* (as int) every 2 seconds until the
     comparison selected by *option* holds or *timeout_in_seconds* elapses.

     Returns True when the comparison was satisfied, False on timeout.
     """
     log.info(
         "waiting for bucket {0} stat : {1} to {2} {3} on {4}".format(
             bucket, stat_key, option, stat_value, master.ip
         )
     )
     start = time.time()
     verified = False
     while (time.time() - start) <= timeout_in_seconds:
         rest = RestConnection(master)
         stats = rest.get_bucket_stats(bucket)
         # some stats are in memcached
         if stats and stat_key in stats:
             actual = int(stats[stat_key])
             # NOTE(review): these comparisons evaluate "stat_value <op> actual",
             # so e.g. option=">" verifies when the *expected* value exceeds the
             # observed one — the opposite of what the log message above implies.
             # Confirm the intended direction before relying on >, <, >=, <=.
             if option == "==":
                 verified = stat_value == actual
             elif option == ">":
                 verified = stat_value > actual
             elif option == "<":
                 verified = stat_value < actual
             elif option == ">=":
                 verified = stat_value >= actual
             elif option == "<=":
                 verified = stat_value <= actual
             if verified:
                 log.info("verified {0} : {1}".format(stat_key, actual))
                 break
             if verbose:
                 log.info("{0} : {1} isn't {2} {3}".format(stat_key, stat_value, option, actual))
         time.sleep(2)
     return verified
开发者ID:ketakigangal,项目名称:cbsystest,代码行数:35,代码来源:rebalance_helper.py

示例5: _monitor_drain_queue

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
 def _monitor_drain_queue(self):
     """Wait (up to 300s) for the bucket's disk-write queue to drain.

     Sets self.drained to the persistence-wait result and records the elapsed
     time in self.drained_in_seconds.
     """
     # start whenever drain_queue is > 0
     rest = RestConnection(self.master)
     started = time.time()
     queue_stats = rest.get_bucket_stats(self.bucket)
     self.log.info("current ep_queue_size: {0}".format(queue_stats["ep_queue_size"]))
     self.drained = RebalanceHelper.wait_for_persistence(self.master, self.bucket, timeout=300)
     self.drained_in_seconds = time.time() - started
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:10,代码来源:drainratetests.py

示例6: _monitor_drain_queue

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
 def _monitor_drain_queue(self):
     """Wait (up to 300s each) for ep_queue_size and ep_flusher_todo to hit 0.

     Sets self.drained to True only when both stats reached zero and records
     the elapsed time in self.drained_in_seconds.
     """
     # start whenever drain_queue is > 0
     rest = RestConnection(self.master)
     started = time.time()
     queue_stats = rest.get_bucket_stats(self.bucket)
     self.log.info("current ep_queue_size: {0}".format(queue_stats["ep_queue_size"]))
     # Short-circuit preserved: the flusher check only runs if the queue drained.
     queue_drained = RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_queue_size', 0,
                                                    timeout_in_seconds=300, verbose=False)
     flusher_done = queue_drained and RebalanceHelper.wait_for_stats(
         self.master, self.bucket, 'ep_flusher_todo', 0, timeout_in_seconds=300, verbose=False)
     self.drained = flusher_done
     self.drained_in_seconds = time.time() - started
开发者ID:steveyen,项目名称:testrunner,代码行数:12,代码来源:drainratetests.py

示例7: verify_es_num_docs

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def verify_es_num_docs(self, src_server, dest_server, kv_store=1, retry=10, verification_count=10000):
        """Verify that every Couchbase bucket on *src_server* was fully
        replicated to the ElasticSearch index on *dest_server*.

        Retries while the ES item count is still catching up (retry budget is
        refilled whenever the count changes), then cross-checks up to
        *verification_count* document ids against the ES index.
        Fails the owning test on a count mismatch or a missing document.
        """
        cb_rest = RestConnection(src_server)
        es_rest = RestConnection(dest_server)
        buckets = self.xd_ref._get_cluster_buckets(src_server)
        wait = 20
        for bucket in buckets:
            all_cb_docs = cb_rest.all_docs(bucket.name)
            cb_valid = [str(row["id"]) for row in all_cb_docs["rows"]]
            cb_num_items = cb_rest.get_bucket_stats(bucket.name)["curr_items"]
            es_num_items = es_rest.get_bucket(bucket.name).stats.itemCount
            _retry = retry
            while _retry > 0 and cb_num_items != es_num_items:
                self._log.info(
                    "elasticsearch items %s, expected: %s....retry after %s seconds"
                    % (es_num_items, cb_num_items, wait)
                )
                time.sleep(wait)
                last_es_items = es_num_items
                es_num_items = es_rest.get_bucket(bucket.name).stats.itemCount
                if es_num_items == last_es_items:
                    _retry = _retry - 1
                    # if index doesn't change reduce retry count
                elif es_num_items <= last_es_items:
                    # BUG FIX: the original logged (es_num_items - last_es_items),
                    # a negative number in this (shrinking) branch.
                    self._log.info("%s items removed from index " % (last_es_items - es_num_items))
                    _retry = retry
                elif es_num_items >= last_es_items:
                    self._log.info("%s items added to index" % (es_num_items - last_es_items))
                    _retry = retry

            if es_num_items != cb_num_items:
                self.xd_ref.fail(
                    "Error: Couchbase has %s docs, ElasticSearch has %s docs " % (cb_num_items, es_num_items)
                )

            # query for all es keys
            es_valid = es_rest.all_docs(keys_only=True, indices=[bucket.name], size=cb_num_items)

            if len(es_valid) != cb_num_items:
                self._log.info(
                    "WARNING: Couchbase has %s docs, ElasticSearch all_docs returned %s docs "
                    % (cb_num_items, len(es_valid))
                )
            for _id in cb_valid[:verification_count]:  # match at most 10k keys
                if _id not in es_valid:
                    # document missing from all_docs query do manual term search
                    doc = es_rest.search_all_nodes(_id, indices=[bucket.name])
                    if doc is None:
                        self.xd_ref.fail("Document %s Missing from ES Index (%s)" % (_id, bucket.name))

            self._log.info(
                "Verified couchbase bucket (%s) replicated (%s) docs to elasticSearch with matching keys"
                % (bucket.name, cb_num_items)
            )
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:57,代码来源:esbasetests.py

示例8: _verify_total_keys

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
 def _verify_total_keys(self, server, loaded_keys):
     """Check that every bucket on *server* reports exactly *loaded_keys*
     items; raise Exception on the first bucket that does not match.
     """
     rest = RestConnection(server)
     buckets = rest.get_buckets()
     for bucket in buckets:
         self.log.info("start to verify bucket: {0}".format(bucket))
         stats = rest.get_bucket_stats(bucket)
         if stats["curr_items"] == loaded_keys:
             self.log.info("{0} keys in bucket {2} match with \
                            pre-loaded keys: {1}".format(stats["curr_items"], loaded_keys, bucket))
         else:
             # BUG FIX: the original message started with a stray "{" before %s.
             raise Exception("%s keys in bucket %s does not match with \
                              loaded %s keys" % (stats["curr_items"], bucket, loaded_keys))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:14,代码来源:rackzonetests.py

示例9: wait_for_stats

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def wait_for_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120, verbose=True):
        """Poll the bucket stat *stat_key* until it equals *stat_value*.

        The timeout is *adaptive*: it only starts counting while the stat is
        not changing, and resets whenever the value moves.  Returns True when
        the stat matched (or when stats collection failed — see the TODO),
        False when the value stagnated for *timeout_in_seconds*.
        """
        log.info(
            "waiting for bucket {0} stat : {1} to match {2} on {3}".format(bucket, stat_key, stat_value, master.ip)
        )
        time_to_timeout = 0
        previous_stat_value = -1
        curr_stat_value = -1
        verified = False
        while not verified:
            rest = RestConnection(master)
            try:
                stats = rest.get_bucket_stats(bucket)
                if stats and stat_key in stats and stats[stat_key] == stat_value:
                    log.info("{0} : {1}".format(stat_key, stats[stat_key]))
                    verified = True
                    break
                else:
                    if stats and stat_key in stats:
                        if verbose:
                            log.info("{0} : {1}".format(stat_key, stats[stat_key]))
                        curr_stat_value = stats[stat_key]

                    # values are changing so clear any timeout
                    if curr_stat_value != previous_stat_value:
                        time_to_timeout = 0
                    else:
                        if time_to_timeout == 0:
                            time_to_timeout = time.time() + timeout_in_seconds
                        if time_to_timeout < time.time():
                            log.info(
                                "no change in {0} stat after {1} seconds (value = {2})".format(
                                    stat_key, timeout_in_seconds, curr_stat_value
                                )
                            )
                            break

                    previous_stat_value = curr_stat_value

                    if not verbose:
                        time.sleep(0.1)
                    else:
                        time.sleep(2)
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            except Exception:
                log.info("unable to collect stats from server {0}".format(master))
                verified = True  # TODO: throw ex and assume caller catches
                break
            # wait for 5 seconds for the next check
            time.sleep(5)

        return verified
开发者ID:ketakigangal,项目名称:cbsystest,代码行数:52,代码来源:rebalance_helper.py

示例10: replication_verification

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def replication_verification(master, bucket_data, replica, test, failed_over=False):
        """Verify replication health for every bucket on the cluster.

        For each bucket: waits for replication, runs the expiry pager, checks
        that summed curr_items match curr_items_total, and (unless a node was
        failed over) that curr_items equals the number of inserted keys.
        Collected failures are logged and raised as a single assertion.
        """
        asserts = []
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        nodes = rest.node_statuses()
        test.log.info("expect {0} / {1} replication ? {2}".format(len(nodes),
            (1.0 + replica), len(nodes) / (1.0 + replica)))
        for bucket in buckets:
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
        # verification only applies when the cluster can host a full copy-set
        if len(nodes) / (1.0 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(300)
            msg = "replication state after waiting for up to 5 minutes : {0}"
            test.log.info(msg.format(final_replication_state))
            #run expiry_pager on all nodes before doing the replication verification
            for bucket in buckets:
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                test.log.info("wait for expiry pager to run on all these nodes")
                time.sleep(30)
                # NOTE(review): the pager interval is set to 3600 and then
                # immediately back to 30 — presumably to force a pager restart;
                # confirm against ClusterOperationHelper.flushctl_set semantics.
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 3600, bucket.name)
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                # windows need more than 15 minutes to get number matched
                replica_match = RebalanceHelper.wait_till_total_numbers_match(bucket=bucket.name,
                    master=master,
                    timeout_in_seconds=600)
                if not replica_match:
                    asserts.append("replication was completed but sum(curr_items) don't match the curr_items_total %s" %
                                   bucket.name)
                if not failed_over:
                    stats = rest.get_bucket_stats(bucket=bucket.name)
                    RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                    msg = "curr_items : {0} is not equal to actual # of keys inserted : {1} : bucket: {2}"

                    # the expected count comes from the kv_store when one was
                    # attached to the bucket, else from the raw inserted counter
                    if bucket_data[bucket.name]['kv_store'] is None:
                        items_inserted = bucket_data[bucket.name]["items_inserted_count"]
                    else:
                        items_inserted = len(bucket_data[bucket.name]['kv_store'].valid_items())

                    active_items_match = stats["curr_items"] == items_inserted
                    if not active_items_match:
                        asserts.append(msg.format(stats["curr_items"], items_inserted, bucket.name))

        # fail once at the end so every bucket's problems get reported
        if len(asserts) > 0:
            for msg in asserts:
                test.log.error(msg)
            test.assertTrue(len(asserts) == 0, msg=asserts)
开发者ID:ronniedada,项目名称:testrunner,代码行数:47,代码来源:rebalancetests.py

示例11: replication_verification

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def replication_verification(master, bucket_data, replica, test, failed_over=False):
        """Verify replication for every bucket after a rebalance.

        Waits for replication, runs the expiry pager on each bucket, and
        checks that summed curr_items match curr_items_total.  Item-count
        mismatches are only logged (see the disabled append below); only
        replica-total mismatches fail the test.
        """
        asserts = []
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        nodes = rest.node_statuses()
        test.log.info(
            "expect {0} / {1} replication ? {2}".format(len(nodes), (1.0 + replica), len(nodes) / (1.0 + replica))
        )
        # verification only applies when the cluster can host a full copy-set
        if len(nodes) / (1.0 + replica) >= 1:
            test.assertTrue(
                RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=300),
                msg="replication did not complete after 5 minutes",
            )
            # run expiry_pager on all nodes before doing the replication verification
            for bucket in buckets:
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name)
                test.log.info("wait for expiry pager to run on all these nodes")
                time.sleep(30)
                # NOTE(review): sleep time is bumped to 3600 and then reset to
                # the default — presumably to force a pager restart; confirm.
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name, 3600)
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name)
                replica_match = RebalanceHelper.wait_till_total_numbers_match(
                    bucket=bucket.name, master=master, timeout_in_seconds=300
                )
                if not replica_match:
                    asserts.append("replication was completed but sum(curr_items) dont match the curr_items_total")
                if not failed_over:
                    stats = rest.get_bucket_stats(bucket=bucket.name)
                    RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                    msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
                    active_items_match = stats["curr_items"] == bucket_data[bucket.name]["items_inserted_count"]
                    if not active_items_match:
                        # curr_items mismatch deliberately demoted to a log
                        # message instead of a failure (append left disabled):
                        #                        asserts.append(
                        test.log.error(
                            msg.format(stats["curr_items"], bucket_data[bucket.name]["items_inserted_count"])
                        )

        # fail once at the end so every bucket's problems get reported
        if len(asserts) > 0:
            for msg in asserts:
                test.log.error(msg)
            test.assertTrue(len(asserts) == 0, msg=asserts)
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:42,代码来源:rebalancingtests.py

示例12: EventingUpgrade

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]

#.........这里部分代码省略.........
        status, content = ClusterOperationHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}". \
                        format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                self._new_master(new_server)
                FIND_MASTER = True
                self.log.info("%s node %s becomes the master" \
                              % (self.input.param("upgrade_version", ""), new_server.ip))
                break
        if self.input.param("initial_version", "")[:5] in COUCHBASE_VERSION_2 \
                and not FIND_MASTER:
            raise Exception( \
                "After rebalance in {0} nodes, {0} node doesn't become master" \
                    .format(self.input.param("upgrade_version", "")))
        servers_out = self.servers[:self.nodes_init]
        log.info("Rebalanced out all old version nodes")
        self.cluster.rebalance(self.servers[:self.num_servers], [], servers_out)
        self._new_master(self.servers[self.nodes_init])

    def online_upgrade_swap_rebalance(self, services=None):
        """Swap-rebalance each old node out for an upgraded one, pairing the
        incoming servers with *services*, and re-point the master after every
        swap.
        """
        incoming = self.servers[self.nodes_init:self.num_servers]
        self.sleep(self.sleep_time)
        status, content = ClusterOperationHelper.find_orchestrator(self.master)
        self.assertTrue(status,
                        msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        for idx, (new_node, new_service) in enumerate(zip(incoming, services)):
            log.info("Swap rebalance nodes")
            # swap old node idx for the new node, assigning its service
            self.cluster.rebalance(self.servers[:self.nodes_init],
                                   [new_node], [self.servers[idx]], [new_service])
            self._new_master(self.servers[self.nodes_init])

    def online_upgrade_with_failover(self, services=None):
        """Upgrade online by rebalancing in the new-version nodes, promoting
        one of them to master, then hard-failing-over and rebalancing out all
        old-version nodes.
        """
        servers_in = self.servers[self.nodes_init:self.num_servers]
        self.cluster.rebalance(self.servers[:self.nodes_init], servers_in, [], services=services)
        log.info("Rebalance in all {0} nodes" \
                      .format(self.input.param("upgrade_version", "")))
        self.sleep(self.sleep_time)
        status, content = ClusterOperationHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}". \
                        format(status, content))
        # promote the first new node that the orchestrator output mentions
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                self._new_master(new_server)
                FIND_MASTER = True
                self.log.info("%s node %s becomes the master" \
                              % (self.input.param("upgrade_version", ""), new_server.ip))
                break
        # for 2.x initial versions a new-version node must have become master
        if self.input.param("initial_version", "")[:5] in COUCHBASE_VERSION_2 \
                and not FIND_MASTER:
            raise Exception( \
                "After rebalance in {0} nodes, {0} node doesn't become master" \
                    .format(self.input.param("upgrade_version", "")))
        servers_out = self.servers[:self.nodes_init]
        self._new_master(self.servers[self.nodes_init])
        log.info("failover and rebalance nodes")
        # hard failover (graceful=False) of all old nodes, then eject them
        self.cluster.failover(self.servers[:self.num_servers],failover_nodes=servers_out, graceful=False)
        self.cluster.rebalance(self.servers[:self.num_servers], [], servers_out)

    def _new_master(self, server):
        """Re-point self.master and the cached REST handles at *server*."""
        self.master = server
        rest = RestConnection(server)
        self.rest = rest
        self.rest_helper = RestHelper(rest)

    def create_buckets(self):
        """Create the four standard buckets (source, destination, metadata,
        second destination) used by the eventing upgrade tests, pausing 60s
        between creations, and cache the resulting bucket lists.
        """
        self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=700)
        self.rest.delete_bucket("default")
        self.bucket_size = 100
        log.info("Create the required buckets in the initial version")
        bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                   replicas=self.num_replicas)
        # buckets are created in this exact order on consecutive ports
        names = [self.src_bucket_name, self.dst_bucket_name,
                 self.metadata_bucket_name, self.dst_bucket_name1]
        for offset, name in enumerate(names, start=1):
            self.cluster.create_standard_bucket(name=name,
                                                port=STANDARD_BUCKET_PORT + offset,
                                                bucket_params=bucket_params)
            if offset == 1:
                # snapshot taken right after the source bucket exists
                self.src_bucket = RestConnection(self.master).get_buckets()
            if offset < len(names):
                self.sleep(60)
        self.buckets = RestConnection(self.master).get_buckets()

    def validate_eventing(self, bucket_name, no_of_docs):
        """Poll *bucket_name* (up to 20 x 30s) until curr_items reaches
        *no_of_docs*; raise Exception if the eventing handler never catches up.
        """
        attempts = 0
        stats_dst = self.rest.get_bucket_stats(bucket_name)
        while stats_dst["curr_items"] != no_of_docs and attempts < 20:
            self.sleep(30, message="Waiting for handler code to complete all bucket operations...")
            attempts += 1
            stats_dst = self.rest.get_bucket_stats(bucket_name)
        if stats_dst["curr_items"] != no_of_docs:
            log.info("Eventing is not working as expected after upgrade")
            raise Exception(
                "Bucket operations from handler code took lot of time to complete or didn't go through. Current : {0} "
                "Expected : {1} ".format(stats_dst["curr_items"], no_of_docs))
开发者ID:ritamcouchbase,项目名称:testrunner,代码行数:104,代码来源:eventing_upgrade.py

示例13: verify_items_count

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def verify_items_count(master, bucket, num_attempt=3, timeout=2):
        """Verify that the per-node sum of "curr_items" times the number of
        copies matches the master's "curr_items_tot".

        Returns True when the delta is zero, else False.  Raises
        StatsUnavailableException when any node failed to return stats and
        Exception when the master stats lack "curr_items_tot".
        """
        # get the #of buckets from rest
        rest = RestConnection(master)
        if isinstance(bucket, Bucket):
            bucket = bucket.name
        bucket_info = rest.get_bucket(bucket, num_attempt, timeout)
        replica_factor = bucket_info.numReplicas
        vbucket_active_sum = 0
        vbucket_replica_sum = 0
        vbucket_pending_sum = 0
        all_server_stats = []
        stats_received = True
        nodes = rest.get_nodes()
        for server in nodes:
            # get the stats
            server_stats = rest.get_bucket_stats_for_node(bucket, server)
            if not server_stats:
                log.info("unable to get stats from {0}:{1}".format(server.ip, server.port))
                stats_received = False
            all_server_stats.append((server, server_stats))
        if not stats_received:
            raise StatsUnavailableException()
        sum = 0
        for server, single_stats in all_server_stats:
            if not single_stats or "curr_items" not in single_stats:
                continue
            sum += single_stats["curr_items"]
            log.info("curr_items from {0}:{1} : {2}".format(server.ip, server.port, single_stats["curr_items"]))
            if "vb_pending_num" in single_stats:
                vbucket_pending_sum += single_stats["vb_pending_num"]
                log.info(
                    "vb_pending_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_pending_num"])
                )
            if "vb_active_num" in single_stats:
                vbucket_active_sum += single_stats["vb_active_num"]
                log.info(
                    "vb_active_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_active_num"])
                )
            if "vb_replica_num" in single_stats:
                vbucket_replica_sum += single_stats["vb_replica_num"]
                log.info(
                    "vb_replica_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_replica_num"])
                )

        msg = "summation of vb_active_num : {0} vb_pending_num : {1} vb_replica_num : {2}"
        log.info(msg.format(vbucket_active_sum, vbucket_pending_sum, vbucket_replica_sum))
        msg = "sum : {0} and sum * (replica_factor + 1) ({1}) : {2}"
        log.info(msg.format(sum, replica_factor + 1, (sum * (replica_factor + 1))))
        master_stats = rest.get_bucket_stats(bucket)
        if "curr_items_tot" in master_stats:
            log.info("curr_items_tot from master: {0}".format(master_stats["curr_items_tot"]))
        else:
            # BUG FIX: the original called self.fail(...) inside a function
            # with no "self" parameter (NameError) and used the invalid
            # format field "{O}" (letter O).
            raise Exception("bucket {0} stats doesnt contain 'curr_items_tot':".format(bucket))
        # when there are fewer nodes than replicas require, only len(nodes)
        # copies can actually exist
        if replica_factor >= len(nodes):
            log.warn("the number of nodes is less than replica requires")
            delta = sum * (len(nodes)) - master_stats["curr_items_tot"]
        else:
            delta = sum * (replica_factor + 1) - master_stats["curr_items_tot"]
        delta = abs(delta)

        if delta > 0:
            if sum == 0:
                missing_percentage = 0
            else:
                missing_percentage = delta * 1.0 / (sum * (replica_factor + 1))
        else:
            missing_percentage = 1
        log.info(
            "delta : {0} missing_percentage : {1} replica_factor : {2}".format(
                delta, missing_percentage, replica_factor
            )
        )
        # If no items missing then, return True
        if not delta:
            return True
        return False
开发者ID:ketakigangal,项目名称:cbsystest,代码行数:78,代码来源:rebalance_helper.py

示例14: common_test_body

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
    def common_test_body(self, replica, failover_reason, load_ratio, age, max_nodes):
        log = logger.Logger.get_logger()
        bucket_name = "default"
        log.info("replica : {0}".format(replica))
        log.info("failover_reason : {0}".format(failover_reason))
        log.info("load_ratio : {0}".format(load_ratio))
        log.info("age : {0}".format(age))
        log.info("max_nodes : {0}".format(max_nodes))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.update_autofailover_settings(True, age, max_nodes)
        rest.reset_autofailover()
        bucket_ram = info.memoryQuota * 2 / 3
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")

        credentials = self._input.membase_settings

        log.info("inserting some items in the master before adding any nodes")
        distribution = {512: 0.4, 1 * 1024: 0.59, 5 * 1024: 0.01}
        if load_ratio > 10:
            distribution = {5 * 1024: 0.4, 10 * 1024: 0.5, 20 * 1024: 0.1}

        ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

        inserted_count, rejected_count =\
        MemcachedClientHelper.load_bucket(servers=self._servers,
                                          ram_load_ratio=load_ratio,
                                          value_size_distribution=distribution,
                                          number_of_threads=1)
        log.info('inserted {0} keys'.format(inserted_count))
        nodes = rest.node_statuses()
        # why are we in this while loop?
        while (len(nodes) - replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            self.log.info(msg.format(final_replication_state))
            chosen = AutoFailoverBaseTest.choose_nodes(master, nodes, replica)
            for node in chosen:
                #let's do op
                if failover_reason == 'stop_membase':
                    self.stop_membase(node)
                    log.info("10 seconds delay to wait for membase-server to shutdown")
                    #wait for 5 minutes until node is down
                    self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
                elif failover_reason == "firewall":
                    self.enable_firewall(node)
                    self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
            # list pre-autofailover stats
            stats = rest.get_bucket_stats()
            self.log.info("pre-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            AutoFailoverBaseTest.wait_for_failover_or_assert(master, replica, age, self)

            # manually fail over any unhealthy:active nodes left, max that we should need to manually failover is replica-max_nodes
            manual_failover_count = replica - max_nodes
            for node in chosen:
                self.log.info("checking {0}".format(node.ip))
                if node.status.lower() == "unhealthy" and node.clusterMembership == "active":
                    msg = "node {0} not failed over and we are over out manual failover limit of {1}"
                    self.assertTrue(manual_failover_count > 0, msg.format(node.ip, (replica - max_nodes)))
                    self.log.info("manual failover {0}".format(node.ip))
                    rest.fail_over(node.id)
                    manual_failover_count -= 1

            stats = rest.get_bucket_stats()
            self.log.info("post-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            self.assertTrue(stats["curr_items"] == inserted_count, "failover completed but curr_items ({0}) does not match inserted items ({1})".format(stats["curr_items"], inserted_count))

            log.info("10 seconds sleep after autofailover before invoking rebalance...")
            time.sleep(10)
            rest.rebalance(otpNodes=[node.id for node in nodes],
                           ejectedNodes=[node.id for node in chosen])
            msg="rebalance failed while removing failover nodes {0}".format(chosen)
            self.assertTrue(rest.monitorRebalance(), msg=msg)

            nodes = rest.node_statuses()
            if len(nodes) / (1 + replica) >= 1:
                final_replication_state = RestHelper(rest).wait_for_replication(900)
                msg = "replication state after waiting for up to 15 minutes : {0}"
                self.log.info(msg.format(final_replication_state))
                self.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master,bucket_name,600),
                                msg="replication was completed but sum(curr_items) dont match the curr_items_total")

                start_time = time.time()
                stats = rest.get_bucket_stats()
#.........这里部分代码省略.........
开发者ID:vmx,项目名称:testrunner,代码行数:103,代码来源:autofailovertests.py

示例15: StatChecker

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_bucket_stats [as 别名]
class StatChecker(object):
    """Evaluate simple stat conditions (e.g. ``"curr_items == 100"``)
    against a bucket's stats fetched via a Couchbase REST connection.

    A condition is a string of the form ``<stat_key> <operator> <value>``,
    where operator is one of the class-level comparison constants.
    """

    EQUAL = '=='
    NOT_EQUAL = '!='
    LESS_THAN = '<'
    LESS_THAN_EQ = '<='
    GREATER_THAN = '>'
    GREATER_THAN_EQ = '>='

    def __init__(self, addr, bucket = "default", username = "Administrator", password = "password"):
        """Connect to the node at ``addr`` ("ip:port") with the given REST
        credentials; stats are read from ``bucket``."""
        self.ip, self.port = addr.split(":")
        self.username = username
        self.password = password
        self.bucket = bucket
        serverInfo = { "ip" : self.ip,
                       "port" : self.port,
                       "rest_username" : self.username,
                       "rest_password" : self.password }
        self.node = self._dict_to_obj(serverInfo)
        self.rest = RestConnection(self.node)

    def check(self, condition, datatype = int):
        """Return True if ``condition`` holds for the current bucket stats.

        Returns False on a malformed condition, an unknown stat key, an
        unrecognized operator, or when no stats are available. ``datatype``
        converts the right-hand-side string before comparison.
        """
        try:
            stat, cmp_type, value = self.parse_condition(condition)
        except AttributeError as ex:
            logger.error(ex)
            return False

        value = datatype(value)
        stats = self.rest.get_bucket_stats(self.bucket)
        if not stats:
            return False

        try:
            curr_value = stats[stat]
        except KeyError:
            # Only a missing key is expected here; anything else should
            # propagate rather than be silently swallowed.
            logger.error('Invalid Stat Key: %s' % stat)
            return False

        # Dispatch table avoids a long chained boolean expression and makes
        # adding operators a one-line change.
        comparators = {
            StatChecker.EQUAL: lambda a, b: a == b,
            StatChecker.NOT_EQUAL: lambda a, b: a != b,
            StatChecker.LESS_THAN: lambda a, b: a < b,
            StatChecker.LESS_THAN_EQ: lambda a, b: a <= b,
            StatChecker.GREATER_THAN: lambda a, b: a > b,
            StatChecker.GREATER_THAN_EQ: lambda a, b: a >= b,
        }
        cmp_fn = comparators.get(cmp_type)
        if cmp_fn is None:
            # Unknown operator: preserve the original "not valid" outcome.
            return False
        return cmp_fn(curr_value, value)

    def parse_condition(self, condition):
        """Split ``condition`` into ``[stat, operator, value]`` strings.

        Raises AttributeError (after logging) when the condition does not
        match the expected ``word OP word`` shape.
        """
        m = re.match(r"(\w+)(\W+)(\w+)", condition)
        try:
            return [str(str_.strip()) for str_ in m.groups()]
        except AttributeError:
            logger.error("Invalid condition syntax: %s" % condition)
            raise AttributeError(condition)

    def _dict_to_obj(self, dict_):
        # Lightweight record: expose the dict's keys as attributes, as
        # expected by RestConnection.
        return type('OBJ', (object,), dict_)
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:63,代码来源:stats.py


注:本文中的membase.api.rest_client.RestConnection.get_bucket_stats方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。