

Python RebalanceHelper.wait_till_total_numbers_match Method Code Examples

This article collects typical usage examples of the Python method membase.helper.rebalance_helper.RebalanceHelper.wait_till_total_numbers_match. If you are trying to figure out exactly what RebalanceHelper.wait_till_total_numbers_match does, how to call it, or what real usage looks like, the curated code examples below should help. You can also explore further usage examples of membase.helper.rebalance_helper.RebalanceHelper, the class this method belongs to.


The following presents 12 code examples of RebalanceHelper.wait_till_total_numbers_match, sorted by popularity by default.
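Before working through the examples, here is a minimal sketch of the call pattern they all share. It is illustrative only: the helper name all_item_counts_match and the default timeout are assumptions, while master is assumed to be a testrunner server object and buckets a list of bucket names or Bucket objects. As the examples below show, wait_till_total_numbers_match waits until the bucket's curr_items_total statistic equals the sum of curr_items reported by the individual nodes (or the timeout expires) and returns True on success.

# Illustrative sketch only; the helper name and default timeout are assumptions, not project code.
from membase.helper.rebalance_helper import RebalanceHelper

def all_item_counts_match(master, buckets, timeout=600):
    """Return True only if, for every bucket, curr_items_total matches the
    sum of per-node curr_items within `timeout` seconds."""
    verified = True
    for bucket in buckets:
        verified &= RebalanceHelper.wait_till_total_numbers_match(
            master, bucket, timeout_in_seconds=timeout)
    return verified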

Example 1: rebalance_in_out_at_once_persistence_stopped

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait timeout in 60 min because MB-7386 rebalance stuck
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out], \
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        #verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
Author: ashvindersingh, Project: testrunner, Lines: 37, Source: rebalanceinout.py

Example 2: verify_cluster_stats

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeout=None, check_items=True,
                             only_store_hash=True, replica_to_read=None, batch_size=1000):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify

        self._wait_for_stats_all_buckets(servers, timeout=(timeout or 120))
        if check_items:
            try:
                self._verify_all_buckets(master, timeout=timeout, max_verify=max_verify,
                                     only_store_hash=only_store_hash, replica_to_read=replica_to_read,
                                     batch_size=batch_size)
            except ValueError, e:
                # if 'ValueError: Not able to get values for following keys' is raised, verify stats before re-raising
                self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
                raise e
            self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
            # verify that curr_items_tot corresponds to sum of curr_items from all nodes
            verified = True
            for bucket in self.buckets:
                verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket)
            self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
Author: paragagarwal, Project: testrunner, Lines: 27, Source: basetestcase.py

Example 3: items_verification

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
 def items_verification(test, master):
     rest = RestConnection(master)
     # Verify item count across all nodes
     timeout = 600
     for bucket in rest.get_buckets():
         verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
         test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(timeout))
Author: Boggypop, Project: testrunner, Lines: 9, Source: swaprebalance.py

Example 4: replication_verification

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def replication_verification(master, bucket, replica, inserted_count, test):
        rest = RestConnection(master)
        nodes = rest.node_statuses()

        if len(nodes) / (1 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            test.log.info(msg.format(final_replication_state))
            # in windows, we need to set timeout_in_seconds to 15+ minutes
            test.assertTrue(
                RebalanceHelper.wait_till_total_numbers_match(master=master,
                                                              bucket=bucket,
                                                              timeout_in_seconds=1200),
                msg="replication was completed but sum(curr_items) dont match the curr_items_total")

            start_time = time.time()
            stats = rest.get_bucket_stats()
            while time.time() < (start_time + 120) and stats["curr_items"] != inserted_count:
                test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
                time.sleep(5)
                stats = rest.get_bucket_stats()
            RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
            test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            stats = rest.get_bucket_stats()
            msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
            test.assertEquals(stats["curr_items"], inserted_count,
                              msg=msg.format(stats["curr_items"], inserted_count))
Author: jchris, Project: testrunner, Lines: 28, Source: failovertests.py
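A short aside on the guard expression len(nodes) / (1 + replica) >= 1 that appears in examples 4, 7, 9 and 12: it seems to check that the cluster holds at least one node per copy of the data (the active copy plus each configured replica), which is the only situation in which the per-node item sums can be expected to match. The snippet below simply evaluates that expression for a few node/replica combinations; it is an illustration and not part of any example.

# Evaluate the replication-check guard for a few cluster sizes (illustration only).
for num_nodes, replica in [(1, 1), (2, 1), (3, 2), (4, 2)]:
    enough = num_nodes / (1.0 + replica) >= 1
    print("{0} nodes, {1} replica(s): run replication check? {2}".format(
        num_nodes, replica, enough))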

Example 5: test_rebalance_in_out_at_once_persistence_stopped

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def test_rebalance_in_out_at_once_persistence_stopped(self):
        """
        PERFORMANCE: Rebalance in/out at once with stopped persistence.

        This test begins by loading a given number of items into a cluster of
        self.nodes_init nodes, then stops persistence on some of those nodes.
        While the test updates existing data and loads new data into the
        cluster, servs_in nodes are added, servs_out nodes are removed, and a
        rebalance is started. After the rebalance and the data operations
        complete, the verification phase begins: wait for the disk queues to
        drain, verify that the numbers of persisted and unpersisted items match
        the expected values, verify that no data has been lost, and check that
        sum(curr_items) matches curr_items_total. Once all checks pass, the
        test is finished.
        Default parameters:
        nodes_init=1, nodes_in=1, nodes_out=1, num_nodes_with_stopped_persistence=1,
        num_items_without_persistence=100000
        """
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait timeout in 60 min because MB-7386 rebalance stuck
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified,
                        "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
        self.verify_unacked_bytes_all_buckets()
Author: EricACooper, Project: testrunner, Lines: 54, Source: rebalanceinout.py

Example 6: verify_cluster_stats

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
 def verify_cluster_stats(self, servers=None, master=None, max_verify=None):
     if servers is None:
         servers = self.servers
     if master is None:
         master = self.master
     if max_verify is None:
         max_verify = self.max_verify
     self._wait_for_stats_all_buckets(servers)
     self._verify_all_buckets(master, max_verify=max_verify)
     self._verify_stats_all_buckets(servers)
     #verify that curr_items_tot corresponds to sum of curr_items from all nodes
     verified = True
     for bucket in self.buckets:
         verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket)
     self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
Author: mschoch, Project: testrunner, Lines: 17, Source: basetestcase.py

Example 7: replication_verification

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def replication_verification(master, bucket_data, replica, test, failed_over=False):
        asserts = []
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        nodes = rest.node_statuses()
        test.log.info("expect {0} / {1} replication ? {2}".format(len(nodes),
            (1.0 + replica), len(nodes) / (1.0 + replica)))
        for bucket in buckets:
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
        if len(nodes) / (1.0 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(300)
            msg = "replication state after waiting for up to 5 minutes : {0}"
            test.log.info(msg.format(final_replication_state))
            #run expiry_pager on all nodes before doing the replication verification
            for bucket in buckets:
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                test.log.info("wait for expiry pager to run on all these nodes")
                time.sleep(30)
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 3600, bucket.name)
                ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
                # on Windows it can take more than 15 minutes for the numbers to match
                replica_match = RebalanceHelper.wait_till_total_numbers_match(bucket=bucket.name,
                    master=master,
                    timeout_in_seconds=600)
                if not replica_match:
                    asserts.append("replication was completed but sum(curr_items) don't match the curr_items_total %s" %
                                   bucket.name)
                if not failed_over:
                    stats = rest.get_bucket_stats(bucket=bucket.name)
                    RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                    msg = "curr_items : {0} is not equal to actual # of keys inserted : {1} : bucket: {2}"

                    if bucket_data[bucket.name]['kv_store'] is None:
                        items_inserted = bucket_data[bucket.name]["items_inserted_count"]
                    else:
                        items_inserted = len(bucket_data[bucket.name]['kv_store'].valid_items())

                    active_items_match = stats["curr_items"] == items_inserted
                    if not active_items_match:
                        asserts.append(msg.format(stats["curr_items"], items_inserted, bucket.name))

        if len(asserts) > 0:
            for msg in asserts:
                test.log.error(msg)
            test.assertTrue(len(asserts) == 0, msg=asserts)
Author: ronniedada, Project: testrunner, Lines: 47, Source: rebalancetests.py

Example 8: verify_cluster_stats

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeout=None, check_items=True):
        if servers is None:
            servers = self.servers
        if master is None:
            master = self.master
        if max_verify is None:
            max_verify = self.max_verify

        self._wait_for_stats_all_buckets(servers, timeout=(timeout or 120))
        if check_items:
            self._verify_all_buckets(master, timeout=timeout, max_verify=max_verify)
            self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
            # verify that curr_items_tot corresponds to sum of curr_items from all nodes
            verified = True
            for bucket in self.buckets:
                verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket)
            self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
        else:
            self.log.warn("verification of items was omitted")
Author: strategist922, Project: testrunner, Lines: 21, Source: basetestcase.py

Example 9: replication_verification

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def replication_verification(master, bucket_data, replica, test, failed_over=False):
        asserts = []
        rest = RestConnection(master)
        buckets = rest.get_buckets()
        nodes = rest.node_statuses()
        test.log.info(
            "expect {0} / {1} replication ? {2}".format(len(nodes), (1.0 + replica), len(nodes) / (1.0 + replica))
        )
        if len(nodes) / (1.0 + replica) >= 1:
            test.assertTrue(
                RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=300),
                msg="replication did not complete after 5 minutes",
            )
            # run expiry_pager on all nodes before doing the replication verification
            for bucket in buckets:
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name)
                test.log.info("wait for expiry pager to run on all these nodes")
                time.sleep(30)
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name, 3600)
                ClusterOperationHelper.set_expiry_pager_sleep_time(master, bucket.name)
                replica_match = RebalanceHelper.wait_till_total_numbers_match(
                    bucket=bucket.name, master=master, timeout_in_seconds=300
                )
                if not replica_match:
                    asserts.append("replication was completed but sum(curr_items) dont match the curr_items_total")
                if not failed_over:
                    stats = rest.get_bucket_stats(bucket=bucket.name)
                    RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                    msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
                    active_items_match = stats["curr_items"] == bucket_data[bucket.name]["items_inserted_count"]
                    if not active_items_match:
                        #                        asserts.append(
                        test.log.error(
                            msg.format(stats["curr_items"], bucket_data[bucket.name]["items_inserted_count"])
                        )

        if len(asserts) > 0:
            for msg in asserts:
                test.log.error(msg)
            test.assertTrue(len(asserts) == 0, msg=asserts)
Author: DavidAlphaFox, Project: couchbase, Lines: 42, Source: rebalancingtests.py

Example 10: _test_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
 def _test_body(self, fill_ram_percentage, number_of_replicas):
     master = self.servers[0]
     self._verify_minimum_requirement(number_of_replicas)
     self._cleanup_cluster()
     self.log.info('cluster is setup')
     bucket_name =\
     'replica-{0}-ram-{1}-{2}'.format(number_of_replicas,
                                      fill_ram_percentage,
                                      uuid.uuid4())
     self._create_bucket(number_of_replicas=number_of_replicas, bucket_name=bucket_name)
     self.log.info('created the bucket')
     distribution = RebalanceBaseTest.get_distribution(fill_ram_percentage)
     self.add_nodes_and_rebalance()
     self.log.info('loading more data into the bucket')
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=self.bucket_name,
                                                           ram_load_ratio=fill_ram_percentage,
                                                           value_size_distribution=distribution,
                                                           number_of_threads=2,
                                                           write_only=True,
                                                           moxi=False)
     self.keys = inserted_keys
     self.log.info('updating all keys by appending _20 to each value')
     self._update_keys('20')
     self.log.info('verifying keys now...._20')
     self._verify_data('20')
     rest = RestConnection(self.servers[0])
     self.assertTrue(RestHelper(rest).wait_for_replication(180),
                     msg="replication did not complete")
     replicated = RebalanceHelper.wait_till_total_numbers_match(master, self.bucket_name, 300)
     self.assertTrue(replicated, msg="replication was completed but sum(curr_items) dont match the curr_items_total")
     self.log.info('updating all keys by appending _30 to each value')
     self._update_keys('30')
     self.log.info('verifying keys now...._30')
     self._verify_data('30')
     # flushing the node before cleanup
     MemcachedClientHelper.flush_bucket(self.servers[0], self.bucket_name)
Author: steveyen, Project: testrunner, Lines: 40, Source: replicationtests.py

Example 11: RestConnection

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
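# Note: this standalone snippet assumes that `server` is an already-constructed
# testrunner server object (for example, taken from the test input configuration)
# and that RestConnection, VBucketAwareMemcached, MemcachedClientHelper,
# RebalanceHelper and uuid have already been imported before it runs.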
server.rest_username = 'Administrator'
server.rest_password = 'password'
server.port = 9000
rest = RestConnection(server)
nodes = rest.node_statuses()

vm = VBucketAwareMemcached(rest,{"name":"bucket-0","password":""})
key = str(uuid.uuid4())
vm.memcached(key).set(key, 0, 0, "hi")
vm.memcached(key).get(key)

RebalanceHelper.print_taps_from_all_nodes(rest,bucket="bucket-0",password="")
RebalanceHelper.verify_items_count(server,"bucket-0")
RebalanceHelper.verify_items_count(server,"bucket-1")
RebalanceHelper.verify_items_count(server,"bucket-2")
RebalanceHelper.wait_till_total_numbers_match(server,"bucket-0",120,"")


cm = MemcachedClientHelper.proxy_client(server, "bucket-0", "")
key = str(uuid.uuid4())
cm.set(key, 0, 0, "hi")
cm.get(key)



cm1 = MemcachedClientHelper.direct_client(server, "default", "")
key = str(uuid.uuid4())
cm1.set(key, 0, 0, "hi")
cm1.get(key)

Author: Boggypop, Project: testrunner, Lines: 31, Source: awareness.py

Example 12: common_test_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_till_total_numbers_match [as alias]
    def common_test_body(self, replica, failover_reason, load_ratio, age, max_nodes):
        log = logger.Logger.get_logger()
        bucket_name = "default"
        log.info("replica : {0}".format(replica))
        log.info("failover_reason : {0}".format(failover_reason))
        log.info("load_ratio : {0}".format(load_ratio))
        log.info("age : {0}".format(age))
        log.info("max_nodes : {0}".format(max_nodes))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.update_autofailover_settings(True, age, max_nodes)
        rest.reset_autofailover()
        bucket_ram = info.memoryQuota * 2 / 3
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")

        credentials = self._input.membase_settings

        log.info("inserting some items in the master before adding any nodes")
        distribution = {512: 0.4, 1 * 1024: 0.59, 5 * 1024: 0.01}
        if load_ratio > 10:
            distribution = {5 * 1024: 0.4, 10 * 1024: 0.5, 20 * 1024: 0.1}

        ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

        inserted_count, rejected_count =\
        MemcachedClientHelper.load_bucket(servers=self._servers,
                                          ram_load_ratio=load_ratio,
                                          value_size_distribution=distribution,
                                          number_of_threads=1)
        log.info('inserted {0} keys'.format(inserted_count))
        nodes = rest.node_statuses()
        # why are we in this while loop?
        while (len(nodes) - replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            self.log.info(msg.format(final_replication_state))
            chosen = AutoFailoverBaseTest.choose_nodes(master, nodes, replica)
            for node in chosen:
                #let's do op
                if failover_reason == 'stop_membase':
                    self.stop_membase(node)
                    log.info("10 seconds delay to wait for membase-server to shutdown")
                    #wait for 5 minutes until node is down
                    self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
                elif failover_reason == "firewall":
                    self.enable_firewall(node)
                    self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                    msg="node status is not unhealthy even after waiting for 5 minutes")
            # list pre-autofailover stats
            stats = rest.get_bucket_stats()
            self.log.info("pre-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            AutoFailoverBaseTest.wait_for_failover_or_assert(master, replica, age, self)

            # manually fail over any unhealthy:active nodes left, max that we should need to manually failover is replica-max_nodes
            manual_failover_count = replica - max_nodes
            for node in chosen:
                self.log.info("checking {0}".format(node.ip))
                if node.status.lower() == "unhealthy" and node.clusterMembership == "active":
                    msg = "node {0} not failed over and we are over out manual failover limit of {1}"
                    self.assertTrue(manual_failover_count > 0, msg.format(node.ip, (replica - max_nodes)))
                    self.log.info("manual failover {0}".format(node.ip))
                    rest.fail_over(node.id)
                    manual_failover_count -= 1

            stats = rest.get_bucket_stats()
            self.log.info("post-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            self.assertTrue(stats["curr_items"] == inserted_count, "failover completed but curr_items ({0}) does not match inserted items ({1})".format(stats["curr_items"], inserted_count))

            log.info("10 seconds sleep after autofailover before invoking rebalance...")
            time.sleep(10)
            rest.rebalance(otpNodes=[node.id for node in nodes],
                           ejectedNodes=[node.id for node in chosen])
            msg="rebalance failed while removing failover nodes {0}".format(chosen)
            self.assertTrue(rest.monitorRebalance(), msg=msg)

            nodes = rest.node_statuses()
            if len(nodes) / (1 + replica) >= 1:
                final_replication_state = RestHelper(rest).wait_for_replication(900)
                msg = "replication state after waiting for up to 15 minutes : {0}"
                self.log.info(msg.format(final_replication_state))
                self.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master,bucket_name,600),
                                msg="replication was completed but sum(curr_items) dont match the curr_items_total")

                start_time = time.time()
                stats = rest.get_bucket_stats()
#......... remaining code omitted .........
Author: vmx, Project: testrunner, Lines: 103, Source: autofailovertests.py


Note: The membase.helper.rebalance_helper.RebalanceHelper.wait_till_total_numbers_match method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to the corresponding project's license before distributing or reusing the code; do not reproduce without permission.