

Python RestConnection.stop_rebalance Method Code Examples

This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.stop_rebalance. If you are wondering what RestConnection.stop_rebalance does or how to use it, the curated examples below may help. They also illustrate the broader usage of the containing class, membase.api.rest_client.RestConnection.


The following presents 15 code examples of the RestConnection.stop_rebalance method, sorted by popularity by default.
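Before the examples, here is a minimal usage sketch distilled from the recurring pattern below. It is not taken from any single example: the helper name stop_rebalance_if_running is hypothetical, and it assumes a testrunner-style environment where a server object carries the connection details that RestConnection expects. The two calls it relies on, _rebalance_progress_status() and stop_rebalance(), both appear throughout the examples.

# A minimal sketch (hypothetical helper; see the assumptions above), not one of
# the collected examples.
from membase.api.rest_client import RestConnection

def stop_rebalance_if_running(server):
    """Stop an in-flight rebalance so later operations start from a clean state."""
    rest = RestConnection(server)
    # _rebalance_progress_status() returns 'running' while a rebalance is active
    if rest._rebalance_progress_status() == 'running':
        # stop_rebalance() returns a truthy value once the stop request is accepted
        stopped = rest.stop_rebalance()
        assert stopped, "unable to stop rebalance"

Most of the setup/teardown methods below follow exactly this check-then-stop pattern before deleting buckets and cleaning up the cluster.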

Example 1: restart_cbrecover_multiple_failover_swapout_reb_routine

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def restart_cbrecover_multiple_failover_swapout_reb_routine(self):
        self.common_preSetup()
        when_step = self._input.param("when_step", "recovery_when_rebalance")
        if self._failover is not None:
            if "source" in self._failover:
                rest = RestConnection(self.src_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.src_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.src_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on source : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on source ..".format(self._failover_count))
                self.failed_nodes = self.src_nodes[(len(self.src_nodes) - self._failover_count):len(self.src_nodes)]
                self.cluster.failover(self.src_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.src_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.src_nodes.extend(add_nodes)
                self.sleep(self.wait_timeout / 4)
                # CALL THE CBRECOVERY ROUTINE WITHOUT WAIT FOR COMPLETED
                self.cbr_routine(self.dest_master, self.src_master, False)

                if "create_bucket_when_recovery" in when_step:
                    name = 'standard_bucket'
                    try:
                        self.cluster.create_standard_bucket(self.src_master, name, STANDARD_BUCKET_PORT + 10, 100, 1)
                    except BucketCreationException, e:
                        self.log.info("bucket creation failed during cbrecovery as expected")
                    # but we are still able to create a bucket on the destination
                    self.cluster.create_standard_bucket(self.dest_master, name, STANDARD_BUCKET_PORT + 10, 100, 1)
                    # here we re-call cbrecovery (it seems to be supported even while still running);
                    # if recovery is fast (i.e. completed) we can get "No recovery needed"
                    self.cbr_routine(self.dest_master, self.src_master)
                elif "recovery_when_rebalance" in when_step:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    try:
                        self.cbr_routine(self.dest_master, self.src_master)
                        self.log.exception("cbrecovery should fail when rebalance is in progress")
                    except CBRecoveryFailedException, e:
                        self.log.info("cbrecovery failed as expected when there are no failed-over nodes")
                    reached = RestHelper(rest).rebalance_reached()
                    self.assertTrue(reached, "rebalance failed or did not complete")
                    if self._replication_direction_str == "unidirection":
                        self.log.warn("we expect data loss on the source cluster with unidirectional replication")
                        self.log.warn("data verification will be skipped")
                        return
                elif "recovery_when_rebalance_stopped" in when_step:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    rest.stop_rebalance()
                    try:
                        self.cbr_routine(self.dest_master, self.src_master)
                        self.log.exception("cbrecovery should fail when rebalance has been stopped")
                    except CBRecoveryFailedException, e:
                        self.log.info("cbrecovery failed as expected when there are no failed-over nodes")
Author: abhinavdangeti, Project: testrunner, Lines: 62, Source: cbRecoverytests.py

Example 2: cleanup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def cleanup(self):
        rest = RestConnection(self.master)
        rest.stop_rebalance()
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Author: Boggypop, Project: testrunner, Lines: 9, Source: moxi.py

Example 3: common_setup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)

        # Clear the state from a previous invalid run
        rest.stop_rebalance()
        self.load_started = False
        self.loaders = []
        SwapRebalanceBase.common_tearDown(self)

        # Initialize test params
        self.replica  = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Author: steveyen, Project: testrunner, Lines: 35, Source: swaprebalance.py

Example 4: tearDown

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def tearDown(self):
        master = self.servers[0]
        ClusterOperationHelper.set_vbuckets(master, self.old_vbuckets)
        rest = RestConnection(master)
        rest.stop_rebalance()
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               self.servers[1:self.num_servers])
        self.cluster.bucket_delete(master, self.bucket)
        self.cluster.shutdown()
Author: IrynaMironava, Project: testrunner, Lines: 11, Source: checkpoint.py

Example 5: common_setup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        master = self.servers[0]
        rest = RestConnection(master)

        # Cleanup previous state
        self.task_manager = None
        rest.stop_rebalance()
        RebalanceBaseTest.reset(self)

        # Initialize test params
        self.replica = self.input.param("replica", 1)

        # By default we use keys-count for LoadTask
        # Use keys-count=-1 to use load-ratio
        self.keys_count = self.input.param("keys-count", 30000)
        self.load_ratio = self.input.param("load-ratio", 6)
        self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
        self.delete_ratio = self.input.param("delete-ratio", 0.1)
        self.access_ratio = self.input.param("access-ratio", 0.8)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.num_rebalance = self.input.param("num-rebalance", 1)
        self.do_ascii = self.input.param("ascii", False)
        self.do_verify = self.input.param("do-verify", True)
        self.repeat = self.input.param("repeat", 1)
        self.max_ops_per_second = self.input.param("max_ops_per_second", 500)
        self.min_item_size = self.input.param("min_item_size", 128)
        self.do_stop = self.input.param("do-stop", False)
        self.skip_cleanup = self.input.param("skip-cleanup", False)

        self.checkResidentRatio = self.input.param("checkResidentRatio", False)
        self.activeRatio = self.input.param("activeRatio", 50)
        self.replicaRatio = self.input.param("replicaRatio", 50)
        self.case_number = self.input.param("case_number", 0)

        self.log.info('picking server : {0} as the master'.format(master))

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
            password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0),
                howmany=self.num_buckets, sasl=not self.do_ascii)
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, "wait_for_memcached failed")

        # Initialize and start the taskManager
        self.task_manager = taskmanager.TaskManager()
        self.task_manager.start()
Author: ronniedada, Project: testrunner, Lines: 56, Source: rebalancetests.py

Example 6: stop_rebalance

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def stop_rebalance(self, task_manager):
        rest = RestConnection(self.servers[0])
        try:
            rest.stop_rebalance()
            # We don't want to start rebalance immediately
            self.log.info("Rebalance Stopped, sleep for 20 secs")
            time.sleep(20)
            self.do_stop = False
            self.state = "start_rebalance"
            task_manager.schedule(self)
        except Exception as e:
            self.state = "finishing"
            self.set_result({"status": "error", "value": e})
Author: jchris, Project: testrunner, Lines: 15, Source: task.py

Example 7: common_setup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def common_setup(self):
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from a previous invalid run
            if rest._rebalance_progress_status() == "running":
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info(
                "==============  SwapRebalanceBase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase.reset(self)

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info("picking server : {0} as the master".format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info(
                "==============  SwapRebalanceBase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            SwapRebalanceBase._log_start(self)
        except Exception, e:
            self.cluster_helper.shutdown()
            self.fail(e)
Author: pkdevboxy, Project: testrunner, Lines: 62, Source: swaprebalance.py

Example 8: reset

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def reset(self):
        self.log.info(
            "==============  SwapRebalanceBase cleanup was started for test #{0} {1} ==============".format(
                self.case_number, self._testMethodName
            )
        )
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
        for server in self.servers:
            rest = RestConnection(server)
            if rest._rebalance_progress_status() == "running":
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
            if server.data_path:
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.log.info(
            "==============  SwapRebalanceBase cleanup was finished for test #{0} {1} ==============".format(
                self.case_number, self._testMethodName
            )
        )
Author: jason-hou, Project: testrunner, Lines: 28, Source: swaprebalance.py

Example 9: tearDown

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0
                    and TestInputSingleton.input.param("stop-on-failure", False)) \
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="
                              .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                self.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="
                              .format(self.case_number, self._testMethodName))
        except BaseException:
            # increase case_number to retry tearDown in setup for the next test
            self.case_number += 1000
        finally:
            # stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)
Author: strategist922, Project: testrunner, Lines: 32, Source: basetestcase.py

Example 10: tearDown

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def tearDown(self):
        try:
            test_failed = len(self._resultForDoCleanups.errors)
            if self.driver and test_failed:
                BaseHelper(self).create_screenshot()
            if self.driver:
                self.driver.close()
            if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
                print "test fails, teardown will be skipped!!!"
                return
            rest = RestConnection(self.servers[0])
            reb_status = None  # stays None if the status call fails for an unexpected reason
            try:
                reb_status = rest._rebalance_progress_status()
            except ValueError as e:
                if e.message == 'No JSON object could be decoded':
                    print "cluster not initialized!!!"
                    return
            if reb_status == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        except Exception as e:
            raise e
        finally:
            if self.driver:
                self.shell.disconnect()
Author: arod1987, Project: testrunner, Lines: 31, Source: uibasetest.py

Example 11: test_start_stop_rebalance_with_mutations

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def test_start_stop_rebalance_with_mutations(self):
        """
            Start-stop rebalance in/out, adding/removing additional nodes after stopping the rebalance, with data
            mutations running in the background.

            This test begins by loading a given number of items into the cluster. It then
            adds servs_in nodes, removes servs_out nodes, and starts a rebalance. The rebalance
            is stopped when its progress reaches 20%. After that we add extra_nodes_in and remove
            extra_nodes_out, then restart the rebalance with the new cluster configuration. Later the rebalance
            is stopped/restarted at 40/60/80% progress. Before each iteration we start data mutations
            and end the mutations before data validation. After each iteration we wait for
            the disk queues to drain and then verify that there has been no data loss:
            sum(curr_items) matches curr_items_total. Once the cluster is rebalanced, the test is finished.
            The order of add/remove nodes looks like:
            self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
            """
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(self.servs_init)
        self.log.info("Current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("Adding nodes {0} to cluster".format(self.servs_in))
        self.log.info("Removing nodes {0} from cluster".format(self.servs_out))
        add_in_once = self.extra_servs_in
        result_nodes = set(self.servs_init + self.servs_in) - set(self.servs_out)
        # the last iteration will be with i=5, for this case rebalance should be completed,
        # that also is verified and tracked
        for i in range(1, 6):
            if self.withMutationOps:
                tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
            if i == 1:
                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init], self.servs_in,
                                                         self.servs_out)
            else:
                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init] + self.servs_in, add_in_once,
                                                         self.servs_out + self.extra_servs_out)
                add_in_once = []
                result_nodes = set(self.servs_init + self.servs_in + self.extra_servs_in) - set(
                    self.servs_out + self.extra_servs_out)
            self.sleep(20)
            expected_progress = 20 * i
            reached = RestHelper(rest).rebalance_reached(expected_progress)
            self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
            if not RestHelper(rest).is_cluster_rebalanced():
                self.log.info("Stop the rebalance")
                stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
                self.assertTrue(stopped, msg="Unable to stop rebalance")
                if self.withMutationOps:
                    for task in tasks:
                        task.result(self.wait_timeout * 20)
                self.sleep(5)
            rebalance.result()
            if RestHelper(rest).is_cluster_rebalanced():
                self.verify_cluster_stats(result_nodes)
                self.log.info(
                    "Rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress)))
                break
            else:
                self.log.info("Rebalance is still required. Verifying the data in the buckets")
                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
                self.verify_cluster_stats(result_nodes, check_bucket_stats=False, verify_total_items=False)
        self.verify_unacked_bytes_all_buckets()
Author: arod1987, Project: testrunner, Lines: 62, Source: rebalance_start_stop.py

Example 12: tearDown

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def tearDown(self):
        try:
            if self.driver:
                path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
                full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
                self.log.info('screenshot is available: %s' % full_path)
                if not os.path.exists(path_screen):
                    os.mkdir(path_screen)
                self.driver.get_screenshot_as_file(os.path.abspath(full_path))
            rest = RestConnection(self.servers[0])
            if rest._rebalance_progress_status() == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            if self.driver:
                self.driver.close()
        except Exception as e:
            raise e
        finally:
            if self.driver:
                self.shell.disconnect()
            self.cluster.shutdown()
Author: DavidAlphaFox, Project: couchbase, Lines: 27, Source: uibasetest.py

Example 13: _common_clenup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def _common_clenup(self):
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Author: abhinavdangeti, Project: testrunner, Lines: 10, Source: viewtests.py

Example 14: test_start_stop_rebalance_in_out

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def test_start_stop_rebalance_in_out(self):
        """
        Start-stop rebalance in/out, adding/removing additional nodes after stopping the rebalance.

        This test begins by loading a given number of items into the cluster. It then
        adds servs_in nodes, removes servs_out nodes, and starts a rebalance. The rebalance
        is stopped when its progress reaches 20%. After that we add extra_nodes_in and remove
        extra_nodes_out, then restart the rebalance with the new cluster configuration. Later the rebalance
        is stopped/restarted at 40/60/80% progress. After each iteration we wait for
        the disk queues to drain and then verify that there has been no data loss:
        sum(curr_items) matches curr_items_total. Once the cluster is rebalanced, the test is finished.
        The order of add/remove nodes looks like:
        self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
        """
        extra_nodes_in = self.input.param("extra_nodes_in", 0)
        extra_nodes_out = self.input.param("extra_nodes_out", 0)
        servs_init = self.servers[: self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        extra_servs_in = [self.servers[i + self.nodes_init + self.nodes_in] for i in range(extra_nodes_in)]
        extra_servs_out = [self.servers[self.nodes_init - i - 1 - self.nodes_out] for i in range(extra_nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        add_in_once = extra_servs_in
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # the latest iteration will be with i=5, for this case rebalance should be completed,
        # that also is verified and tracked
        for i in range(1, 6):
            if i == 1:
                rebalance = self.cluster.async_rebalance(servs_init[: self.nodes_init], servs_in, servs_out)
            else:
                rebalance = self.cluster.async_rebalance(
                    servs_init[: self.nodes_init] + servs_in, add_in_once, servs_out + extra_servs_out
                )
                add_in_once = []
                result_nodes = set(servs_init + servs_in + extra_servs_in) - set(servs_out + extra_servs_out)
            self.sleep(20)
            expected_progress = 20 * i
            reached = RestHelper(rest).rebalance_reached(expected_progress)
            self.assertTrue(reached, "rebalance failed or did not reach {0}%".format(expected_progress))
            if not RestHelper(rest).is_cluster_rebalanced():
                stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
                self.assertTrue(stopped, msg="unable to stop rebalance")
            rebalance.result()
            if RestHelper(rest).is_cluster_rebalanced():
                self.verify_cluster_stats(result_nodes)
                self.log.info(
                    "rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress))
                )
                break
            else:
                self.log.info("rebalance is still required")
                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
        self.verify_unacked_bytes_all_buckets()
Author: pkdevboxy, Project: testrunner, Lines: 59, Source: rebalanceinout.py

Example 15: tearDown

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import stop_rebalance [as alias]
    def tearDown(self):
        rest = RestConnection(self.master)
        if rest._rebalance_progress_status() == 'running':
            self.log.warning("rebalancing is still running, test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        try:
            super(ConcurrentTests, self).tearDown()
        except:
            pass
Author: EricACooper, Project: testrunner, Lines: 12, Source: tuq_concurrent.py


Note: The membase.api.rest_client.RestConnection.stop_rebalance method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.