

Python ClusterOperationHelper.find_orchestrator Method Code Examples

This article collects typical usage examples of the Python method membase.helper.cluster_helper.ClusterOperationHelper.find_orchestrator. If you are wondering how ClusterOperationHelper.find_orchestrator is used in practice, or what real calls to it look like, the hand-picked examples below should help. You can also explore further usage examples of the containing class, membase.helper.cluster_helper.ClusterOperationHelper.


The sections below show 15 code examples of the ClusterOperationHelper.find_orchestrator method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
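
Before the examples, here is a minimal usage sketch of the call pattern they all share. It is not taken from the examples below; it assumes a testrunner-style server object (something a REST connection can reach) passed in as master_server, and that parameter name is purely illustrative.

# Minimal sketch of the shared call pattern (assumption: master_server is a
# testrunner server object with a reachable REST endpoint).
from membase.helper.cluster_helper import ClusterOperationHelper

def log_orchestrator(master_server):
    # find_orchestrator returns a (status, content) pair: status is True on
    # success, and content identifies the orchestrator (master) node, which is
    # why the examples below search it for a node's IP.
    status, content = ClusterOperationHelper.find_orchestrator(master_server)
    if not status:
        raise Exception("Unable to find orchestrator: {0}:{1}".format(status, content))
    print("Current orchestrator: {0}".format(content))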

Example 1: online_upgrade

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def online_upgrade(self):
        servers_in = self.servers[self.nodes_init:self.num_servers]
        self.cluster.rebalance(self.servers[:self.nodes_init], servers_in, [])
        self.log.info("Rebalance in all {0} nodes" \
                       .format(self.input.param("upgrade_version", "")))
        self.sleep(self.sleep_time)
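        # find_orchestrator returns (status, content); content identifies the
        # orchestrator node (its string contains the node's IP), so it is
        # searched below to detect whether a new node became the master.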
        status, content = ClusterOperationHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                self._new_master(new_server)
                FIND_MASTER = True
                self.log.info("%s node %s becomes the master" \
                    % (self.input.param("upgrade_version", ""), new_server.ip))
                break
        if self.input.param("initial_version", "")[:5] in COUCHBASE_VERSION_2 \
            and not FIND_MASTER and not self.is_downgrade:
            raise Exception( \
                "After rebalance in {0} nodes, {0} node doesn't become master" \
                .format(self.input.param("upgrade_version", "")))

        servers_out = self.servers[:self.nodes_init]
        self.log.info("Rebalanced out all old version nodes")
        self.cluster.rebalance(self.servers[:self.num_servers], [], servers_out)
Developer: DavidAlphaFox, Project: couchbase, Lines: 28, Source: newupgradetests.py

Example 2: online_upgrade_rebalance_in_out

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def online_upgrade_rebalance_in_out(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.initial_version = upgrade_version
        self.product = "couchbase-server"
        self._install(self.servers[self.initial_num_servers : self.num_servers])
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        servers_in = self.servers[self.initial_num_servers : self.num_servers]
        self.cluster.rebalance(self.servers[: self.initial_num_servers], servers_in, [])
        self.log.info("Rebalance in all 2.0 Nodes")
        time.sleep(self.sleep_time)
        status, content = ClusterHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                FIND_MASTER = True
                self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
        if not FIND_MASTER:
            raise Exception("After rebalance in 2.0 Nodes, 2.0 doesn't become the master")

        servers_out = self.servers[: self.initial_num_servers]
        self.cluster.rebalance(self.servers[: self.num_servers], [], servers_out)
        self.log.info("Rebalance out all old version nodes")
        time.sleep(self.sleep_time)
        self.verify_upgrade_rebalance_in_out()
Developer: mschoch, Project: testrunner, Lines: 33, Source: newupgradetests.py

Example 3: online_upgrade_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
 def online_upgrade_swap_rebalance(self):
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
     self.initial_version = self.upgrade_versions[0]
     self.product = 'couchbase-server'
     self._install(self.servers[self.nodes_init:self.num_servers])
     self.sleep(self.sleep_time, "Installation of new version is done. Wait for rebalance")
     self.swap_num_servers = self.input.param('swap_num_servers', 1)
     old_servers = self.servers[:self.nodes_init]
     new_servers = []
     for i in range(self.nodes_init / self.swap_num_servers):
         servers_in = self.servers[(self.nodes_init + i * self.swap_num_servers):
                                   (self.nodes_init + (i + 1) * self.swap_num_servers)]
         servers_out = self.servers[(i * self.swap_num_servers):((i + 1) * self.swap_num_servers)]
         servers = old_servers + new_servers
         self.log.info("Swap rebalance: rebalance out %s old version nodes, rebalance in %s 2.0 Nodes"
                       % (self.swap_num_servers, self.swap_num_servers))
         self.cluster.rebalance(servers, servers_in, servers_out)
         self.sleep(self.sleep_time)
         old_servers = self.servers[((i + 1) * self.swap_num_servers):self.nodes_init]
         new_servers = new_servers + servers_in
         servers = old_servers + new_servers
         status, content = ClusterOperationHelper.find_orchestrator(servers[0])
         self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                         format(status, content))
         FIND_MASTER = False
         for new_server in new_servers:
             if content.find(new_server.ip) >= 0:
                 self._new_master(new_server)
                 FIND_MASTER = True
                 self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
         if not FIND_MASTER:
             raise Exception("After rebalance in 2.0 nodes, 2.0 doesn't become the master ")
     self.verification(self.servers[self.nodes_init : self.num_servers])
Developer: Boggypop, Project: testrunner, Lines: 37, Source: newupgradetests.py

Example 4: _failover_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def _failover_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        num_initial_servers = self.num_initial_servers
        intial_severs = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds[0] = content

        self.log.info("FAILOVER PHASE")
        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.fail_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(new_swap_servers))

        SwapRebalanceBase.verification_phase(self, master)
Developer: Boggypop, Project: testrunner, Lines: 60, Source: swaprebalance.py

Example 5: online_upgrade_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
 def online_upgrade_swap_rebalance(self, services=None):
     servers_in = self.servers[self.nodes_init:self.num_servers]
     self.sleep(self.sleep_time)
     status, content = ClusterOperationHelper.find_orchestrator(self.master)
     self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}". \
                     format(status, content))
     i = 0
     for server_in, service_in in zip(servers_in, services):
         log.info("Swap rebalance nodes")
         self.cluster.rebalance(self.servers[:self.nodes_init], [server_in], [self.servers[i]], [service_in])
         self._new_master(self.servers[self.nodes_init])
         i += 1
Developer: ritamcouchbase, Project: testrunner, Lines: 14, Source: eventing_upgrade.py

Example 6: online_upgrade_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def online_upgrade_swap_rebalance(self):
        self._install(self.servers[: self.nodes_init])
        self.operations(self.servers[: self.nodes_init])
        self.initial_version = self.upgrade_versions[0]
        self.product = "couchbase-server"
        self.sleep(
            self.sleep_time,
            "Pre-setup of old version is done. Wait for online upgrade to {0} version".format(self.initial_version),
        )
        self._install(self.servers[self.nodes_init : self.num_servers])
        self.sleep(self.sleep_time, "Installation of new version is done. Wait for rebalance")
        self.swap_num_servers = self.input.param("swap_num_servers", 1)
        old_servers = self.servers[: self.nodes_init]
        new_vb_nums = RestHelper(RestConnection(self.master))._get_vbuckets(
            old_servers, bucket_name=self.buckets[0].name
        )
        new_servers = []
        for i in range(self.nodes_init / self.swap_num_servers):
            old_vb_nums = copy.deepcopy(new_vb_nums)
            servers_in = self.servers[
                (self.nodes_init + i * self.swap_num_servers) : (self.nodes_init + (i + 1) * self.swap_num_servers)
            ]
            servers_out = self.servers[(i * self.swap_num_servers) : ((i + 1) * self.swap_num_servers)]
            servers = old_servers + new_servers
            self.log.info(
                "Swap rebalance: rebalance out %s old version nodes, rebalance in %s 2.0 Nodes"
                % (self.swap_num_servers, self.swap_num_servers)
            )
            self.cluster.rebalance(servers, servers_in, servers_out)
            self.sleep(self.sleep_time)
            old_servers = self.servers[((i + 1) * self.swap_num_servers) : self.nodes_init]
            new_servers = new_servers + servers_in
            servers = old_servers + new_servers
            new_vb_nums = RestHelper(RestConnection(self.master))._get_vbuckets(
                servers, bucket_name=self.buckets[0].name
            )
            self._verify_vbucket_nums_for_swap(old_vb_nums, new_vb_nums)
            status, content = ClusterOperationHelper.find_orchestrator(servers[0])
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
            FIND_MASTER = False
            for new_server in new_servers:
                if content.find(new_server.ip) >= 0:
                    self._new_master(new_server)
                    FIND_MASTER = True
                    self.log.info("3.0 Node %s becomes the master" % (new_server.ip))
            if not FIND_MASTER:
                raise Exception("After rebalance in 3.0 nodes, 3.0 doesn't become the master ")
        """ verify DCP upgrade in 3.0.0 version """
        self.monitor_dcp_rebalance()

        self.verification(new_servers)
Developer: , Project: , Lines: 53, Source:

Example 7: _online_upgrade

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
 def _online_upgrade(self, update_servers, extra_servers, check_newmaster=True):
     self.cluster.rebalance(update_servers + extra_servers, extra_servers, [])
     self.log.info("Rebalance in all 2.0 Nodes")
     self.sleep(self.sleep_time)
     status, content = ClusterOperationHelper.find_orchestrator(update_servers[0])
     self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                     format(status, content))
     if check_newmaster:
         FIND_MASTER = False
         for new_server in extra_servers:
             if content.find(new_server.ip) >= 0:
                 FIND_MASTER = True
                 self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
                 break
         if not FIND_MASTER:
             raise Exception("After rebalance in 2.0 Nodes, 2.0 doesn't become the master")
     self.log.info("Rebalanced out all old version nodes")
     self.cluster.rebalance(update_servers + extra_servers, [], update_servers)
Developer: Boggypop, Project: testrunner, Lines: 20, Source: upgradeXDCR.py

Example 8: online_upgrade_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def online_upgrade_swap_rebalance(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.initial_version = upgrade_version
        self.product = "couchbase-server"
        self._install(self.servers[self.initial_num_servers : self.num_servers])
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        self.swap_num_servers = self.input.param("swap_num_servers", 1)
        old_servers = self.servers[: self.initial_num_servers]
        new_servers = []
        for i in range(self.initial_num_servers / self.swap_num_servers):
            servers_in = self.servers[
                (self.initial_num_servers + i * self.swap_num_servers) : (
                    self.initial_num_servers + (i + 1) * self.swap_num_servers
                )
            ]
            servers_out = self.servers[(i * self.swap_num_servers) : ((i + 1) * self.swap_num_servers)]
            servers = old_servers + new_servers
            self.cluster.rebalance(servers, servers_in, servers_out)
            self.log.info(
                "Swap rebalance: rebalance out %s old version nodes, rebalance in %s 2.0 Nodes"
                % (self.swap_num_servers, self.swap_num_servers)
            )
            time.sleep(self.sleep_time)
            old_servers = self.servers[((i + 1) * self.swap_num_servers) : self.initial_num_servers]
            new_servers = new_servers + servers_in
            servers = old_servers + new_servers
            status, content = ClusterHelper.find_orchestrator(servers[0])
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
            FIND_MASTER = False
            for new_server in new_servers:
                if content.find(new_server.ip) >= 0:
                    FIND_MASTER = True
                    self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
            if not FIND_MASTER:
                raise Exception("After rebalance in 2.0 nodes, 2.0 doesn't become the master ")

        self.verify_upgrade_rebalance_in_out()
Developer: mschoch, Project: testrunner, Lines: 45, Source: newupgradetests.py

Example 9: online_upgrade

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def online_upgrade(self):
        servers_in = self.servers[self.nodes_init : self.num_servers]
        self.cluster.rebalance(self.servers[: self.nodes_init], servers_in, [])
        self.log.info("Rebalance in all 2.0 Nodes")
        self.sleep(self.sleep_time)
        status, content = ClusterOperationHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                self._new_master(new_server)
                FIND_MASTER = True
                self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
                break
        if not FIND_MASTER and not self.is_downgrade:
            raise Exception("After rebalance in 3.0 Nodes, 3.0 doesn't become the master")

        servers_out = self.servers[: self.nodes_init]
        self.log.info("Rebalanced out all old version nodes")
        self.cluster.rebalance(self.servers[: self.num_servers], [], servers_out)
Developer: , Project: , Lines: 22, Source:

Example 10: _online_upgrade

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
 def _online_upgrade(self, update_servers, extra_servers, check_newmaster=True):
     self.cluster.rebalance(update_servers + extra_servers, extra_servers, [])
     current_versions = RestConnection(update_servers[0]).get_nodes_versions()
     added_versions = RestConnection(extra_servers[0]).get_nodes_versions()
     self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
     self.sleep(self.sleep_time)
     status, content = ClusterOperationHelper.find_orchestrator(update_servers[0])
     self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                     format(status, content))
     self.log.info("after rebalance in the master is {0}".format(content))
     if check_newmaster:
         FIND_MASTER = False
         for new_server in extra_servers:
             if content.find(new_server.ip) >= 0:
                 FIND_MASTER = True
                 self.log.info("{0} Node {1} becomes the master".format(added_versions[0], new_server.ip))
                 break
         if not FIND_MASTER:
             raise Exception("After rebalance in {0} Nodes, one of them doesn't become the master".format(added_versions[0]))
     self.log.info("Rebalanced out all old version nodes")
     self.cluster.rebalance(update_servers + extra_servers, [], update_servers)
Developer: bcui6611, Project: testrunner, Lines: 23, Source: upgradeXDCR.py

Example 11: _add_back_failed_node

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def _add_back_failed_node(self, do_node_cleanup=False):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]

        # List of servers that will not be failed over
        not_failed_over = []
        for server in self.servers:
            if server.ip not in [node.ip for node in toBeEjectedNodes]:
                not_failed_over.append(server)
                self.log.info("Node %s not failed over" % server.ip)

        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                format(status, content))
            # When swapping all the nodes
            if self.num_swap is len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content
            master = not_failed_over[-1]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(optNodesIds))

        # Add back the same failed over nodes

        # Cleanup the node, somehow
        # TODO: cluster_run?
        if do_node_cleanup:
            pass

        # Make rest connection with node part of cluster
        rest = RestConnection(master)

        # Given the optNode, find ip
        add_back_servers = []
        nodes = rest.get_nodes()
        for server in [node.ip for node in nodes]:
            if isinstance(server, unicode):
                add_back_servers.append(server)
        final_add_back_servers = []
        for server in self.servers:
            if server.ip not in add_back_servers:
                final_add_back_servers.append(server)

        for server in final_add_back_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(add_back_servers))

        SwapRebalanceBase.verification_phase(self, master)
Developer: Boggypop, Project: testrunner, Lines: 92, Source: swaprebalance.py

Example 12: _common_test_body_failed_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def _common_test_body_failed_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        intial_severs = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.swap_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            # When swapping all the nodes
            if self.num_swap is len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
            ejectedNodes=optNodesIds)
        SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
        self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
        reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
        if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
            # handle situation when rebalance failed at the beginning
            self.log.error('seems rebalance failed!')
            self.log.info("Latest logs from UI:")
            for i in rest.get_logs(): self.log.error(i)
            self.fail("rebalance failed even before killing memcached")
        bucket = rest.get_buckets()[0].name
        pid = None
        if self.swap_orchestrator:
            # get PID via remote connection if master is a new node
            shell = RemoteMachineShellConnection(master)
            o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
            pid = o[0]
            shell.disconnect()
        else:
            for i in xrange(2):
                try:
                    _mc = MemcachedClientHelper.direct_client(master, bucket)
                    pid = _mc.stats()["pid"]
                    break
                except EOFError as e:
                    self.log.error("{0}.Retry in 2 sec".format(e))
                    SwapRebalanceBase.sleep(self, 1)
        if pid is None:
            self.fail("impossible to get a PID")
        command = "os:cmd(\"kill -9 {0} \")".format(pid)
        self.log.info(command)
        killed = rest.diag_eval(command)
        self.log.info("killed {0}:{1}??  {2} ".format(master.ip, master.port, killed))
        self.log.info("sleep for 10 sec after kill memcached")
        SwapRebalanceBase.sleep(self, 10)
        # we can't get stats for new node when rebalance falls
        if not self.swap_orchestrator:
            ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
        i = 0
        # we expect that rebalance will be failed
        try:
            rest.monitorRebalance()
        except RebalanceFailedException:
            # retry rebalance if it failed
            self.log.warn("Rebalance failed but it's expected")
#......... remaining code omitted here .........
Developer: Boggypop, Project: testrunner, Lines: 103, Source: swaprebalance.py

Example 13: _common_test_body_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def _common_test_body_swap_rebalance(self, do_stop_start=False):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        intial_severs = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
        self.assertTrue(status, msg="Rebalance was failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]

        if self.swap_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                format(status, content))
            if self.num_swap is len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        if self.do_access:
            self.log.info("DATA ACCESS PHASE")
            self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
            ejectedNodes=optNodesIds)

        if do_stop_start:
            # Rebalance is stopped at 20%, 40% and 60% completion
            retry = 0
            for expected_progress in (20, 40, 60):
                self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
                              format(expected_progress))
                while True:
                    progress = rest._rebalance_progress()
                    if progress < 0:
                        self.log.error("rebalance progress code : {0}".format(progress))
                        break
                    elif progress == 100:
                        self.log.warn("Rebalance has already reached 100%")
                        break
                    elif progress >= expected_progress:
                        self.log.info("Rebalance will be stopped with {0}%".format(progress))
                        stopped = rest.stop_rebalance()
                        self.assertTrue(stopped, msg="unable to stop rebalance")
                        SwapRebalanceBase.sleep(self, 20)
                        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                                       ejectedNodes=optNodesIds)
                        break
                    elif retry > 100:
                        break
                    else:
                        retry += 1
                        SwapRebalanceBase.sleep(self, 1)
        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(optNodesIds))
        SwapRebalanceBase.verification_phase(self, master)
Developer: Boggypop, Project: testrunner, Lines: 90, Source: swaprebalance.py

Example 14: test_capi_with_online_upgrade

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def test_capi_with_online_upgrade(self):
        self._install(self._input.servers[:self.src_init + self.dest_init])
        upgrade_version = self._input.param("upgrade_version", "5.0.0-1797")
        upgrade_nodes = self.src_cluster.get_nodes()
        extra_nodes = self._input.servers[self.src_init + self.dest_init:]

        repl_id = self._start_es_replication()

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        find_master = False
        for new_server in extra_nodes:
            if content.find(new_server.ip) >= 0:
                find_master = True
                self.log.info("{0} Node {1} becomes the master".format(added_versions[0], new_server.ip))
                break
        if not find_master:
            raise Exception("After rebalance in {0} Nodes, one of them doesn't become the master".
                            format(added_versions[0]))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[self.src_init + self.dest_init]

        self._install(self.src_cluster.get_nodes(), version=upgrade_version)
        upgrade_nodes = self._input.servers[self.src_init + self.dest_init:]
        extra_nodes = self.src_cluster.get_nodes()

        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        added_versions = RestConnection(extra_nodes[0]).get_nodes_versions()
        self.cluster.rebalance(upgrade_nodes + extra_nodes, extra_nodes, [])
        self.log.info("Rebalance in all {0} nodes completed".format(added_versions[0]))
        RestConnection(upgrade_nodes[0]).get_nodes_versions()
        self.sleep(15)
        status, content = ClusterOperationHelper.find_orchestrator(upgrade_nodes[0])
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        self.log.info("after rebalance in the master is {0}".format(content))
        self.log.info("Rebalancing out all old version nodes")
        self.cluster.rebalance(upgrade_nodes + extra_nodes, [], upgrade_nodes)
        self.src_master = self._input.servers[0]

        self.log.info("######### Upgrade of CB cluster completed ##########")

        rest_conn = RestConnection(self.src_master)
        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')

        gen = DocumentGenerator('es', '{{"key":"value"}}',  xrange(100), start=0, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.perform_update_delete()

        rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')

        self._wait_for_es_replication_to_catchup()

        self._verify_es_results()
Developer: arod1987, Project: testrunner, Lines: 80, Source: capiXDCR.py

Example 15: _common_test_body_failed_swap_rebalance

# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import find_orchestrator [as alias]
    def _common_test_body_failed_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        num_initial_servers = self.num_initial_servers
        creds = self.input.membase_settings
        intial_severs = self.servers[:num_initial_servers]

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        RebalanceHelper.rebalance_in(intial_severs, len(intial_severs)-1)

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        self.log.info("DATA LOAD PHASE")
        loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        current_nodes = RebalanceHelper.getOtpNodeIds(master)
        self.log.info("current nodes : {0}".format(current_nodes))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.swap_orchestrator:
            status, content = ClusterHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            # When swapping all the nodes
            if self.num_swap is len(current_nodes):
                optNodesIds.append(content)
            else:
                optNodesIds[0] = content

        for node in optNodesIds:
            self.log.info("removing node {0} and rebalance afterwards".format(node))

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers+self.num_swap]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.swap_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        loaders = SwapRebalanceBase.start_access_phase(self, master)

        self.log.info("SWAP REBALANCE PHASE")
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
            ejectedNodes=optNodesIds)

        # Rebalance is failed at 20%, 40% and 60% completion
        for i in [1, 2, 3]:
            expected_progress = 20*i
            self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
            reached = RestHelper(rest).rebalance_reached(expected_progress)
            command = "[erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)]."
            memcached_restarted = rest.diag_eval(command)
            self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
            time.sleep(20)

            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
                ejectedNodes=optNodesIds)

        # Stop loaders
        SwapRebalanceBase.stop_load(loaders)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))

        self.log.info("DONE DATA ACCESS PHASE")
        #for bucket in rest.get_buckets():
        #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'),\
        #        bucket.name, self)
        #    RebalanceHelper.wait_for_persistence(master, bucket.name)

        self.log.info("VERIFICATION PHASE")
        SwapRebalanceBase.items_verification(master, self)
Developer: steveyen, Project: testrunner, Lines: 85, Source: swaprebalance.py


Note: The code examples of the membase.helper.cluster_helper.ClusterOperationHelper.find_orchestrator method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.