

Python RestConnection.fetch_vbucket_map Method: Code Examples

This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.fetch_vbucket_map. If you are wondering what RestConnection.fetch_vbucket_map does or how to use it, the curated examples below should help. You can also explore other usages of membase.api.rest_client.RestConnection.


Five code examples of the RestConnection.fetch_vbucket_map method are shown below, ordered by popularity by default. All of them come from the testrunner project (cbRecoverytests.py).
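All five examples follow the same pattern: snapshot the vbucket map of the default bucket through a RestConnection before a failover/swap rebalance, snapshot it again afterwards, and compare the two. The following is only a minimal sketch of that pattern, not part of the original listings: the master_node argument is a placeholder, and fetch_vbucket_map() is assumed to return the default bucket's vbucket map when called with no arguments, as the examples below suggest.

from membase.api.rest_client import RestConnection

def snapshot_default_vbucket_map(master_node):
    # Open a REST connection to the cluster's master node and fetch the
    # current vbucket map (assumption: a no-argument call targets the
    # default bucket, as in the examples on this page).
    rest = RestConnection(master_node)
    return rest.fetch_vbucket_map()

# Typical before/after comparison around a topology change:
# vbucket_map_before = snapshot_default_vbucket_map(master_node)
# ... failover, add nodes, rebalance ...
# vbucket_map_after = snapshot_default_vbucket_map(master_node)
# unchanged = (vbucket_map_before == vbucket_map_after)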

Example 1: cbrecover_multiple_failover_swapout_reb_routine

# Required module: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_vbucket_map [as alias]
    def cbrecover_multiple_failover_swapout_reb_routine(self):
        self.common_preSetup()
        if self._failover is not None:
            if "source" in self._failover:
                rest = RestConnection(self.src_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.src_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.src_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on source : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on source ..".format(self._failover_count))
                self.failed_nodes = self.src_nodes[(len(self.src_nodes) - self._failover_count):len(self.src_nodes)]
                self.cluster.failover(self.src_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.src_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.src_nodes.extend(add_nodes)
                self.sleep(self.wait_timeout / 4)
                # CALL THE CBRECOVERY ROUTINE
                self.cbr_routine(self.dest_master, self.src_master)

                self.trigger_rebalance(rest)
                if self._default_bucket:
                    self.vbucket_map_after = rest.fetch_vbucket_map()
                    self.final_node_count = len(self.src_nodes)

            elif "destination" in self._failover:
                rest = RestConnection(self.dest_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.dest_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.dest_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on sink : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on destination ..".format(self._failover_count))
                self.failed_nodes = self.dest_nodes[(len(self.dest_nodes) - self._failover_count):len(self.dest_nodes)]
                self.cluster.failover(self.dest_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.dest_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.dest_nodes.extend(add_nodes)
                self.sleep(self.wait_timeout / 4)
                # CALL THE CBRECOVERY ROUTINE
                self.cbr_routine(self.src_master, self.dest_master)

                self.trigger_rebalance(rest)
                if self._default_bucket:
                    self.vbucket_map_after = rest.fetch_vbucket_map()
                    self.final_node_count = len(self.dest_nodes)

        self.common_tearDown_verification()
Author: abhinavdangeti, Project: testrunner, Lines: 60, Source: cbRecoverytests.py

Example 2: restart_cbrecover_multiple_failover_swapout_reb_routine

# Required module: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_vbucket_map [as alias]
    def restart_cbrecover_multiple_failover_swapout_reb_routine(self):
        self.common_preSetup()
        when_step = self._input.param("when_step", "recovery_when_rebalance")
        if self._failover is not None:
            if "source" in self._failover:
                rest = RestConnection(self.src_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.src_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.src_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on source : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on source ..".format(self._failover_count))
                self.failed_nodes = self.src_nodes[(len(self.src_nodes) - self._failover_count):len(self.src_nodes)]
                self.cluster.failover(self.src_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.src_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.src_nodes.extend(add_nodes)
                self.sleep(self.wait_timeout / 4)
                # CALL THE CBRECOVERY ROUTINE WITHOUT WAIT FOR COMPLETED
                self.cbr_routine(self.dest_master, self.src_master, False)

                if "create_bucket_when_recovery" in when_step:
                     name = 'standard_bucket'
                     try:
                         self.cluster.create_standard_bucket(self.src_master, name, STANDARD_BUCKET_PORT + 10, 100, 1)
                     except BucketCreationException, e:
                         self.log.info("bucket creation failed during cbrecovery as expected")
                     # but still able to create bucket on destination
                     self.cluster.create_standard_bucket(self.dest_master, name, STANDARD_BUCKET_PORT + 10, 100, 1)
                     # here we try to re-call cbrecovery(seems it's supported even it's still running)
                     # if recovery fast(=completed) we can get "No recovery needed"
                     self.cbr_routine(self.dest_master, self.src_master)
                elif "recovery_when_rebalance" in when_step:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    try:
                        self.cbr_routine(self.dest_master, self.src_master)
                        self.log.exception("cbrecovery should have failed while the rebalance was in progress")
                    except CBRecoveryFailedException as e:
                        self.log.info("cbrecovery failed as expected when there are no failed-over nodes")
                    reached = RestHelper(rest).rebalance_reached()
                    self.assertTrue(reached, "rebalance failed or did not complete")
                    if self._replication_direction_str == "unidirection":
                        self.log.warn("we expect data loss on the source cluster with unidirectional replication")
                        self.log.warn("data verification will be skipped")
                        return
                elif "recovery_when_rebalance_stopped" in when_step:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    rest.stop_rebalance()
                    try:
                        self.cbr_routine(self.dest_master, self.src_master)
                        self.log.exception("cbrecovery should have failed after the rebalance was stopped")
                    except CBRecoveryFailedException as e:
                        self.log.info("cbrecovery failed as expected when there are no failed-over nodes")
Author: abhinavdangeti, Project: testrunner, Lines: 62, Source: cbRecoverytests.py

Example 3: cbrecover_multiple_failover_addback_routine

# Required module: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_vbucket_map [as alias]
    def cbrecover_multiple_failover_addback_routine(self):
        self._load_all_buckets(self.src_master, self.gen_create, "create", 0, flag=self.flag_val)
        tasks = []
        if self._doc_ops is not None:
            if "update" in self._doc_ops:
                tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_update, "update", self._expires))
            if "delete" in self._doc_ops:
                tasks.extend(self._async_load_all_buckets(self.src_master, self.gen_delete, "delete", 0))
        for task in tasks:
            task.result()

        self._wait_for_replication_to_catchup()
        """
        Tracking vbucket movement just on the default bucket for now
        """
        vbucket_map_before = []
        initial_node_count = 0
        vbucket_map_after = []
        final_node_count = 0

        if self._failover is not None:
            if "source" in self._failover:
                rest = RestConnection(self.src_master)
                if self._default_bucket:
                    initial_node_count = len(self.src_nodes)
                    vbucket_map_before = rest.fetch_vbucket_map()       # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.src_nodes):
                    self.log.info("Won't failover .. count exceeds available servers on source : SKIPPING TEST")
                    self.tearDown()
                    return
                if len(self._floating_servers_set) < self._add_count:
                    self.log.info("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                    self.tearDown()
                    return
                self.log.info("Failing over {0} nodes on source ..".format(self._failover_count))
                self.failed_nodes = self.src_nodes[(len(self.src_nodes)-self._failover_count):len(self.src_nodes)]
                self.cluster.failover(self.src_nodes, self.failed_nodes)
                self.sleep(self._timeout / 4)
                self.log.info("Adding back the {0} nodes that were failed over ..".format(self._failover_count))
                for node in self.failed_nodes:
                    self.adding_back_a_node(self.src_master, node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                self.sleep(self._timeout / 4)
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.src_nodes.extend(add_nodes)
                # CALL THE CBRECOVERY ROUTINE
                self.cbr_routine(self.dest_master, self.src_master)

                self.trigger_rebalance(rest)
                if self._default_bucket:
                    vbucket_map_after = rest.fetch_vbucket_map()
                    final_node_count = len(self.src_nodes)

            elif "destination" in self._failover:
                rest = RestConnection(self.dest_master)
                if self._default_bucket:
                    initial_node_count = len(self.dest_nodes)
                    vbucket_map_before = rest.fetch_vbucket_map()       # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.dest_nodes):
                    self.log.info("Won't failover .. count exceeds available servers on sink : SKIPPING TEST")
                    self.tearDown()
                    return
                if len(self._floating_servers_set) < self._add_count:
                    self.log.info("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                    self.tearDown()
                    return
                self.log.info("Failing over {0} nodes on destination ..".format(self._failover_count))
                self.failed_nodes = self.dest_nodes[(len(self.dest_nodes)-self._failover_count):len(self.dest_nodes)]
                self.cluster.failover(self.dest_nodes, self.failed_nodes)
                self.sleep(self._timeout / 4)
                self.log.info("Adding back the {0} nodes that were failed over ..".format(self._failover_count))
                for node in self.failed_nodes:
                    self.adding_back_a_node(self.dest_master, node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                self.sleep(self._timeout / 4)
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.dest_nodes.extend(add_nodes)
                # CALL THE CBRECOVERY ROUTINE
                self.cbr_routine(self.src_master, self.dest_master)

                self.trigger_rebalance(rest)
                if self._default_bucket:
                    vbucket_map_after = rest.fetch_vbucket_map()
                    final_node_count = len(self.dest_nodes)

            # TOVERIFY: check whether the vbucket map is unchanged after a swap rebalance
            if self._default_bucket:
                if self._failover_count == self._add_count:
                    _flag_ = self.vbucket_map_checker(vbucket_map_before, vbucket_map_after, initial_node_count, final_node_count)
                    if _flag_:
                        self.log.info("vbucket_map same as earlier")
                    else:
                        self.log.info("vbucket_map differs from earlier")

        self.sleep(self._timeout / 2)
        self.merge_buckets(self.src_master, self.dest_master, bidirection=False)
        self.verify_results(verify_src=True)
Author: Boggypop, Project: testrunner, Lines: 101, Source: cbRecoverytests.py
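
Example 3 finishes by comparing the two snapshots with a vbucket_map_checker helper that is defined elsewhere in the test class and is not shown on this page. Purely as a hypothetical sketch of what a checker with that signature might do (an assumption for illustration, not the testrunner implementation):

    def vbucket_map_checker(self, map_before, map_after, initial_count, final_count):
        # Hypothetical sketch: for a pure swap rebalance the node count should be
        # unchanged, the number of vbuckets should match, and ideally the map
        # should be identical to the snapshot taken before the failover.
        if initial_count != final_count:
            return False
        if len(map_before) != len(map_after):
            return False
        return map_before == map_after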

Example 4: cbrecovery (excerpt)

# Required module: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_vbucket_map [as alias]
                    try:
                        self.trigger_rebalance(rest)
                        self.log.exception("rebalance should not be permitted during cbrecovery")
                    except InvalidArgumentException as e:
                        self.log.info("can't call rebalance during cbrecovery as expected")
                    # here we try to re-call cbrecovery (it seems to be supported even while one is still running)
                    self.cbr_routine(self.dest_master, self.src_master)
                if self._default_bucket:
                    self.vbucket_map_after = rest.fetch_vbucket_map()
                    self.final_node_count = len(self.src_nodes)

            elif "destination" in self._failover:
                rest = RestConnection(self.dest_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.dest_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.dest_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on sink : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on destination ..".format(self._failover_count))
                self.failed_nodes = self.dest_nodes[(len(self.dest_nodes) - self._failover_count):len(self.dest_nodes)]
                self.cluster.failover(self.dest_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.dest_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.dest_nodes.extend(add_nodes)
                self.sleep(self.wait_timeout / 4)
                # CALL THE CBRECOVERY ROUTINE
Author: abhinavdangeti, Project: testrunner, Lines: 33, Source: cbRecoverytests.py

Example 5: restart_cbrecover_multiple_failover_swapout_reb_routine

# Required module: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import fetch_vbucket_map [as alias]
    def restart_cbrecover_multiple_failover_swapout_reb_routine(self):
        self.common_preSetup()
        when_cbrecovering = self._input.param("when_cbrecovering", "rebalance")
        if self._failover is not None:
            if "source" in self._failover:
                rest = RestConnection(self.src_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.src_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.src_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on source : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on source ..".format(self._failover_count))
                self.failed_nodes = self.src_nodes[(len(self.src_nodes) - self._failover_count):len(self.src_nodes)]
                self.cluster.failover(self.src_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.src_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.src_nodes.extend(add_nodes)
                self.sleep(self._timeout / 4)
                # CALL THE CBRECOVERY ROUTINE WITHOUT WAIT FOR COMPLETED
                self.cbr_routine(self.dest_master, self.src_master, False)

                if "stop_recovery" in when_cbrecovering:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    rest.stop_rebalance()
                elif "rebalance" in when_cbrecovering:
                    try:
                        self.trigger_rebalance(rest)
                        self.log.exception("rebalance should not be permitted during cbrecovery")
                    except InvalidArgumentException as e:
                        self.log.info("can't call rebalance during cbrecovery as expected")
                # here we try to re-call cbrecovery (it seems to be supported even while one is still running)
                self.cbr_routine(self.dest_master, self.src_master)
                self.trigger_rebalance(rest)
                if self._default_bucket:
                    self.vbucket_map_after = rest.fetch_vbucket_map()
                    self.final_node_count = len(self.src_nodes)

            elif "destination" in self._failover:
                rest = RestConnection(self.dest_master)
                if self._default_bucket:
                    self.initial_node_count = len(self.dest_nodes)
                    self.vbucket_map_before = rest.fetch_vbucket_map()  # JUST FOR DEFAULT BUCKET AS OF NOW
                if self._failover_count >= len(self.dest_nodes):
                    raise Exception("Won't failover .. count exceeds available servers on sink : SKIPPING TEST")
                if len(self._floating_servers_set) < self._add_count:
                    raise Exception("Not enough spare nodes available, to match the failover count : SKIPPING TEST")
                self.log.info("Failing over {0} nodes on destination ..".format(self._failover_count))
                self.failed_nodes = self.dest_nodes[(len(self.dest_nodes) - self._failover_count):len(self.dest_nodes)]
                self.cluster.failover(self.dest_nodes, self.failed_nodes)
                for node in self.failed_nodes:
                    self.dest_nodes.remove(node)
                add_nodes = self._floating_servers_set[0:self._add_count]
                for node in add_nodes:
                    rest.add_node(user=node.rest_username, password=node.rest_password, remoteIp=node.ip, port=node.port)
                self.dest_nodes.extend(add_nodes)
                self.sleep(self._timeout / 4)
                # CALL THE CBRECOVERY ROUTINE WITHOUT WAIT FOR COMPLETED
                self.cbr_routine(self.src_master, self.dest_master, False)

                if "stop_recovery" in when_cbrecovering:
                    rest.remove_all_recoveries()
                    self.trigger_rebalance(rest, 15)
                    rest.stop_rebalance()
                elif "rebalance" in when_cbrecovering:
                    try:
                        self.trigger_rebalance(rest)
                        self.log.exception("rebalance should not be permitted during cbrecovery")
                    except InvalidArgumentException as e:
                        self.log.info("can't call rebalance during cbrecovery as expected")

                # here we try to re-call cbrecovery (it seems to be supported even while one is still running)
                self.cbr_routine(self.src_master, self.dest_master)
                self.trigger_rebalance(rest)
                if self._default_bucket:
                    self.vbucket_map_after = rest.fetch_vbucket_map()
                    self.final_node_count = len(self.dest_nodes)
Author: paragagarwal, Project: testrunner, Lines: 84, Source: cbRecoverytests.py


Note: The membase.api.rest_client.RestConnection.fetch_vbucket_map examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and any redistribution or use should comply with the corresponding project's license. Please do not reproduce without permission.