当前位置: 首页>>代码示例>>Python>>正文


Python RestHelper.bucket_exists方法代码示例

本文整理汇总了Python中membase.api.rest_client.RestHelper.bucket_exists方法的典型用法代码示例。如果您正苦于以下问题:Python RestHelper.bucket_exists方法的具体用法?Python RestHelper.bucket_exists怎么用?Python RestHelper.bucket_exists使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在membase.api.rest_client.RestHelper的用法示例。


在下文中一共展示了RestHelper.bucket_exists方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _create_default_bucket

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def _create_default_bucket(self):
     """Create self.bucket if it does not exist yet, sizing its RAM quota
     from the node's memoryQuota scaled by the base bucket ratio (with a
     256 MB floor), then assert that the bucket is present."""
     rest_helper = RestHelper(self.rest)
     if not rest_helper.bucket_exists(self.bucket):
         ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         node_info = self.rest.get_nodes_self()
         # Never request less than the 256 MB minimum bucket quota.
         quota_mb = max(256, int(node_info.memoryQuota * ram_ratio))
         self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=quota_mb)
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.testcase.assertTrue(ready, "wait_for_memcached failed")
     self.testcase.assertTrue(rest_helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
开发者ID:strategist922,项目名称:testrunner,代码行数:14,代码来源:spatial_helper.py

示例2: _create_default_bucket

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def _create_default_bucket(self, replica=1):
     """Ensure a "default" bucket exists on the first server.

     The RAM quota is the node's memoryQuota scaled by the base bucket
     ratio; the replica count comes from the *replica* argument.
     """
     bucket_name = "default"
     first_server = self.servers[0]
     conn = RestConnection(first_server)
     checker = RestHelper(RestConnection(first_server))
     if not checker.bucket_exists(bucket_name):
         ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         quota = conn.get_nodes_self().memoryQuota * ratio
         conn.create_bucket(bucket=bucket_name, ramQuotaMB=int(quota), replicaNumber=replica)
         ready = BucketOperationHelper.wait_for_memcached(first_server, bucket_name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(checker.bucket_exists(bucket_name), msg="unable to create {0} bucket".format(bucket_name))
开发者ID:jason-hou,项目名称:testrunner,代码行数:15,代码来源:swaprebalance.py

示例3: _create_default_bucket

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def _create_default_bucket(self, unittest):
     """Create the "default" bucket on self.master if it does not exist.

     *unittest* is the TestCase instance used for assertions. Also waits
     for the bucket's vbuckets to reach the ready state after creation.
     """
     bucket_name = "default"
     conn = RestConnection(self.master)
     checker = RestHelper(RestConnection(self.master))
     if not checker.bucket_exists(bucket_name):
         ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
         quota = conn.get_nodes_self().memoryQuota * ratio
         conn.create_bucket(bucket=bucket_name, ramQuotaMB=int(quota))
         ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_name)
         # Wait for vbuckets as well before asserting readiness.
         BucketOperationHelper.wait_for_vbuckets_ready_state(self.master, bucket_name)
         unittest.assertTrue(ready, msg="wait_for_memcached failed")
     unittest.assertTrue(checker.bucket_exists(bucket_name),
                         msg="unable to create {0} bucket".format(bucket_name))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:17,代码来源:memcapable.py

示例4: _create_default_bucket

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def _create_default_bucket(self):
     """Create self.bucket on the master node if it is missing,
     (re)initialising the cluster credentials and memory quota first."""
     conn = RestConnection(self.master)
     checker = RestHelper(RestConnection(self.master))
     if not checker.bucket_exists(self.bucket):
         ratio = BucketOperationHelper.base_bucket_ratio([self.master])
         node_info = conn.get_nodes_self()
         quota = node_info.memoryQuota * ratio
         # Initialise the cluster before creating the bucket.
         conn.init_cluster(username=self.master.rest_username,
                           password=self.master.rest_password)
         conn.init_cluster_memoryQuota(memoryQuota=int(node_info.mcdMemoryReserved * ratio))
         conn.create_bucket(bucket=self.bucket, ramQuotaMB=int(quota))
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(checker.bucket_exists(self.bucket),
                     msg="unable to create {0} bucket".format(self.bucket))
开发者ID:steveyen,项目名称:testrunner,代码行数:18,代码来源:drainratetests.py

示例5: _create_default_bucket

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def _create_default_bucket(self):
     """Create a "default" bucket on the first server if absent, then
     reset the load-thread bookkeeping attributes."""
     bucket_name = "default"
     first_server = self.servers[0]
     conn = RestConnection(first_server)
     checker = RestHelper(RestConnection(first_server))
     if not checker.bucket_exists(bucket_name):
         ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         # Quota here is based on mcdMemoryReserved, not memoryQuota.
         quota = conn.get_nodes_self().mcdMemoryReserved * ratio
         conn.create_bucket(bucket=bucket_name, ramQuotaMB=int(quota))
         ready = BucketOperationHelper.wait_for_memcached(first_server, bucket_name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(checker.bucket_exists(bucket_name),
                     msg="unable to create {0} bucket".format(bucket_name))
     self.load_thread = None
     self.shutdown_load_data = False
开发者ID:steveyen,项目名称:testrunner,代码行数:18,代码来源:syncreplicationtests.py

示例6: backup_restore

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def backup_restore(self):
     """Run `cbbackupmgr restore` against the restore cluster host.

     Builds the CLI arguments from self.backupset, creates any missing
     destination buckets first (unless self.skip_buckets), executes the
     command on the backup host and returns its (output, error) lines.
     """
     # Map --start/--end indexes to backup names; an out-of-range index
     # is appended verbatim to the newest backup name instead.
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = ("restore --archive {0} --repo {1} --host http://{2}:{3} "
             "--username {4} --password {5} --start {6} --end {7}".format(
                 self.backupset.directory, self.backupset.name,
                 self.backupset.restore_cluster_host.ip,
                 self.backupset.restore_cluster_host.port,
                 self.backupset.restore_cluster_host_username,
                 self.backupset.restore_cluster_host_password,
                 backup_start, backup_end))
     # Optional value-carrying flags, appended only when set (truthy).
     value_flags = [
         (self.backupset.exclude_buckets, " --exclude-buckets {0}"),
         (self.backupset.include_buckets, " --include-buckets {0}"),
         (self.backupset.disable_bucket_config, " --disable-bucket-config {0}"),
         (self.backupset.disable_views, " --disable-views {0}"),
         (self.backupset.disable_gsi_indexes, " --disable-gsi-indexes {0}"),
         (self.backupset.disable_ft_indexes, " --disable-ft-indexes {0}"),
         (self.backupset.disable_data, " --disable-data {0}"),
         (self.backupset.filter_keys, " --filter_keys {0}"),
         (self.backupset.filter_values, " --filter_values {0}"),
     ]
     for value, template in value_flags:
         if value:
             args += template.format(value)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         # Pre-create any destination buckets the restore expects.
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if rest_helper.bucket_exists(bucket.name):
                 continue
             self.log.info("Creating bucket {0} in restore host {1}".format(bucket.name,
                                                                            self.backupset.restore_cluster_host.ip))
             rest_conn.create_bucket(bucket=bucket.name,
                                     ramQuotaMB=512,
                                     authType=bucket.authType if bucket.authType else 'none',
                                     proxyPort=bucket.port,
                                     saslPassword=bucket.saslPassword)
             if not rest_helper.vbucket_map_ready(bucket.name):
                 self.fail("Bucket %s not created after 120 seconds." % bucket.name)
     shell = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = shell.execute_command(command)
     shell.log_command_output(output, error)
     return output, error
开发者ID:EricACooper,项目名称:testrunner,代码行数:60,代码来源:enterprise_backup_restore_base.py

示例7: wait_for_bucket_creation

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def wait_for_bucket_creation(bucket, rest, timeout_in_seconds=120):
     """Poll every 2 seconds until *bucket* exists on *rest*.

     Returns True once the bucket is visible, False if
     *timeout_in_seconds* elapses first.
     """
     log.info("waiting for bucket creation to complete....")
     checker = RestHelper(rest)
     deadline = time.time() + timeout_in_seconds
     while time.time() <= deadline:
         if checker.bucket_exists(bucket):
             return True
         time.sleep(2)
     return False
开发者ID:paul-guo-,项目名称:appstack,代码行数:12,代码来源:rebalance_helper.py

示例8: wait_for_bucket_deletion

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def wait_for_bucket_deletion(bucket, rest, timeout_in_seconds=120):
     """Poll every 2 seconds until *bucket* no longer exists on *rest*.

     Returns True once the bucket is gone, False if
     *timeout_in_seconds* elapses first.
     """
     log = logger.Logger.get_logger()
     log.info('waiting for bucket deletion to complete....')
     checker = RestHelper(rest)
     deadline = time.time() + timeout_in_seconds
     while time.time() <= deadline:
         if not checker.bucket_exists(bucket):
             return True
         time.sleep(2)
     return False
开发者ID:membase,项目名称:testrunner,代码行数:15,代码来源:bucket_helper.py

示例9: PerfBase

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]

#.........这里部分代码省略.........
    def set_up_cluster(self, master):
        """Initialise the cluster: set REST credentials and memory quota."""
        print("[perf.setUp] Setting up cluster")
        self.rest.init_cluster(master.rest_username, master.rest_password)
        # Memory quota comes from the 'mem_quota' test parameter.
        quota = self.parami('mem_quota', PerfDefaults.mem_quota)
        self.rest.init_cluster_memoryQuota(master.rest_username,
                                           master.rest_password,
                                           memoryQuota=quota)

    def set_up_buckets(self):
        """Create the data bucket(s), splitting the memory quota evenly."""
        print("[perf.setUp] Setting up buckets")
        num_buckets = self.parami('num_buckets', 1)
        if num_buckets > 1:
            self.buckets = ['bucket-{0}'.format(i) for i in range(num_buckets)]
        else:
            self.buckets = [self.param('bucket', 'default')]
        for bucket in self.buckets:
            # Each bucket gets an equal share of the total memory quota.
            per_bucket_quota = self.parami('mem_quota', PerfDefaults.mem_quota) / num_buckets
            replicas = self.parami('replicas', getattr(self, 'replicas', 1))
            self.rest.create_bucket(bucket=bucket, ramQuotaMB=per_bucket_quota,
                                    replicaNumber=replicas, authType='sasl')
            self.assertTrue(self.rest_helper.vbucket_map_ready(bucket, 60),
                            msg='vbucket_map not ready .. timed out')
            self.assertTrue(self.rest_helper.bucket_exists(bucket),
                            msg='unable to create {0} bucket'.format(bucket))

    def reconfigure(self):
        """Apply custom Couchbase settings: log level, replication
        concurrency and auto-compaction."""
        print("[perf.setUp] Customizing setup")
        self.set_loglevel()
        self.set_max_concurrent_reps_per_doc()
        self.set_autocompaction()

    def set_loglevel(self):
        """Apply the 'loglevel' test parameter globally, if provided."""
        level = self.param('loglevel', None)
        if level:
            self.rest.set_global_loglevel(level)

    def set_max_concurrent_reps_per_doc(self):
        """Export MAX_CONCURRENT_REPS_PER_DOC on every server, if the
        'max_concurrent_reps_per_doc' test parameter is set."""
        value = self.param('max_concurrent_reps_per_doc', None)
        if not value:
            return
        for server in self.input.servers:
            shell = RemoteMachineShellConnection(server)
            shell.set_environment_variable('MAX_CONCURRENT_REPS_PER_DOC',
                                           value)

    def set_ep_compaction(self, comp_ratio):
        """Set up ep_engine side compaction ratio"""
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:70,代码来源:perf.py

示例10: backup_restore

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
 def backup_restore(self):
     """Run `cbbackupmgr restore` against the restore cluster host.

     Builds the CLI arguments from self.backupset, creates any missing
     destination buckets first (unless self.skip_buckets), executes the
     command on the backup host, greps the backup log when a transfer
     failure is reported, and returns the command's (output, error) lines.
     """
     # Map --start/--end indexes to backup names; an out-of-range index
     # is appended verbatim to the newest backup name instead.
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = ("restore --archive {0} --repo {1} {2} http://{3}:{4} --username {5} "
             "--password {6} --start {7} --end {8}".format(
                 self.backupset.directory,
                 self.backupset.name,
                 self.cluster_flag,
                 self.backupset.restore_cluster_host.ip,
                 self.backupset.restore_cluster_host.port,
                 self.backupset.restore_cluster_host_username,
                 self.backupset.restore_cluster_host_password,
                 backup_start,
                 backup_end))
     # Optional value-carrying flags, appended only when set (truthy).
     value_flags = [
         (self.backupset.exclude_buckets, " --exclude-buckets {0}"),
         (self.backupset.include_buckets, " --include-buckets {0}"),
         (self.backupset.disable_bucket_config, " --disable-bucket-config {0}"),
         (self.backupset.disable_views, " --disable-views {0}"),
         (self.backupset.disable_gsi_indexes, " --disable-gsi-indexes {0}"),
         (self.backupset.disable_ft_indexes, " --disable-ft-indexes {0}"),
         (self.backupset.disable_data, " --disable-data {0}"),
     ]
     for value, template in value_flags:
         if value:
             args += template.format(value)
     # This flag is tested against None explicitly so that a False value
     # is still passed through on the command line.
     if self.backupset.disable_conf_res_restriction is not None:
         args += " --disable-conf-res-restriction {0}".format(self.backupset.disable_conf_res_restriction)
     if self.backupset.filter_keys:
         args += " --filter_keys {0}".format(self.backupset.filter_keys)
     if self.backupset.filter_values:
         args += " --filter_values {0}".format(self.backupset.filter_values)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         # Pre-create any destination buckets the restore expects.
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if rest_helper.bucket_exists(bucket.name):
                 continue
             self.log.info("Creating bucket {0} in restore host {1}".format(
                 bucket.name, self.backupset.restore_cluster_host.ip))
             rest_conn.create_bucket(bucket=bucket.name,
                                     ramQuotaMB=512,
                                     authType=bucket.authType if bucket.authType else "none",
                                     proxyPort=bucket.port,
                                     saslPassword=bucket.saslPassword,
                                     lww=self.lww_new)
             if not rest_helper.vbucket_map_ready(bucket.name):
                 self.fail("Bucket %s not created after 120 seconds." % bucket.name)
     shell = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = shell.execute_command(command)
     shell.log_command_output(output, error)
     # NOTE(review): extending `output` in place means the returned output
     # list also contains the error lines — behavior preserved as-is.
     res = output
     res.extend(error)
     error_str = "Error restoring cluster: Transfer failed. Check the logs for more information."
     if error_str in res:
         # Pull surrounding context for the failure out of the backup log.
         command = "cat " + self.backupset.directory + "/logs/backup.log | grep '" + error_str + "' -A 10 -B 100"
         output, error = shell.execute_command(command)
         shell.log_command_output(output, error)
     if "Required Flags:" in res:
         self.fail("Command line failed. Please check test params.")
     return output, error
开发者ID:membase,项目名称:testrunner,代码行数:84,代码来源:enterprise_backup_restore_base.py

示例11: NewUpgradeBaseTest

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]

#.........这里部分代码省略.........
    def _install(self, servers):
        """Install self.initial_version of self.product on *servers* in
        parallel, aborting the whole run if any couchbase node reports an
        unsuccessful install."""
        params = {
            'num_nodes': len(servers),
            'product': self.product,
            'version': self.initial_version,
            'vbuckets': [self.initial_vbuckets],
        }
        InstallerJob().parallel_install(servers, params)
        if self.product in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servers:
                success &= RemoteMachineShellConnection(server).is_couchbase_installed()
                if not success:
                    self.log.info("some nodes were not install successfully!")
                    # Hard abort: continuing with a broken cluster is pointless.
                    sys.exit(1)

    def operations(self, multi_nodes=False):
        """Initialise the nodes, create the configured buckets and, when
        *multi_nodes* is set, rebalance in the remaining initial servers;
        finally load data if op_types == "data"."""
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        self.buckets = []
        gc.collect()
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        if multi_nodes:
            # Rebalance in servers[1:initial_num_servers] alongside the master.
            extra_nodes = self.servers[1:self.initial_num_servers]
            self.cluster.rebalance(self.servers[:1], extra_nodes, [])
        if self.op_types == "data":
            self._load_data_all_buckets("create")
            if multi_nodes:
                self._wait_for_stats_all_buckets(self.servers[:self.initial_num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])

    def _load_data_all_buckets(self, op_type='create', start=0):
        """Load generated 'upgrade-' documents into all buckets, retrying on
        memcached temporary-OOM errors (status 134) up to 60 times.

        Parameters:
            op_type: operation to perform ('create', 'update', ...).
            start: first key index of the generated key range.

        Raises:
            MemcachedError: for any status other than 134. The original
            code silently swallowed such errors without incrementing the
            retry counter, which could spin this loop forever.
        """
        gen_load = BlobGenerator('upgrade-', 'upgrade-', self.data_size, start=start, end=self.num_items)
        loaded = False
        count = 0
        while not loaded and count < 60:
            try:
                self._load_all_buckets(self.master, gen_load, op_type, self.expire_time, 1,
                                       self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                loaded = True
            except MemcachedError as error:
                if error.status != 134:
                    # Unexpected memcached error: surface it instead of
                    # silently retrying without bound.
                    raise
                self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                count += 1
                time.sleep(self.sleep_time)

    def _get_build(self, server, version, remote, is_amazon=False):
        """Locate the build of *version* appropriate for *server*'s platform.

        Plain release versions go through find_membase_release_build;
        versions containing an 'r' go through find_membase_build against
        the full build list.
        """
        info = remote.extract_remote_info()
        builds, changes = BuildQuery().get_all_builds()
        self.log.info("finding build %s for machine %s" % (version, server))
        product = '%s-enterprise' % (self.product)
        if re.search('r', version) is None:
            return BuildQuery().find_membase_release_build(
                product, info.deliverable_type, info.architecture_type,
                version.strip(), is_amazon=is_amazon)
        return BuildQuery().find_membase_build(
            builds, product, info.deliverable_type, info.architecture_type,
            version.strip(), is_amazon=is_amazon)

    def _upgrade(self, upgrade_version, server, remote):
        """Download and apply *upgrade_version* on *server* via *remote*,
        then wait for ns_server and re-initialise the REST port."""
        build = self._get_build(server, upgrade_version, remote)
        self.assertTrue(build.url, msg="unable to find build {0}".format(upgrade_version))
        remote.download_build(build)
        remote.membase_upgrade(build, save_upgrade_config=False)
        # Wait for ns_server to come back before touching the REST API.
        self.rest_helper.is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        self.rest.init_cluster_port(self.rest_settings.rest_username, self.rest_settings.rest_password)
        time.sleep(self.sleep_time)

    def verification(self, multi_nodes=False):
        """Verify every bucket survived the upgrade and, depending on
        op_types, log bucket info ("bucket") or verify stored data ("data").

        Raises:
            Exception: naming the first bucket that no longer exists.
        """
        for bucket in self.buckets:
            if not self.rest_helper.bucket_exists(bucket.name):
                raise Exception("bucket:- %s not found" % bucket.name)
            # Bug fix: this check previously sat after a continue/raise
            # pair inside the loop and was unreachable dead code.
            if self.op_types == "bucket":
                bucketinfo = self.rest.get_bucket(bucket.name)
                self.log.info("bucket info :- %s" % bucketinfo)

        if self.op_types == "data":
            if multi_nodes:
                nodes = self.servers[:self.num_servers]
                self._wait_for_stats_all_buckets(nodes)
                self._verify_all_buckets(self.master, 1, self.wait_timeout * 50, self.max_verify, True, 1)
                self._verify_stats_all_buckets(nodes)
            else:
                self._wait_for_stats_all_buckets([self.master])
                self._verify_all_buckets(self.master, 1, self.wait_timeout * 50, self.max_verify, True, 1)
                self._verify_stats_all_buckets([self.master])
开发者ID:mschoch,项目名称:testrunner,代码行数:104,代码来源:newupgradebasetest.py

示例12: MultiNodesUpgradeTests

# 需要导入模块: from membase.api.rest_client import RestHelper [as 别名]
# 或者: from membase.api.rest_client.RestHelper import bucket_exists [as 别名]
class MultiNodesUpgradeTests(NewUpgradeBaseTest):
    def setUp(self):
        """Pick the product by initial version (membase-server for 1.6/1.7,
        couchbase-server otherwise) and read the initial cluster size."""
        super(MultiNodesUpgradeTests, self).setUp()
        if self.initial_version.startswith(("1.6", "1.7")):
            self.product = "membase-server"
        else:
            self.product = "couchbase-server"
        self.initial_num_servers = self.input.param("initial_num_servers", 2)

    def tearDown(self):
        """Delegate all cleanup to the base test class."""
        super(MultiNodesUpgradeTests, self).tearDown()

    def offline_cluster_upgrade(self):
        """Install the initial version, run setup operations, then for each
        requested upgrade version stop every node and upgrade it in place,
        verifying the cluster afterwards."""
        nodes = self.servers[:self.initial_num_servers]
        self._install(nodes)
        self.operations(multi_nodes=True)
        versions = self.input.param("upgrade_version", "2.0.0-1870-rel").split(";")
        self.log.info("Installation done going to sleep for %s sec", self.sleep_time)
        time.sleep(self.sleep_time)
        for version in versions:
            # Take the whole cluster offline first.
            for server in nodes:
                shell = RemoteMachineShellConnection(server)
                shell.stop_server()
                time.sleep(self.sleep_time)
                shell.disconnect()
            # Then upgrade each node in turn.
            for server in nodes:
                shell = RemoteMachineShellConnection(server)
                self._upgrade(version, server, shell)
                time.sleep(self.sleep_time)
                shell.disconnect()
            time.sleep(self.expire_time)
            self.num_servers = self.initial_num_servers
            self.verification(multi_nodes=True)

    def online_upgrade_rebalance_in_out(self):
        """Online upgrade: install the new version on spare nodes, rebalance
        them in, require that a new node became orchestrator, rebalance the
        old nodes out and verify the result."""
        old_nodes = self.servers[:self.initial_num_servers]
        self._install(old_nodes)
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        self.initial_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.product = "couchbase-server"
        new_nodes = self.servers[self.initial_num_servers:self.num_servers]
        self._install(new_nodes)
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        self.cluster.rebalance(old_nodes, new_nodes, [])
        self.log.info("Rebalance in all 2.0 Nodes")
        time.sleep(self.sleep_time)
        status, content = ClusterHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        # One of the freshly added 2.0 nodes must have become the master.
        found_master = False
        for node in new_nodes:
            if content.find(node.ip) >= 0:
                found_master = True
                self.log.info("2.0 Node %s becomes the master" % (node.ip))
        if not found_master:
            raise Exception("After rebalance in 2.0 Nodes, 2.0 doesn't become the master")

        self.cluster.rebalance(self.servers[:self.num_servers], [], old_nodes)
        self.log.info("Rebalance out all old version nodes")
        time.sleep(self.sleep_time)
        self.verify_upgrade_rebalance_in_out()

    def verify_upgrade_rebalance_in_out(self):
        """Re-point master/rest at the first new-version node and verify
        that every bucket (and, per op_types, bucket info or data)
        survived the swap."""
        self.master = self.servers[self.initial_num_servers]
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        for bucket in self.buckets:
            if not self.rest_helper.bucket_exists(bucket.name):
                raise Exception("bucket:- %s not found" % bucket.name)
        if self.op_types == "bucket":
            # NOTE(review): 'bucket' is the loop variable leaking out of the
            # for-loop above, so only the last bucket's info is fetched —
            # possibly intended to sit inside the loop; behavior preserved.
            bucketinfo = self.rest.get_bucket(bucket.name)
            self.log.info("bucket info :- %s" % bucketinfo)
        if self.op_types == "data":
            new_nodes = self.servers[self.initial_num_servers:self.num_servers]
            self._wait_for_stats_all_buckets(new_nodes)
            self._verify_all_buckets(self.master, 1, self.wait_timeout * 50, self.max_verify, True, 1)
            self._verify_stats_all_buckets(new_nodes)

    def online_upgrade_swap_rebalance(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.initial_version = upgrade_version
        self.product = "couchbase-server"
        self._install(self.servers[self.initial_num_servers : self.num_servers])
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        self.swap_num_servers = self.input.param("swap_num_servers", 1)
        old_servers = self.servers[: self.initial_num_servers]
        new_servers = []
        for i in range(self.initial_num_servers / self.swap_num_servers):
            servers_in = self.servers[
#.........这里部分代码省略.........
开发者ID:mschoch,项目名称:testrunner,代码行数:103,代码来源:newupgradetests.py


注:本文中的membase.api.rest_client.RestHelper.bucket_exists方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。