

Python BucketOperationHelper.delete_bucket_or_assert Method Code Examples

This article collects typical usage examples of the Python method membase.helper.bucket_helper.BucketOperationHelper.delete_bucket_or_assert. If you are wondering how BucketOperationHelper.delete_bucket_or_assert is used in practice, the curated examples below should help. You can also explore further usage of membase.helper.bucket_helper.BucketOperationHelper, the class this method belongs to.


The following presents 12 code examples of BucketOperationHelper.delete_bucket_or_assert, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
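All of the examples share the same basic call. Before the full test listings, here is a minimal sketch of that call pattern; the helper-method name _drop_test_bucket and its arguments are placeholders standing in for whatever a test suite provides, while the keyword names serverInfo and bucket match the calls in the examples below:

from membase.helper.bucket_helper import BucketOperationHelper

def _drop_test_bucket(self, server, bucket_name):
    # Delete the named bucket on the given server and fail the calling test if
    # the deletion does not complete -- the "or_assert" part of the helper's name.
    BucketOperationHelper.delete_bucket_or_assert(serverInfo=server,
                                                  bucket=bucket_name)
    # Several backup/restore tests below instead pass the TestCase instance as a
    # third positional argument so the helper can assert against it directly:
    # BucketOperationHelper.delete_bucket_or_assert(server, bucket_name, self)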

Example 1: test_oom_delete_bucket

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_oom_delete_bucket(self):
        """
        1. Get OOM
        2. Delete a bucket
        3. Verify if state of indexes is changed
        :return:
        """
        self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved")
        for i in range(len(self.buckets)):
            log.info("Deleting bucket {0}...".format(self.buckets[i].name))
            BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.oomServer, bucket=self.buckets[i].name)
            self.sleep(120)
            check = self._validate_indexer_status_oom()
            if not check:
                if i < len(self.buckets):
                    self.buckets = self.buckets[i+1:]
                else:
                    #TODO: Pras: Need better solution here
                    self.buckets = []
                break
            log.info("Indexer Still in OOM...")
        self.sleep(120)
        self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM")
        self._verify_bucket_count_with_index_count(self.load_query_definitions)
        self.multi_query_using_index(buckets=self.buckets,
                                     query_definitions=self.load_query_definitions)
Author: chethanrao, Project: testrunner-archive, Lines: 28, Source: memdb_oom_2i.py

Example 2: _create_plasma_buckets

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _create_plasma_buckets(self):
        for bucket in self.buckets:
            if bucket.name.startswith("standard"):
                BucketOperationHelper.delete_bucket_or_assert(
                    serverInfo=self.dgmServer, bucket=bucket.name)
        self.buckets = [bu for bu in self.buckets if not bu.name.startswith("standard")]
        buckets = []
        for i in range(self.num_plasma_buckets):
            name = "plasma_dgm_" + str(i)
            buckets.append(name)
        bucket_size = self._get_bucket_size(self.quota,
                                            len(self.buckets)+len(buckets))
        self._create_buckets(server=self.master, bucket_list=buckets,
                             bucket_size=bucket_size)
        testuser = []
        rolelist = []
        for bucket in buckets:
            testuser.append({'id': bucket, 'name': bucket, 'password': 'password'})
            rolelist.append({'id': bucket, 'name': bucket, 'roles': 'admin'})
        self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
        buckets = []
        for bucket in self.buckets:
            if bucket.name.startswith("plasma_dgm"):
                buckets.append(bucket)
        return buckets
Author: arod1987, Project: testrunner, Lines: 27, Source: plasma_data_size.py

Example 3: test_create_query_drop_bucket

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_create_query_drop_bucket(self):
        self.multi_create_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
        log.info("Deleting bucket {0}...".format(self.buckets[0]))
        BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.restServer, bucket=self.buckets[0].name)
        log.info("Performing Full Table Scan...")
        for query_definition in self.query_definitions:
            self.run_full_table_scan_using_rest(self.buckets[0], query_definition)
        self.multi_drop_index_using_rest(buckets=self.buckets, query_definitions=self.query_definitions)
Author: prasanna135, Project: testrunner, Lines: 10, Source: array_index_2i.py

Example 4: _test_cluster_topology_change_body

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        rest = RestConnection(self.master)

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Author: steveyen, Project: testrunner, Lines: 55, Source: backuptests.py

Example 5: test_create_query_drop_bucket

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_create_query_drop_bucket(self):
        secExpr = ["ALL DISTINCT {0}".format(self.index_field)]
        id = self._create_rest_array_index("index_name_1", self.buckets[0], secExpr)
        self.assertIsNotNone(id, "Array Index is not created.")
        log.info("Array Index index_name_1 on field Countries is created.")
        log.info("Performing Full Table Scan...")
        body = {'stale': 'ok'}
        content = self.rest.full_table_scan_gsi_index_with_rest(id, body)
        self.assertIsNotNone(content, "Table Scan not performed")
        log.info("Deleting bucket {0}...".format(self.buckets[0]))
        BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.restServer, bucket=self.buckets[0].name)
        self.sleep(10)
        self.assertIsNone(self._check_index_status(id, "index_name_1"), "Index still exists after dropping the bucket.")
Author: chethanrao, Project: testrunner-archive, Lines: 15, Source: array_index_2i.py

Example 6: _test_backup_and_restore_from_to_different_buckets

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             moxi=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Author: jchris, Project: testrunner, Lines: 51, Source: backuptests.py

Example 7: _test_backup_add_restore_bucket_with_expiration_key

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                self.assertEquals(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Author: jchris, Project: testrunner, Lines: 51, Source: backuptests.py

Example 8: test_delete_bucket_while_index_build

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_delete_bucket_while_index_build(self):
        create_index_task = []
        index_list = []
        self.defer_build = True
        for bucket in self.buckets:
            for query_definition in self.query_definitions:
                create_index_task.append(self.async_create_index(bucket.name, query_definition))
                index_list.append(query_definition.index_name)
        for task in create_index_task:
            task.result()
        try:
            for bucket in self.buckets:
                build_task = self.async_build_index(bucket, index_list)
                log.info("Deleting bucket {0}".format(bucket.name))
                BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.master, bucket=bucket.name)
                build_task.result()
        except Exception as ex:
            msg = "Keyspace not found keyspace"
            self.assertIn(msg, str(ex), str(ex))
            log.info("Error while building index Expected...")
Author: EricACooper, Project: testrunner, Lines: 22, Source: indexcreatedrop_2i.py

Example 9: test_non_default_moxi

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_non_default_moxi(self):
        name = "new-bucket-{0}".format(uuid.uuid4())
        for serverInfo in self.servers:
            replicas = [0, 1, 2, 3]
            for replicaNumber in replicas:
                rest = RestConnection(serverInfo)
                proxyPort = rest.get_nodes_self().moxi + 2000
                rest.create_bucket(bucket=name, ramQuotaMB=200, replicaNumber=replicaNumber, proxyPort=proxyPort)
                remote = RemoteMachineShellConnection(serverInfo)
                msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
                rest.delete_bucket(name)
                msg = 'bucket "{0}" was not deleted even after waiting for 30 seconds'.format(name)
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=30), msg=msg
                )
                msg = "bucket {0} data files are not deleted after bucket deleted from membase".format(name)
                self.assertTrue(
                    self.wait_for_data_files_deletion(name, remote_connection=remote, rest=rest, timeout_in_seconds=20),
                    msg=msg,
                )
                BucketOperationHelper.delete_bucket_or_assert(serverInfo, name, self)
Author: vmx, Project: testrunner, Lines: 24, Source: deletebuckettests.py

Example 10: _test_backup_add_restore_bucket_body

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _test_backup_add_restore_bucket_body(self, bucket="default", port_no = 11211, delay_after_data_load=0, startup_flag = True):

        self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
        master = self.servers[0]

        node = RestConnection(master).get_nodes_self()
        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)
        keys = BucketOperationHelper.load_some_data(master, bucket_name=bucket, test = self)

        if not startup_flag:
            self.shell.stop_membase()
        else:
            self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
            time.sleep(delay_after_data_load)

        #let's create a unique folder in the remote location
        output, error = self.shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
        self.shell.log_command_output(output,error)

        #now let's back up
        BackupHelper(master, self).backup(bucket, node, self.remote_tmp_folder)

        if not startup_flag:
            self.shell.start_membase()

        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)

        if not startup_flag:
            self.shell.stop_membase()

        BackupHelper(master, self).restore(self.remote_tmp_folder)

        if not startup_flag:
            self.shell.start_membase()

        BucketOperationHelper.verify_data(master.ip, keys, False, False, port_no, self)
Author: jchris, Project: testrunner, Lines: 40, Source: backupandrestoretests.py

Example 11: _test_backup_add_restore_bucket_body

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def _test_backup_add_restore_bucket_body(self,
                                             bucket,
                                             delay_after_data_load,
                                             startup_flag,
                                             single_node):
        server = self.master
        rest = RestConnection(server)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="password")

        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        if not single_node:
            self.add_nodes_and_rebalance()
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        if not single_node:
            rest = RestConnection(self.master)
            self.assertTrue(RestHelper(rest).wait_for_replication(180), msg="replication did not complete")

        self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_membase()
                shell.stop_couchbase()
                shell.disconnect()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)

        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.start_membase()
                shell.start_couchbase()
                RestHelper(RestConnection(server)).is_ns_server_running()
                shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)

        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="password")
        BucketOperationHelper.wait_for_memcached(self.master, bucket)

        if bucket == "default":
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
        else:
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='password')

        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
Author: jchris, Project: testrunner, Lines: 79, Source: backuptests.py

Example 12: test_backup_upgrade_restore_default

# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import delete_bucket_or_assert [as alias]
    def test_backup_upgrade_restore_default(self):
        if len(self.servers) < 2:
            self.log.error("At least 2 servers required for this test ..")
            return
        original_set = copy.copy(self.servers)
        worker = self.servers[len(self.servers) - 1]
        self.servers = self.servers[:len(self.servers)-1]
        shell = RemoteMachineShellConnection(self.master)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        fin = o[0]
        shell.disconnect()
        initial_version = self.input.param("initial_version", fin)
        final_version = self.input.param("final_version", fin)
        if initial_version==final_version:
            self.log.error("Same initial and final versions ..")
            return
        if not final_version.startswith('2.0'):
            self.log.error("Upgrade test not set to run from 1.8.1 -> 2.0 ..")
            return
        builds, changes = BuildQuery().get_all_builds(version=final_version)
        product = 'couchbase-server-enterprise'
        #CASE where the worker isn't a 2.0+
        worker_flag = 0
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command("cat /opt/couchbase/VERSION.txt")
        temp = o[0]
        if not temp.startswith('2.0'):
            worker_flag = 1
        if worker_flag == 1:
            self.log.info("Loading version {0} on worker.. ".format(final_version))
            remote = RemoteMachineShellConnection(worker)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                  info.architecture_type, final_version)
            remote.stop_couchbase()
            remote.couchbase_uninstall()
            remote.download_build(older_build)
            remote.install_server(older_build)
            remote.disconnect()

        remote_tmp = "{1}/{0}".format("backup", "/root")
        perm_comm = "mkdir -p {0}".format(remote_tmp)
        if not initial_version == fin:
            for server in self.servers:
                remote = RemoteMachineShellConnection(server)
                info = remote.extract_remote_info()
                self.log.info("Loading version ..  {0}".format(initial_version))
                older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
                                                      info.architecture_type, initial_version)
                remote.stop_couchbase()
                remote.couchbase_uninstall()
                remote.download_build(older_build)
                remote.install_server(older_build)
                rest = RestConnection(server)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                rest.init_cluster(server.rest_username, server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

        self.common_setUp()
        bucket = "default"
        if len(self.servers) > 1:
            self.add_nodes_and_rebalance()
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached_failed")
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=0.5,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             delete_ratio=0.1,
                                                                                             number_of_threads=2)
        if len(self.servers) > 1:
            rest = RestConnection(self.master)
            self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
                            msg="replication did not complete")

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        shell = RemoteMachineShellConnection(worker)
        o, r = shell.execute_command(perm_comm)
        shell.log_command_output(o, r)
        shell.disconnect()

        #Backup
        #BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
        shell = RemoteMachineShellConnection(worker)
        shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
                                                            self.master.ip, self.master.port, remote_tmp))
        shell.disconnect()
        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
#......... (remaining code omitted here) .........
Author: ashvindersingh, Project: testrunner, Lines: 103, Source: backuptests.py


Note: The membase.helper.bucket_helper.BucketOperationHelper.delete_bucket_or_assert examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.