

Python RemoteMachineShellConnection.execute_cbepctl Method Code Examples

This article collects typical usage examples of the Python method remote.remote_util.RemoteMachineShellConnection.execute_cbepctl. If you are wondering how to use RemoteMachineShellConnection.execute_cbepctl, or what it is good for, the curated examples below may help. You can also explore further usage examples of the enclosing class, remote.remote_util.RemoteMachineShellConnection.


The sections below present 12 code examples of the RemoteMachineShellConnection.execute_cbepctl method, sorted by popularity.
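Before the examples, here is a minimal usage sketch. The five-argument call shape (bucket, persistence command, sub-command, parameter name, parameter value) and the (output, error) return pair are inferred from the call sites collected below, not from any official documentation; `server` and `bucket` are assumed to come from your test configuration.

# A minimal sketch; argument meanings are inferred from the examples below.
from remote.remote_util import RemoteMachineShellConnection

shell = RemoteMachineShellConnection(server)  # `server` assumed from the test config
# Stop persistence on a bucket (see Example 2):
shell.execute_cbepctl(bucket, "stop", "", "", "")
# Set the expiry pager interval to 5 seconds (see Examples 1, 3, 5, and 7-10):
shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
# Calls return an (output, error) pair that can be checked (see Examples 6 and 11):
output, error = shell.execute_cbepctl(bucket, "", "set vbucket_param",
                                      "hlc_drift_ahead_threshold_us ", "5000000")
shell.disconnect()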

Example 1: test_upgrade

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        for upgrade_version in self.upgrade_versions:
            self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                       format(upgrade_version))
            upgrade_threads = self._async_update(upgrade_version, [self.master])
            # wait for the upgrade threads to finish
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")


            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
Developer: DavidAlphaFox | Project: couchbase | Lines: 28 | Source: newupgradetests.py

Example 2: rebalance_in_out_at_once_persistence_stopped

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait timeout of 60 min because of MB-7386 (rebalance stuck)
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out], \
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to the sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total")
Developer: ashvindersingh | Project: testrunner | Lines: 37 | Source: rebalanceinout.py
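To see why Example 2 later asserts ep_queue_size > num_items_without_persistence * 0.9: once the flusher is stopped, new mutations accumulate in the disk-write queue. A minimal sketch of that effect, assuming the same test harness (MemcachedClientHelper.direct_client and a string-valued stats dict, as used in Examples 6 and 11):

# Sketch: the disk-write queue should grow while persistence is stopped.
client = MemcachedClientHelper.direct_client(server, bucket)
queue_before = int(client.stats()["ep_queue_size"])
# ... load num_items_without_persistence items here ...
queue_after = int(client.stats()["ep_queue_size"])
assert queue_after > queue_before, "disk-write queue did not grow while persistence was stopped"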

Example 3: recover_to_cbserver

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutation on these items.
        Later we use cbtranfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparison between the items in KVStore
        and items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        del self.buckets
        self.buckets = []
        if self.default_bucket:
            bucket_params = self._create_bucket_params(server=self.server_recovery, size=self.bucket_size,
                                                              replicas=self.num_replicas)
            self.cluster.create_default_bucket(bucket_params)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery, self.standard_buckets)

        transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
        if self.os == 'windows':
            output, error = self.shell.execute_command("taskkill /F /T /IM cbtransfer.exe")
            self.shell.log_command_output(output, error)
            self.shell.delete_files("/cygdrive/c%s" % self.win_data_location)
            self.shell.execute_command("mkdir /cygdrive/c%s" % self.win_data_location)
            self.shell.execute_command("cp -rf %s /cygdrive/c/tmp/" % (WIN_COUCHBASE_DATA_PATH))
            transfer_source = "couchstore-files://C:%s" % (self.win_data_location)
        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_destination = "http://%[email protected]%s:%s" % (self.couchbase_login_info,
                                                        self.server_recovery.ip,
                                                        self.server_recovery.port)
            self.shell.execute_cbtransfer(transfer_source, transfer_destination, "-b %s -B %s" % (bucket.name, bucket.name))
        del kvs_before

        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_recovery])
Developer: arod1987 | Project: testrunner | Lines: 54 | Source: recoveryusetransfer.py
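For clarity, here are the URI shapes Example 3 passes to execute_cbtransfer, pulled out with placeholder values; the data path, host, port, and credentials are illustrative assumptions, not values from the original test.

# Sketch of the cbtransfer source/destination URIs used above; values are placeholders.
COUCHBASE_DATA_PATH = "/opt/couchbase/var/lib/couchbase/data/"  # typical Linux default, assumed

transfer_source = "couchstore-files://%s" % COUCHBASE_DATA_PATH        # read on-disk couchstore files
transfer_destination = "http://%s@%s:%s" % ("Administrator:password",  # REST credentials (user:pass)
                                            "10.0.0.2", "8091")        # destination node and admin port
# "-b <src> -B <dst>" maps the source bucket name onto the destination bucket
shell.execute_cbtransfer(transfer_source, transfer_destination, "-b default -B default")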

Example 4: test_rebalance_in_out_at_once_persistence_stopped

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_rebalance_in_out_at_once_persistence_stopped(self):
        """
        PERFORMANCE: Rebalance in/out at once with stopped persistence.

        This test begins by loading a given number of items into the cluster with
        self.nodes_init nodes in it. Then we stop persistence on some nodes.
        The test starts to update some data and to load new data into the cluster.
        At that time we add servs_in nodes, remove servs_out nodes, and start a rebalance.
        After the rebalance and the data ops are completed we start the verification phase:
        wait for the disk queues to drain, verify the number of items that were (or were not)
        persisted against the expected values, verify that there has been no data loss and
        that sum(curr_items) matches curr_items_total. Once all checks pass, the test is finished.
        Default parameters:
        nodes_init=1, nodes_in=1, nodes_out=1, num_nodes_with_stopped_persistence=1,
        num_items_without_persistence=100000
        """
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait timeout of 60 min because of MB-7386 (rebalance stuck)
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out],
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        # verify that curr_items_tot corresponds to the sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified,
                        "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total")
        self.verify_unacked_bytes_all_buckets()
Developer: EricACooper | Project: testrunner | Lines: 54 | Source: rebalanceinout.py

Example 5: test_upgrade

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
        for upgrade_version in self.upgrade_versions:
            self._upgrade(upgrade_version, self.master)
            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
Developer: strategist922 | Project: testrunner | Lines: 17 | Source: newupgradetests.py

Example 6: test_time_sync_threshold_setting

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_time_sync_threshold_setting(self):

        self.log.info("starting test_time_sync_threshold_setting")

        # bucket is created with lww in base test case using the LWW parameter

        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold
            ),
        )
        # change the setting and verify it is per the new setting - this may or may not be supported

        shell = RemoteMachineShellConnection(self.servers[0])
        output, error = shell.execute_cbepctl(
            self.buckets[0],
            "",
            "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            str(LWWStatsTests.DEFAULT_THRESHOLD / 2) + LWWStatsTests.DUMMY_VBUCKET,
        )
        if len(error) > 0:
            self.fail("Failed to set the drift counter threshold, please check the logs.")

        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD / 2,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD / 2, ahead_threshold
            ),
        )
Developer: membase | Project: testrunner | Lines: 37 | Source: lww_stats.py

Example 7: test_upgrade

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_upgrade(self):
        self._install([self.master])
        self.operations()
        upgrade_versions = self.input.param("upgrade_version", "2.0.0-1870-rel")
        upgrade_versions = upgrade_versions.split(";")
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        for upgrade_version in upgrade_versions:
            remote = RemoteMachineShellConnection(self.master)
            self._upgrade(upgrade_version, self.master, remote)
            time.sleep(self.expire_time)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            time.sleep(30)
            remote.disconnect()
            self.verification()
Developer: mschoch | Project: testrunner | Lines: 18 | Source: newupgradetests.py

Example 8: _load_dgm

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def _load_dgm(self):
        generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self.load_gen_list.append(generate_load)

        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            threshold_reached = False
            while not threshold_reached:
                for server in self.servers:
                    active_resident = stats_all_buckets[bucket.name].get_stats([server], bucket, '', 'vb_active_perc_mem_resident')[server]
                    if int(active_resident) > self.active_resident_threshold:
                        self.log.info("resident ratio is %s, greater than %s, for %s in bucket %s; continue loading the cluster" %
                                      (active_resident, self.active_resident_threshold, server.ip, bucket.name))
                        random_key = self.key_generator()
                        generate_load = BlobGenerator(random_key, '%s-' % random_key, self.value_size, end=self.num_items)
                        tasks = self._async_load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                        for task in tasks:
                            task.result()
                        self.load_gen_list.append(generate_load)
                    else:
                        threshold_reached = True
                        self.log.info("DGM state achieved for %s in bucket %s!" % (server.ip, bucket.name))
                        break


        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                for gen in self.load_gen_list[:int(len(self.load_gen_list) * 0.5)]:
                    self._load_all_buckets(self.master, gen, "update", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "delete" in self.doc_ops:
                for gen in self.load_gen_list[int(len(self.load_gen_list) * 0.5):]:
                    self._load_all_buckets(self.master, gen, "delete", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "expire" in self.doc_ops:
                for gen in self.load_gen_list[:int(len(self.load_gen_list) * 0.8)]:
                    self._load_all_buckets(self.master, gen, "update", self.expire_time, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                time.sleep(self.expire_time * 2)

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
                    shell.disconnect()
                time.sleep(30)
Developer: ketakigangal | Project: cbsystest | Lines: 49 | Source: warmuptest.py

Example 9: _additional_ops

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def _additional_ops(self):
        generate_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_update, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        generate_delete = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_delete, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        generate_expire = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items * 3)
        self._load_all_buckets(self.master, generate_expire, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self._load_all_buckets(self.master, generate_update, "update", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "delete" in self.doc_ops:
                self._load_all_buckets(self.master, generate_delete, "delete", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "expire" in self.doc_ops:
                self._load_all_buckets(self.master, generate_expire, "update", self.expire_time, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                self.sleep(self.expire_time + 10)

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
                    shell.disconnect()
                self.sleep(30)
Developer: ketakigangal | Project: testrunner | Lines: 25 | Source: warmuptest.py

Example 10: RecoveryUseTransferTests

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
class RecoveryUseTransferTests(TransferBaseTest):

    def setUp(self):
        self.times_teardown_called = 1
        super(RecoveryUseTransferTests, self).setUp()
        self.server_origin = self.servers[0]
        self.server_recovery = self.servers[1]
        self.shell = RemoteMachineShellConnection(self.server_origin)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()

    def tearDown(self):
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                if self.os == 'windows':
                    self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
                else:
                    self.shell.delete_files(self.backup_location)
                self.shell.disconnect()
                del self.buckets
        self.times_teardown_called += 1
        super(RecoveryUseTransferTests, self).tearDown()

    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in the KVStore
        with the items on the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%[email protected]%s:%s -b %s -B %s -v -v -v" % (self.couchbase_login_info,
                                                                             self.server_recovery.ip,
                                                                             self.server_recovery.port,
                                                                             bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_recovery])

    def recover_to_backupdir(self):
        """Recover data with 2.0 couchstore files to a 2.0 backup diretory

        We load a number of items to a node first and then do some mutataion on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this node to
        a backup directory. We use cbrestore to restore these backup files to the same node
        for verification."""

        self.load_data()

        kvs_before = {}
        bucket_names = []

        self.shell.delete_files(self.backup_location)
        self.shell.create_directory(self.backup_location)

        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)
            transfer_source = "-v -v -v couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = self.backup_location
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)

        self._all_buckets_delete(self.server_origin)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_origin, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
#......... part of the code is omitted here .........
Developer: Boggypop | Project: testrunner | Lines: 103 | Source: recoveryusetransfer.py

Example 11: test_poisoned_cas

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def test_poisoned_cas(self):

        self.log.info("starting test_poisoned_cas")

        """
        - set the clock ahead
        - do lots of sets and get some CASs
        - do a set and get the CAS (flag, CAS, value) and save it
        - set the clock back
        - verify the CAS is still big on new sets
        - reset the CAS
        - do the vbucket max cas and verify
        - do a new mutation and verify the CAS is smaller


        """

        sdk_client = SDKClient(scheme="couchbase", hosts=[self.servers[0].ip], bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])

        # move the system clock ahead to poison the CAS
        shell = RemoteMachineShellConnection(self.servers[0])
        self.assertTrue(shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to advance the clock")

        output, error = shell.execute_command("date")
        self.log.info("Date after is set forward {0}".format(output))

        rc = sdk_client.set("key1", "val1")
        rc = mc_client.get("key1")
        poisoned_cas = rc[1]
        self.log.info("The poisoned CAS is {0}".format(poisoned_cas))

        # do lots of mutations to set the max CAS for all vbuckets

        gen_load = BlobGenerator("key-for-cas-test", "value-for-cas-test-", self.value_size, end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        # move the clock back again and verify the CAS stays large
        self.assertTrue(shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS), "Failed to change the clock")
        output, error = shell.execute_command("date")
        self.log.info("Date after is set backwards {0}".format(output))

        use_mc_bin_client = self.input.param("use_mc_bin_client", False)

        if use_mc_bin_client:
            rc = mc_client.set("key2", 0, 0, "val2")
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set("key2", "val2")
            second_poisoned_cas = rc.cas
        self.log.info("The second_poisoned CAS is {0}".format(second_poisoned_cas))
        self.assertTrue(
            second_poisoned_cas > poisoned_cas,
            "Second poisoned CAS {0} is not larger than the first poisoned CAS {1}".format(
                second_poisoned_cas, poisoned_cas
            ),
        )

        # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change; if the clock is not
        # changed then the CAS will immediately continue from the clock. There are two scenarios:
        # 1. Set the clock back 1 hour and the CAS back 30 minutes: the CAS should be used
        # 2. Set the clock back 1 hour and the CAS back 2 hours: the clock should be used

        # do case 1, set the CAS back 30 minutes. The calculation below assumes the CAS is in nanoseconds
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ", str(i) + " " + str(earlier_max_cas)
            )
            if len(error) > 0:
                self.fail("Failed to set the max cas")

        # verify the max CAS

        for i in range(self.vbuckets):
            max_cas = int(mc_client.stats("vbucket-details")["vb_" + str(i) + ":max_cas"])
            self.assertTrue(
                max_cas == earlier_max_cas,
                "Max CAS not properly set for vbucket {0}: set {1} but observed {2}".format(
                    i, earlier_max_cas, max_cas
                ),
            )
            self.log.info("Per cbstats the max cas for bucket {0} is {1}".format(i, max_cas))

        rc1 = sdk_client.set("key-after-resetting cas", "val1")
        rc2 = mc_client.get("key-after-resetting cas")
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info("The later CAS is {0}".format(set_cas_after_reset_max_cas))
        self.assertTrue(
            set_cas_after_reset_max_cas < poisoned_cas,
            "For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}".format(
                "key-after-resetting cas", set_cas_after_reset_max_cas, poisoned_cas
            ),
        )

        # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more

        gen_load = BlobGenerator(
            "key-for-cas-test-after-cas-is-reset", "value-for-cas-test-", self.value_size, end=1000
#......... part of the code is omitted here .........
Developer: membase | Project: testrunner | Lines: 103 | Source: lww_stats.py

Example 12: _start_replication

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import execute_cbepctl [as alias]
    def _start_replication(self, server, bucket):
        shell = RemoteMachineShellConnection(server)
        shell.execute_cbepctl(self.bucket, "start", "", "", 0)
        shell.execute_cbepctl(self.bucket, "", "set tap_param", "tap_throttle_queue_cap", 1000000)
        shell.disconnect()
Developer: Boggypop | Project: testrunner | Lines: 7 | Source: checkpoint.py
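Examples 2 and 12 together suggest a pause/resume pattern for persistence. A sketch combining their call shapes; `server` and `bucket` are assumed to come from the surrounding test harness:

# Sketch: pause persistence, do work, then resume it (call shapes from Examples 2 and 12).
shell = RemoteMachineShellConnection(server)
try:
    shell.execute_cbepctl(bucket, "stop", "", "", "")   # pause the flusher (Example 2)
    # ... mutate documents while nothing is persisted ...
    shell.execute_cbepctl(bucket, "start", "", "", 0)   # resume persistence (Example 12)
finally:
    shell.disconnect()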


Note: The remote.remote_util.RemoteMachineShellConnection.execute_cbepctl method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms, with snippets selected from open-source projects contributed by many developers. Copyright in the source code belongs to the original authors; please consult the corresponding project's License before redistributing or using the code, and do not reproduce without permission.