

Python RebalanceHelper.wait_for_stats_on_all Method Code Examples

This article collects typical usage examples of the Python method membase.helper.rebalance_helper.RebalanceHelper.wait_for_stats_on_all. If you are looking for concrete examples of how to call RebalanceHelper.wait_for_stats_on_all, the selected code samples below should help. You can also browse further usage examples of its containing class, membase.helper.rebalance_helper.RebalanceHelper.


The sections below show 15 code examples of the RebalanceHelper.wait_for_stats_on_all method, sorted by popularity by default.
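
Before the examples, here is a minimal sketch of the typical call pattern distilled from the snippets below. It is not taken from any single project: the wait_for_persistence function name, the master argument, and the bucket name are illustrative placeholders, and it assumes a testrunner environment with a reachable cluster master node.

# Minimal usage sketch (assumed names; requires a configured testrunner setup).
from membase.helper.rebalance_helper import RebalanceHelper

def wait_for_persistence(master, bucket="default"):
    # Block until the disk write queue (ep_queue_size) has drained to zero on every node.
    ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
    assert ready, "wait_for ep_queue_size == 0 failed"
    # Block until the flusher (ep_flusher_todo) has no items left to persist.
    ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
    assert ready, "wait_for ep_flusher_todo == 0 failed"

Most of the examples below use exactly this pair of waits to make sure all mutations have been persisted to disk before backing up, failing over, or verifying data.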

Example 1: _verify_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def _verify_data(self, master, rest, inserted_keys):
     log = logger.Logger.get_logger()
     log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_queue_size", 0)
     self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_flusher_todo", 0)
     self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert(keys=inserted_keys, server=master, bucket_name="default", test=self)
Developer: strategist922, Project: testrunner, Lines of code: 10, Source file: upgradetests.py

Example 2: verify_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def verify_data(master, inserted_keys, bucket, test):
     test.log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert_in_parallel(keys=inserted_keys, server=master, \
         bucket_name=bucket, test=test, concurrency=4)
Developer: steveyen, Project: testrunner, Lines of code: 10, Source file: swaprebalance.py

Example 3: _test_delete_key_and_backup_and_restore_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_delete_key_and_backup_and_restore_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        client.delete(keys[0])

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        missing_keys = []
        verify_keys = []
        for key in keys:
            vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "Keys are not empty")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                        "Missing keys")
Developer: jchris, Project: testrunner, Lines of code: 59, Source file: backuptests.py

Example 4: _test_backup_and_restore_bucket_overwriting_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key), key + " should have value = 2")
            else:
                self.assertNotEqual("2", client.get(key=key), key + " should not have value = 2")
        self.log.info("verified the values of {0} keys after restore with overwrite_flag={1}".format(len(keys), overwrite_flag))
Developer: jchris, Project: testrunner, Lines of code: 56, Source file: backuptests.py

Example 5: wait_until_warmed_up

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def wait_until_warmed_up(self, master=None):
        if not master:
            master = self.input.servers[0]

        bucket = self.param("bucket", "default")

        fn = RebalanceHelper.wait_for_mc_stats_no_timeout
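        # Poll ep_warmup_thread on every node until it reports 'complete'; wait_for_mc_stats_no_timeout retries without a timeout.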
        for bucket in self.buckets:
            RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                                  'ep_warmup_thread',
                                                  'complete', fn=fn)
Developer: IrynaMironava, Project: testrunner, Lines of code: 13, Source file: perf.py

Example 6: load_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def load_data(self, master, bucket, keys_count):
     log = logger.Logger.get_logger()
     inserted_keys_cnt = 0
     while inserted_keys_cnt < keys_count:
         keys_cnt, rejected_keys_cnt = MemcachedClientHelper.load_bucket(
             servers=[master], name=bucket, number_of_items=keys_count, number_of_threads=5, write_only=True
         )
         inserted_keys_cnt += keys_cnt
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_queue_size", 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_flusher_todo", 0)
     return inserted_keys_cnt
Developer: jason-hou, Project: testrunner, Lines of code: 14, Source file: autofailovertests.py

Example 7: _test_cluster_topology_change_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        rest = RestConnection(self.master)

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Developer: steveyen, Project: testrunner, Lines of code: 55, Source file: backuptests.py

Example 8: load_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def load_data(master, bucket, keys_count=-1, load_ratio=-1):
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=bucket,
                                                           ram_load_ratio=load_ratio,
                                                           number_of_items=keys_count,
                                                           number_of_threads=2,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     return inserted_keys
Developer: jchris, Project: testrunner, Lines of code: 15, Source file: failovertests.py

Example 9: wait_until_repl

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def wait_until_repl(self):
        print "[perf.repl] waiting for replication: %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'vb_replica_queue_size', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_replica_queue_itemondisk', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_rebalance_queue_backfillremaining', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_replica_qlen', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.repl] replication is done: %s"\
            % time.strftime(PerfDefaults.strftime)
Developer: IrynaMironava, Project: testrunner, Lines of code: 27, Source file: perf.py

Example 10: _test_backup_and_restore_from_to_different_buckets

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_backup_and_restore_from_to_different_buckets(self):
        bucket_before_backup = "bucket_before_backup"
        bucket_after_backup = "bucket_after_backup"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket_before_backup,
                                                                                             ram_load_ratio=20,
                                                                                             value_size_distribution=distribution,
                                                                                             write_only=True,
                                                                                             moxi=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
            shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                            test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
            time.sleep(10)

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                          bucket=bucket_after_backup), "Missing keys")
Developer: jchris, Project: testrunner, Lines of code: 51, Source file: backuptests.py

Example 11: _test_backup_add_restore_bucket_with_expiration_key

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                self.assertEquals(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Developer: jchris, Project: testrunner, Lines of code: 51, Source file: backuptests.py

Example 12: load_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def load_data(master, bucket, keys_count= -1, load_ratio= -1, delete_ratio=0, expiry_ratio=0, test=None):
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys = \
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
         name=bucket,
         ram_load_ratio=load_ratio,
         number_of_items=keys_count,
         number_of_threads=2,
         write_only=True,
         delete_ratio=delete_ratio,
         expiry_ratio=expiry_ratio,
         moxi=True)
     log.info("wait until data is completely persisted on the disk")
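     # Unlike most examples on this page, these waits pass an explicit 120-second timeout.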
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0, timeout_in_seconds=120)
     test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     return inserted_keys
Developer: ronniedada, Project: testrunner, Lines of code: 20, Source file: rebalancetests.py

Example 13: wait_until_drained

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def wait_until_drained(self):
        print "[perf.drain] draining disk write queue : %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                              'ep_queue_size', 0,
                                              fn=RebalanceHelper.wait_for_stats_no_timeout)
        RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                              'ep_flusher_todo', 0,
                                              fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.drain] disk write queue has been drained: %s"\
            % time.strftime(PerfDefaults.strftime)

        return time.time()
Developer: IrynaMironava, Project: testrunner, Lines of code: 20, Source file: perf.py

Example 14: load_data

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
 def load_data(self, master, bucket, keys_count):
     inserted_keys_cnt = 0
     repeat_count = 0
     while inserted_keys_cnt < keys_count and repeat_count < 5:
         keys_cnt, rejected_keys_cnt = \
         MemcachedClientHelper.load_bucket(servers=[master],
             name=bucket,
             number_of_items=keys_count,
             number_of_threads=5,
             write_only=True)
         inserted_keys_cnt += keys_cnt
         if keys_cnt == 0:
             repeat_count += 1
         else:
             repeat_count = 0
     if repeat_count == 5:
         log.exception("impossible to load data")
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     return inserted_keys_cnt
Developer: bcui6611, Project: testrunner, Lines of code: 23, Source file: autofailovertests.py

Example 15: _test_backup_add_restore_bucket_body

# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats_on_all [as alias]
    def _test_backup_add_restore_bucket_body(self,
                                             bucket,
                                             delay_after_data_load,
                                             startup_flag,
                                             single_node):
        server = self.master
        rest = RestConnection(server)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="password")

        ready = BucketOperationHelper.wait_for_memcached(server, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        if not single_node:
            self.add_nodes_and_rebalance()
        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             name=bucket,
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        if not single_node:
            rest = RestConnection(self.master)
            self.assertTrue(RestHelper(rest).wait_for_replication(180), msg="replication did not complete")

        self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
        node = RestConnection(self.master).get_nodes_self()
        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.stop_membase()
                shell.stop_couchbase()
                shell.disconnect()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)

        #now let's back up
        BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)

        if not startup_flag:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                shell.start_membase()
                shell.start_couchbase()
                RestHelper(RestConnection(server)).is_ns_server_running()
                shell.disconnect()

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)

        if bucket == "default":
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        else:
            proxyPort = info.moxi + 500
            rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                               authType="sasl", saslPassword="password")
        BucketOperationHelper.wait_for_memcached(self.master, bucket)

        if bucket == "default":
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
        else:
            BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='password')

        keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
        self.assertTrue(keys_exist, msg="unable to verify keys after restore")
Developer: jchris, Project: testrunner, Lines of code: 79, Source file: backuptests.py


Note: The membase.helper.rebalance_helper.RebalanceHelper.wait_for_stats_on_all examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors; for use and redistribution, refer to the license of the corresponding project. Do not reproduce without permission.