本文整理汇总了Python中membase.helper.bucket_helper.BucketOperationHelper.keys_exist_or_assert_in_parallel方法的典型用法代码示例。如果您正苦于以下问题:Python BucketOperationHelper.keys_exist_or_assert_in_parallel方法的具体用法?Python BucketOperationHelper.keys_exist_or_assert_in_parallel怎么用?Python BucketOperationHelper.keys_exist_or_assert_in_parallel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.bucket_helper.BucketOperationHelper
的用法示例。
在下文中一共展示了BucketOperationHelper.keys_exist_or_assert_in_parallel方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: verify_data
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import keys_exist_or_assert_in_parallel [as 别名]
def verify_data(master, inserted_keys, bucket, test):
    """Verify that every loaded key is readable from the bucket.

    Waits for the ep-engine disk-write queues to drain on all nodes, then
    checks all of ``inserted_keys`` in parallel, asserting via ``test`` on
    any missing key.

    master -- server whose stats are polled and keys are read
    inserted_keys -- keys expected to exist in the bucket
    bucket -- bucket name
    test -- the running TestCase (provides log and assertTrue)
    """
    test.log.info("Verifying data")
    # Drain the write queue first so reads observe everything that was loaded.
    ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
    test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
    # Fixed copy-paste error: this message previously named ep_queue_size.
    test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    BucketOperationHelper.keys_exist_or_assert_in_parallel(keys=inserted_keys, server=master,
                                                           bucket_name=bucket, test=test,
                                                           concurrency=4)
示例2: _test_backup_add_restore_bucket_body
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import keys_exist_or_assert_in_parallel [as 别名]
def _test_backup_add_restore_bucket_body(self,
                                         bucket,
                                         delay_after_data_load,
                                         startup_flag,
                                         single_node):
    """Backup/restore round-trip test body.

    Creates ``bucket``, loads data into it, waits for persistence, backs
    the bucket up (optionally with all server processes stopped), deletes
    and re-creates the bucket, restores the backup, and finally verifies
    that every loaded key exists.

    bucket -- bucket name; "default" is created without SASL auth
    delay_after_data_load -- seconds to wait after loading (logged only here)
    startup_flag -- if False, stop all servers before the backup and
                    restart them afterwards (cold backup)
    single_node -- if True, skip rebalance and replication waits
    """
    server = self.master
    rest = RestConnection(server)
    info = rest.get_nodes_self()
    # Give the bucket 2/3 of the node's memory quota.
    size = int(info.memoryQuota * 2.0 / 3.0)
    if bucket == "default":
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    else:
        proxyPort = info.moxi + 500
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                           authType="sasl", saslPassword="password")
    ready = BucketOperationHelper.wait_for_memcached(server, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    if not single_node:
        self.add_nodes_and_rebalance()
    # Value-size distribution: value size in bytes -> fraction of keys.
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                        name=bucket,
                                                                                        ram_load_ratio=1,
                                                                                        value_size_distribution=distribution,
                                                                                        moxi=True,
                                                                                        write_only=True,
                                                                                        number_of_threads=2)
    if not single_node:
        rest = RestConnection(self.master)
        self.assertTrue(RestHelper(rest).wait_for_replication(180), msg="replication did not complete")
    self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
    # Wait for all writes to be persisted before taking the backup.
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    # Fixed copy-paste error: this message previously named ep_queue_size.
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    node = RestConnection(self.master).get_nodes_self()
    if not startup_flag:
        # Cold backup: stop the server processes on every node first.
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_membase()
            shell.stop_couchbase()
            shell.disconnect()
    output, error = self.shell.execute_command(self.perm_command)
    self.shell.log_command_output(output, error)
    # now let's back up
    BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)
    if not startup_flag:
        # Restart every node and wait for ns_server before restoring.
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shell.start_membase()
            shell.start_couchbase()
            RestHelper(RestConnection(server)).is_ns_server_running()
            shell.disconnect()
    # Re-create the bucket from scratch so the restore starts empty.
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    if bucket == "default":
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    else:
        proxyPort = info.moxi + 500
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                           authType="sasl", saslPassword="password")
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    if bucket == "default":
        BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
    else:
        BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='password')
    keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
    self.assertTrue(keys_exist, msg="unable to verify keys after restore")
示例3: verify_data
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import keys_exist_or_assert_in_parallel [as 别名]
def verify_data(master, inserted_keys, bucket, test):
    """Wait for persistence, then assert every loaded key exists in the bucket."""
    verification_log = logger.Logger.get_logger()
    verification_log.info("Verifying data")
    # Make sure all mutations have hit disk before reading the keys back.
    RebalanceHelper.wait_for_persistence(master, bucket)
    BucketOperationHelper.keys_exist_or_assert_in_parallel(
        keys=inserted_keys,
        server=master,
        bucket_name=bucket,
        test=test,
        concurrency=4)
示例4: verify_loaded_data
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import keys_exist_or_assert_in_parallel [as 别名]
def verify_loaded_data(self, master, bucket, inserted_keys):
    """Assert that every key in ``inserted_keys`` can be read from ``bucket``."""
    all_keys_found = BucketOperationHelper.keys_exist_or_assert_in_parallel(
        inserted_keys, master, bucket, self, concurrency=4)
    self.assertTrue(all_keys_found, msg="unable to verify keys after restore")
示例5: test_backup_upgrade_restore_default
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import keys_exist_or_assert_in_parallel [as 别名]
#.........这里部分代码省略.........
self.assertTrue(ready, "wait_for_memcached_failed")
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=0.5,
value_size_distribution=distribution,
moxi=True,
write_only=True,
delete_ratio=0.1,
number_of_threads=2)
if len(self.servers) > 1:
rest = RestConnection(self.master)
self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
msg="replication did not complete")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
node = RestConnection(self.master).get_nodes_self()
shell = RemoteMachineShellConnection(worker)
o, r = shell.execute_command(perm_comm)
shell.log_command_output(o, r)
shell.disconnect()
#Backup
#BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
self.master.ip, self.master.port, remote_tmp))
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
time.sleep(30)
#Upgrade
for server in self.servers:
self.log.info("Upgrading to current version {0}".format(final_version))
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, final_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(new_build)
remote.install_server(new_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
time.sleep(30)
#Restore
rest = RestConnection(self.master)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
rest.create_bucket(bucket, ramQuotaMB=size)
ready = BucketOperationHelper.wait_for_memcached(server, bucket)
self.assertTrue(ready, "wait_for_memcached_failed")
#BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
self.master.ip, self.master.port, remote_tmp, bucket))
shell.disconnect()
time.sleep(60)
keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
self.assertTrue(keys_exist, msg="unable to verify keys after restore")
time.sleep(30)
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
rest = RestConnection(self.master)
helper = RestHelper(rest)
nodes = rest.node_statuses()
master_id = rest.get_nodes_self().id
if len(self.servers) > 1:
removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in nodes if node.id != master_id],
wait_for_rebalance=True )
shell = RemoteMachineShellConnection(worker)
shell.remove_directory(remote_tmp)
shell.disconnect()
self.servers = copy.copy(original_set)
if initial_version == fin:
builds, changes = BuildQuery().get_all_builds(version=initial_version)
for server in self.servers:
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
self.log.info("Loading version .. {0}".format(initial_version))
older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, initial_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(older_build)
remote.install_server(older_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()