本文整理汇总了Python中membase.helper.bucket_helper.BucketOperationHelper.create_bucket方法的典型用法代码示例。如果您正苦于以下问题:Python BucketOperationHelper.create_bucket方法的具体用法?Python BucketOperationHelper.create_bucket怎么用?Python BucketOperationHelper.create_bucket使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.bucket_helper.BucketOperationHelper
的用法示例。
在下文中一共展示了BucketOperationHelper.create_bucket方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _test_delete_key_and_backup_and_restore_body
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def _test_delete_key_and_backup_and_restore_body(self):
    """Back up a bucket after deleting exactly one key, restore on every node,
    then verify the deleted key stays absent while all other keys survive.

    Raises (via self.fail / assertTrue) on load errors, on persistence-wait
    timeouts, or on verification mismatches after the restore.
    """
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    # delete exactly one key before the backup; after restore it must not reappear
    client.delete(keys[0])
    # wait until all pending writes are persisted so the backup is complete
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    # fixed message: this check is on ep_flusher_todo, not ep_queue_size
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    self.log.info('verifying that all those keys...')
    missing_keys = []
    verify_keys = []
    for key in keys:
        vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF (1024 vbuckets)
        client.vbucketId = vBucketId
        if key == keys[0]:
            missing_keys.append(key)
        else:
            verify_keys.append(key)
    self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                    "Keys are not empty")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                    "Missing keys")
示例2: _test_backup_and_restore_bucket_overwriting_body
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
    """Back up a bucket, replace every key's value, then restore with the
    given overwrite flag and assert the expected values are observed.

    overwrite_flag is forwarded to BackupHelper.restore and drives which
    value ("2" from the replace, or not) each key is expected to hold.
    """
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    # fixed: sibling tests assert this wait result; the original silently ignored it
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    client = MemcachedClientHelper.direct_client(self.master, "default")
    expiry = 2400
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, "1")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    # wait until all pending writes are persisted so the backup is complete
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    # fixed message: this check is on ep_flusher_todo, not ep_queue_size
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    # overwrite every key AFTER the backup so the restore has something to clash with
    for key in keys:
        try:
            client.replace(key, expiry, 0, "2")
        except mc_bin_client.MemcachedError as error:
            msg = "unable to replace key : {0} in bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
        time.sleep(10)
    self.log.info('verifying that all those keys...')
    for key in keys:
        if overwrite_flag:
            # fixed grammar in the assertion messages ("should has" -> "should have")
            self.assertEqual("2", client.get(key=key), key + " should have value = 2")
        else:
            self.assertNotEqual("2", client.get(key=key), key + " should not have value = 2")
    self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
示例3: _test_cluster_topology_change_body
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def _test_cluster_topology_change_body(self):
    """Back up a bucket, tear the cluster down, rebuild it with one fewer
    node, recreate the bucket, restore the backup and verify all keys.
    """
    bucket = "default"
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    # size distribution: value-size-in-bytes -> fraction of keys at that size
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                         ram_load_ratio=1,
                                                                                         value_size_distribution=distribution,
                                                                                         moxi=True,
                                                                                         write_only=True,
                                                                                         number_of_threads=2)
    self.log.info("Sleep after data load")
    # wait until all pending writes are persisted so the backup is complete
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    # fixed message: this check is on ep_flusher_todo, not ep_queue_size
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    # let's create a unique folder in the remote location
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
        shell.disconnect()
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    # rebuild the cluster with every node except the last (the topology change)
    servers = self.servers[:-1]
    self.add_node_and_rebalance(servers[0], servers)
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder)
        time.sleep(10)
    # fixed: the original discarded the verification result; assert it so
    # missing keys actually fail the test
    self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self),
                    "Missing keys")
示例4: test_default_bucket
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def test_default_bucket(self):
    """Create the default bucket, back it up to a unique remote temp folder,
    download the resulting backup files and log each one."""
    master = self.servers[0]
    BucketOperationHelper.create_bucket(serverInfo=master, test_case=self)
    # let's create a unique folder in the remote location
    shell = RemoteMachineShellConnection(master)
    self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket-", uuid.uuid4())
    output, error = shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
    shell.log_command_output(output, error)
    # now let's back up
    BackupHelper(master).backup('default', self.remote_tmp_folder)
    backup_files = BackupHelper(master).download_backups(self.remote_tmp_folder)
    # fixed: was a Python 2 print statement with a typo ("backup rertued");
    # use the test logger like the rest of the file
    self.log.info('backup returned')
    for backup_file in backup_files:
        self.log.info(backup_file)
示例5: _test_backup_and_restore_from_to_different_buckets
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def _test_backup_and_restore_from_to_different_buckets(self):
    """Back up one bucket, delete it, create a differently named bucket on the
    same port, restore the backup into it, and verify every inserted key."""
    bucket_before_backup = "bucket_before_backup"
    bucket_after_backup = "bucket_after_backup"
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_before_backup, port=11212,
                                        test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_before_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    self.add_nodes_and_rebalance()
    # size distribution: value-size-in-bytes -> fraction of keys at that size
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                         name=bucket_before_backup,
                                                                                         ram_load_ratio=20,
                                                                                         value_size_distribution=distribution,
                                                                                         write_only=True,
                                                                                         moxi=True,
                                                                                         number_of_threads=2)
    self.log.info("Sleep after data load")
    # wait until all pending writes are persisted so the backup is complete
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_before_backup, 'ep_flusher_todo', 0)
    # fixed message: this check is on ep_flusher_todo, not ep_queue_size
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        output, error = shell.execute_command(self.perm_command)
        shell.log_command_output(output, error)
        node = RestConnection(server).get_nodes_self()
        BackupHelper(server, self).backup(bucket_before_backup, node, self.remote_tmp_folder)
        shell.disconnect()
    # replace the source bucket with a differently named one on the same port
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket_before_backup, self)
    BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket_after_backup, port=11212,
                                        test_case=self)
    ready = BucketOperationHelper.wait_for_memcached(self.master, bucket_after_backup)
    self.assertTrue(ready, "wait_for_memcached failed")
    for server in self.servers:
        BackupHelper(server, self).restore(self.remote_tmp_folder, moxi_port=11212)
        time.sleep(10)
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket_after_backup, 'ep_flusher_todo', 0)
    # fixed message: this check is on ep_flusher_todo, not ep_queue_size
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    self.assertTrue(BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11212, debug=False,
                                                      bucket=bucket_after_backup), "Missing keys")
示例6: _test_backup_add_restore_bucket_body
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def _test_backup_add_restore_bucket_body(self, bucket="default", port_no=11211, delay_after_data_load=0, startup_flag=True):
    """Load some data, back up the bucket, recreate it, restore, and verify.

    bucket: bucket name to create/backup/restore.
    port_no: moxi port for the bucket and for verification.
    delay_after_data_load: seconds to sleep after loading (only when membase stays up).
    startup_flag: when False, membase is stopped around the backup and restore steps.
    """
    self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
    master = self.servers[0]
    node = RestConnection(master).get_nodes_self()
    BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)
    keys = BucketOperationHelper.load_some_data(master, bucket_name=bucket, test=self)
    if not startup_flag:
        # back up from a cold (stopped) server
        self.shell.stop_membase()
    else:
        self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
        time.sleep(delay_after_data_load)
    # let's create a unique folder in the remote location
    output, error = self.shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
    self.shell.log_command_output(output, error)
    # now let's back up
    BackupHelper(master, self).backup(bucket, node, self.remote_tmp_folder)
    if not startup_flag:
        self.shell.start_membase()
    # recreate an empty bucket, then restore into it (again cold if requested)
    BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
    BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1, port=port_no, test_case=self)
    if not startup_flag:
        self.shell.stop_membase()
    BackupHelper(master, self).restore(self.remote_tmp_folder)
    if not startup_flag:
        self.shell.start_membase()
    # fixed: the original discarded the verification result; assert it.
    # NOTE(review): this call passes master.ip where sibling tests pass the
    # server object -- TODO confirm which form verify_data expects here.
    self.assertTrue(BucketOperationHelper.verify_data(master.ip, keys, False, False, port_no, self),
                    "Missing keys")
示例7: test_getr
# 需要导入模块: from membase.helper.bucket_helper import BucketOperationHelper [as 别名]
# 或者: from membase.helper.bucket_helper.BucketOperationHelper import create_bucket [as 别名]
def test_getr(self):
    """Exercise replica reads (getr): load items, optionally delete / mutate /
    eject / expire them and optionally rebalance or restart memcached, then
    read every key from the replicas and check the returned count.

    All knobs come from test-input params; several combinations are
    "negative" tests where replica reads are expected to return nothing.
    """
    item_count = self.input.param("item_count", 10000)
    replica_count = self.input.param("replica_count", 1)
    expiration = self.input.param("expiration", 0)      # per-item TTL in seconds (0 = no expiry)
    delay = float(self.input.param("delay", 0))          # sleep before the replica reads
    eject = self.input.param("eject", 0)
    delete = self.input.param("delete", 0)
    mutate = self.input.param("mutate", 0)
    warmup = self.input.param("warmup", 0)               # restart memcached before reading
    skipload = self.input.param("skipload", 0)
    rebalance = self.input.param("rebalance", 0)
    # replica reads must find nothing if items expired before the read,
    # or were deleted / never loaded without being re-written by mutate
    negative_test = False
    if delay > expiration:
        negative_test = True
    if delete and not mutate:
        negative_test = True
    if skipload and not mutate:
        negative_test = True
    prefix = str(uuid.uuid4())[:7]
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    BucketOperationHelper.create_bucket(self.master, name=self.default_bucket_name, replica=replica_count, port=11210, test_case=self, bucket_ram=-1, password="")
    if rebalance == GetrTests.DURING_REBALANCE or rebalance == GetrTests.AFTER_REBALANCE:
        # leave 1 node unclustered for rebalance in
        ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
        ClusterOperationHelper.end_rebalance(self.master)
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers[:-1])
        ClusterOperationHelper.end_rebalance(self.master)
    else:
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        ClusterOperationHelper.end_rebalance(self.master)
    vprefix = ""                                         # value prefix; changed when items are mutated
    if not skipload:
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
        if not expiration:
            # wait for actives + replicas to reach the expected item total
            RebalanceHelper.wait_for_stats_int_value(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), "<=", 600, True)
    if delete:
        self._delete_items(item_count=item_count, prefix=prefix)
    if mutate:
        # rewrite every item with a distinguishable value prefix
        vprefix = "mutated"
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
    self.assertTrue(RebalanceHelper.wait_for_replication(self.rest.get_nodes(), timeout=180),
                    msg="replication did not complete")
    if eject:
        self._eject_items(item_count=item_count, prefix=prefix)
    if delay:
        self.sleep(delay)
    if rebalance == GetrTests.DURING_REBALANCE:
        # start (but do not finish) a rebalance so reads happen mid-rebalance
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    if rebalance == GetrTests.AFTER_REBALANCE:
        ClusterOperationHelper.end_rebalance(self.master)
    if warmup:
        self.log.info("restarting memcached")
        command = "rpc:multicall(erlang, apply, [fun () -> try ns_server_testrunner_api:restart_memcached(20000) catch _:_ -> ns_port_sup:restart_port_by_name(memcached) end end, []], 20000)."
        memcached_restarted, content = self.rest.diag_eval(command)
        #wait until memcached starts
        self.assertTrue(memcached_restarted, "unable to restart memcached process through diag/eval")
        RebalanceHelper.wait_for_stats(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), 600)
    count = self._getr_items(item_count=item_count, replica_count=replica_count, prefix=prefix, vprefix=vprefix)
    if negative_test:
        self.assertTrue(count == 0, "found {0} items, expected none".format(count))
    else:
        self.assertTrue(count == replica_count * item_count, "expected {0} items, got {1} items".format(replica_count * item_count, count))
    if rebalance == GetrTests.DURING_REBALANCE:
        ClusterOperationHelper.end_rebalance(self.master)