This article collects typical usage examples of the Python method memcached.helper.data_helper.MemcachedClientHelper.load_bucket. If you are wondering what exactly MemcachedClientHelper.load_bucket does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also look at the enclosing class, memcached.helper.data_helper.MemcachedClientHelper, for more context.
The following shows 5 code examples of the MemcachedClientHelper.load_bucket method, sorted by popularity by default.
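Before the examples, here is a minimal sketch of a typical call, pieced together from the keyword arguments that appear in the examples below. It is an illustration, not project code: the master server object is a hypothetical placeholder for whatever server configuration your test environment provides.

# Minimal sketch (assumptions: a reachable Couchbase/Membase node, the
# testrunner helpers on PYTHONPATH, and 'master' being a server object of the
# kind the test framework passes around).
from memcached.helper.data_helper import MemcachedClientHelper

inserted_count, rejected_count = MemcachedClientHelper.load_bucket(
    servers=[master],        # list of server objects to load through
    name="default",          # target bucket
    number_of_items=10000,   # how many keys to write
    number_of_threads=5,     # parallel writer threads
    write_only=True)         # only perform writes, no reads
print("inserted {0} keys, {1} rejected".format(inserted_count, rejected_count))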
Example 1: load_data
# Required import: from memcached.helper.data_helper import MemcachedClientHelper [as alias]
# Or: from memcached.helper.data_helper.MemcachedClientHelper import load_bucket [as alias]
def load_data(self, master, bucket, keys_count):
    log = logger.Logger.get_logger()
    inserted_keys_cnt = 0
    while inserted_keys_cnt < keys_count:
        keys_cnt, rejected_keys_cnt = MemcachedClientHelper.load_bucket(
            servers=[master], name=bucket, number_of_items=keys_count, number_of_threads=5, write_only=True
        )
        inserted_keys_cnt += keys_cnt
    log.info("wait until data is completely persisted on the disk")
    RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_queue_size", 0)
    RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_flusher_todo", 0)
    return inserted_keys_cnt
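The two wait_for_stats_on_all calls block until the disk-write queue stats drain to zero. As a rough, hedged sketch of the polling pattern such a helper implements (this is not the actual RebalanceHelper code; get_stat is a hypothetical callable standing in for whatever stats source you have):

import time

def wait_for_stat(get_stat, stat_name, expected, timeout=300, interval=2):
    # Poll a single stat until it reaches the expected value or the timeout expires.
    end = time.time() + timeout
    while time.time() < end:
        if get_stat(stat_name) == expected:
            return True
        time.sleep(interval)
    return False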
Example 2: load_data_for_buckets
# Required import: from memcached.helper.data_helper import MemcachedClientHelper [as alias]
# Or: from memcached.helper.data_helper.MemcachedClientHelper import load_bucket [as alias]
def load_data_for_buckets(rest, load_ratio, distribution, rebalanced_servers, bucket_data, test):
    buckets = rest.get_buckets()
    for bucket in buckets:
        inserted_count, rejected_count = \
            MemcachedClientHelper.load_bucket(name=bucket.name,
                                              servers=rebalanced_servers,
                                              ram_load_ratio=load_ratio,
                                              value_size_distribution=distribution,
                                              number_of_threads=1,
                                              write_only=True,
                                              moxi=True)
        test.log.info('inserted {0} keys'.format(inserted_count))
        bucket_data[bucket.name]["items_inserted_count"] += inserted_count
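The bucket_data dict this function updates is assumed to be seeded before the call, roughly as in the sketch below. This is an inference from the += on items_inserted_count, not code from the project; rest is the same RestConnection passed to the function above.

# Hypothetical setup for the bucket_data bookkeeping used above.
bucket_data = {}
for bucket in rest.get_buckets():
    bucket_data[bucket.name] = {"items_inserted_count": 0}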
Example 3: load_data
# Required import: from memcached.helper.data_helper import MemcachedClientHelper [as alias]
# Or: from memcached.helper.data_helper.MemcachedClientHelper import load_bucket [as alias]
def load_data(self, master, bucket, keys_count):
    inserted_keys_cnt = 0
    repeat_count = 0
    while inserted_keys_cnt < keys_count and repeat_count < 5:
        keys_cnt, rejected_keys_cnt = \
            MemcachedClientHelper.load_bucket(servers=[master],
                                              name=bucket,
                                              number_of_items=keys_count,
                                              number_of_threads=5,
                                              write_only=True)
        inserted_keys_cnt += keys_cnt
        if keys_cnt == 0:
            repeat_count += 1
        else:
            repeat_count = 0
    if repeat_count == 5:
        log.exception("impossible to load data")
    log.info("wait until data is completely persisted on the disk")
    RebalanceHelper.wait_for_persistence(master, bucket)
    return inserted_keys_cnt
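A hedged usage sketch for this retrying loader, assuming it sits on a test class where the framework exposes a master server (the attribute names here are placeholders, and log is expected to exist at module scope as in the original file):

# Hypothetical call site inside a test method; self.master stands in for
# whatever server object the test framework exposes.
inserted = self.load_data(self.master, "default", keys_count=100000)
self.assertTrue(inserted >= 100000, "fewer keys were loaded than requested")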
Example 4: load_buckets
# Required import: from memcached.helper.data_helper import MemcachedClientHelper [as alias]
# Or: from memcached.helper.data_helper.MemcachedClientHelper import load_bucket [as alias]
def load_buckets(server, name, get, threads, moxi):
    distro = {500: 0.5, 1024: 0.5}
    MemcachedClientHelper.load_bucket([server], name, -1, 10000000, distro, threads, -1, get, moxi)
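For readability, a similar bulk load can be written with the keyword arguments used in the other examples on this page. This sketch deliberately drops the two positional -1 values and the get flag, whose mapping to parameter names is not spelled out here, so treat it as an approximation rather than an exact equivalent of the call above.

# Keyword-argument sketch of a comparable bulk load; parameter names are taken
# from the other examples on this page, not inferred from the positional call.
def load_buckets_kw(server, name, threads, moxi):
    distro = {500: 0.5, 1024: 0.5}
    MemcachedClientHelper.load_bucket(servers=[server],
                                      name=name,
                                      number_of_items=10000000,
                                      value_size_distribution=distro,
                                      number_of_threads=threads,
                                      moxi=moxi)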
Example 5: common_test_body
# Required import: from memcached.helper.data_helper import MemcachedClientHelper [as alias]
# Or: from memcached.helper.data_helper.MemcachedClientHelper import load_bucket [as alias]
def common_test_body(self, replica, failover_reason, load_ratio, age, max_nodes):
    log = logger.Logger.get_logger()
    bucket_name = "default"
    log.info("replica : {0}".format(replica))
    log.info("failover_reason : {0}".format(failover_reason))
    log.info("load_ratio : {0}".format(load_ratio))
    log.info("age : {0}".format(age))
    log.info("max_nodes : {0}".format(max_nodes))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    rest.update_autofailover_settings(True, age, max_nodes)
    rest.reset_autofailover()
    bucket_ram = info.memoryQuota * 2 / 3
    rest.create_bucket(bucket=bucket_name,
                       ramQuotaMB=bucket_ram,
                       replicaNumber=replica,
                       proxyPort=info.moxi)
    ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
    self.assertTrue(ready, "wait_for_memcached failed")
    credentials = self._input.membase_settings
    log.info("inserting some items in the master before adding any nodes")
    distribution = {512: 0.4, 1 * 1024: 0.59, 5 * 1024: 0.01}
    if load_ratio > 10:
        distribution = {5 * 1024: 0.4, 10 * 1024: 0.5, 20 * 1024: 0.1}
    ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
    nodes = rest.node_statuses()
    rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
    msg = "rebalance failed after adding these nodes {0}".format(nodes)
    self.assertTrue(rest.monitorRebalance(), msg=msg)
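    # The cluster is now built out and rebalanced; next the bucket is loaded
    # by RAM ratio across all servers before the autofailover scenario runs.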
    inserted_count, rejected_count = \
        MemcachedClientHelper.load_bucket(servers=self._servers,
                                          ram_load_ratio=load_ratio,
                                          value_size_distribution=distribution,
                                          number_of_threads=1)
    log.info('inserted {0} keys'.format(inserted_count))
    nodes = rest.node_statuses()
    # why are we in this while loop?
    while (len(nodes) - replica) >= 1:
        final_replication_state = RestHelper(rest).wait_for_replication(900)
        msg = "replication state after waiting for up to 15 minutes : {0}"
        self.log.info(msg.format(final_replication_state))
        chosen = AutoFailoverBaseTest.choose_nodes(master, nodes, replica)
        for node in chosen:
            # let's do op
            if failover_reason == 'stop_membase':
                self.stop_membase(node)
                log.info("10 seconds delay to wait for membase-server to shutdown")
                # wait for up to 5 minutes until the node is reported as down
                self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                msg="node status is not unhealthy even after waiting for 5 minutes")
            elif failover_reason == "firewall":
                self.enable_firewall(node)
                self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                                msg="node status is not unhealthy even after waiting for 5 minutes")
        # list pre-autofailover stats
        stats = rest.get_bucket_stats()
        self.log.info("pre-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
        AutoFailoverBaseTest.wait_for_failover_or_assert(master, replica, age, self)
        # manually fail over any unhealthy:active nodes left; the most we should need to fail over manually is replica - max_nodes
        manual_failover_count = replica - max_nodes
        for node in chosen:
            self.log.info("checking {0}".format(node.ip))
            if node.status.lower() == "unhealthy" and node.clusterMembership == "active":
                msg = "node {0} not failed over and we are over our manual failover limit of {1}"
                self.assertTrue(manual_failover_count > 0, msg.format(node.ip, (replica - max_nodes)))
                self.log.info("manual failover {0}".format(node.ip))
                rest.fail_over(node.id)
                manual_failover_count -= 1
        stats = rest.get_bucket_stats()
        self.log.info("post-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
        self.assertTrue(stats["curr_items"] == inserted_count,
                        "failover completed but curr_items ({0}) does not match inserted items ({1})".format(stats["curr_items"], inserted_count))
        log.info("10 seconds sleep after autofailover before invoking rebalance...")
        time.sleep(10)
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[node.id for node in chosen])
        msg = "rebalance failed while removing failover nodes {0}".format(chosen)
        self.assertTrue(rest.monitorRebalance(), msg=msg)
        nodes = rest.node_statuses()
        if len(nodes) / (1 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            self.log.info(msg.format(final_replication_state))
            self.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master, bucket_name, 600),
                            msg="replication was completed but sum(curr_items) doesn't match the curr_items_total")
            start_time = time.time()
            stats = rest.get_bucket_stats()
#......... the rest of the code is omitted here .........