This article collects typical usage examples of the Python method membase.helper.rebalance_helper.RebalanceHelper.wait_for_stats. If you are wondering what RebalanceHelper.wait_for_stats does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also look further into the containing class, membase.helper.rebalance_helper.RebalanceHelper.
The following shows 4 code examples of RebalanceHelper.wait_for_stats, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
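Judging from the calls in the examples below, wait_for_stats blocks until the named bucket statistic reaches the target value or a timeout expires, and returns whether the target was reached. A minimal sketch, assuming a `master` server object and a bucket named "default" (both placeholders), following the call pattern in Example 1:

from membase.helper.rebalance_helper import RebalanceHelper

# Wait up to 300 seconds for the disk-write queue to drain to zero.
drained = RebalanceHelper.wait_for_stats(master, "default", "ep_queue_size", 0,
                                         timeout_in_seconds=300, verbose=False)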
Example 1: _monitor_drain_queue
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats [as alias]
def _monitor_drain_queue(self):
    # start whenever drain_queue is > 0
    rest = RestConnection(self.master)
    start = time.time()
    stats = rest.get_bucket_stats(self.bucket)
    self.log.info("current ep_queue_size: {0}".format(stats["ep_queue_size"]))
    verified = RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_queue_size', 0,
                                              timeout_in_seconds=300, verbose=False) \
               and RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_flusher_todo', 0,
                                                  timeout_in_seconds=300, verbose=False)
    self.drained = verified
    self.drained_in_seconds = time.time() - start
Example 2: _load_data
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats [as alias]
def _load_data(self, master, load_ratio):
    log = logger.Logger.get_logger()
    if load_ratio == -1:
        # let's load 0.1 data
        load_ratio = 0.1
    distribution = {1024: 0.5, 20: 0.5}
    # TODO: with write_only = False, sometimes the load hangs; debug this
    inserted_keys, rejected_keys = \
        MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                              ram_load_ratio=load_ratio,
                                                              number_of_threads=1,
                                                              value_size_distribution=distribution,
                                                              write_only=True)
    log.info("wait until data is completely persisted on the disk")
    RebalanceHelper.wait_for_stats(master, "default", 'ep_queue_size', 0)
    RebalanceHelper.wait_for_stats(master, "default", 'ep_flusher_todo', 0)
    return inserted_keys
Example 3: _create_load_multiple_bucket
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats [as alias]
def _create_load_multiple_bucket(self, server, bucket_data, howmany=2):
    created = BucketOperationHelper.create_multiple_buckets(server, 1, howmany=howmany)
    self.assertTrue(created, "unable to create multiple buckets")
    rest = RestConnection(server)
    buckets = rest.get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(server, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
        # let's insert some data
        distribution = {2 * 1024: 0.5, 20: 0.5}
        bucket_data[bucket.name]["inserted_keys"], bucket_data[bucket.name]["reject_keys"] = \
            MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[server], name=bucket.name,
                                                                  ram_load_ratio=2.0,
                                                                  number_of_threads=2,
                                                                  value_size_distribution=distribution,
                                                                  write_only=True,
                                                                  moxi=True)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_queue_size', 0)
        RebalanceHelper.wait_for_stats(server, bucket.name, 'ep_flusher_todo', 0)
Example 4: test_getr
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import wait_for_stats [as alias]
def test_getr(self):
    item_count = self.input.param("item_count", 10000)
    replica_count = self.input.param("replica_count", 1)
    expiration = self.input.param("expiration", 0)
    delay = float(self.input.param("delay", 0))
    eject = self.input.param("eject", 0)
    delete = self.input.param("delete", 0)
    mutate = self.input.param("mutate", 0)
    warmup = self.input.param("warmup", 0)
    skipload = self.input.param("skipload", 0)
    rebalance = self.input.param("rebalance", 0)
    negative_test = False
    if delay > expiration:
        negative_test = True
    if delete and not mutate:
        negative_test = True
    if skipload and not mutate:
        negative_test = True
    prefix = str(uuid.uuid4())[:7]
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    BucketOperationHelper.create_bucket(self.master, name=self.default_bucket_name, replica=replica_count,
                                        port=11210, test_case=self, bucket_ram=-1, password="")
    if rebalance == GetrTests.DURING_REBALANCE or rebalance == GetrTests.AFTER_REBALANCE:
        # leave 1 node unclustered for rebalance in
        ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
        ClusterOperationHelper.end_rebalance(self.master)
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers[:-1])
        ClusterOperationHelper.end_rebalance(self.master)
    else:
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        ClusterOperationHelper.end_rebalance(self.master)
    vprefix = ""
    if not skipload:
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
        if not expiration:
            RebalanceHelper.wait_for_stats_int_value(self.master, self.default_bucket_name, "curr_items_tot",
                                                     item_count * (replica_count + 1), "<=", 600, True)
    if delete:
        self._delete_items(item_count=item_count, prefix=prefix)
    if mutate:
        vprefix = "mutated"
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
    self.assertTrue(RebalanceHelper.wait_for_replication(self.rest.get_nodes(), timeout=180),
                    msg="replication did not complete")
    if eject:
        self._eject_items(item_count=item_count, prefix=prefix)
    if delay:
        self.sleep(delay)
    if rebalance == GetrTests.DURING_REBALANCE:
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    if rebalance == GetrTests.AFTER_REBALANCE:
        ClusterOperationHelper.end_rebalance(self.master)
    if warmup:
        self.log.info("restarting memcached")
        command = "rpc:multicall(erlang, apply, [fun () -> try ns_server_testrunner_api:restart_memcached(20000) catch _:_ -> ns_port_sup:restart_port_by_name(memcached) end end, []], 20000)."
        memcached_restarted, content = self.rest.diag_eval(command)
        # wait until memcached starts
        self.assertTrue(memcached_restarted, "unable to restart memcached process through diag/eval")
        RebalanceHelper.wait_for_stats(self.master, self.default_bucket_name, "curr_items_tot",
                                       item_count * (replica_count + 1), 600)
    count = self._getr_items(item_count=item_count, replica_count=replica_count, prefix=prefix, vprefix=vprefix)
    if negative_test:
        self.assertTrue(count == 0, "found {0} items, expected none".format(count))
    else:
        self.assertTrue(count == replica_count * item_count,
                        "expected {0} items, got {1} items".format(replica_count * item_count, count))