本文整理汇总了Python中membase.helper.cluster_helper.ClusterOperationHelper.flushctl_set方法的典型用法代码示例。如果您正苦于以下问题:Python ClusterOperationHelper.flushctl_set方法的具体用法?Python ClusterOperationHelper.flushctl_set怎么用?Python ClusterOperationHelper.flushctl_set使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.cluster_helper.ClusterOperationHelper
的用法示例。
在下文中一共展示了ClusterOperationHelper.flushctl_set方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_expiry_mutation_for_dcp_stream_boundary_from_beginning
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def test_expiry_mutation_for_dcp_stream_boundary_from_beginning(self):
    """Verify an eventing OnDelete handler receives every expiry mutation
    when the DCP stream boundary starts from the beginning.
    """
    # Load documents that expire after one second.
    self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag,
              verify_data=False, batch_size=self.batch_size, exp=1)
    # Shrink the expiry pager interval so expired docs are purged quickly.
    ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1,
                                        bucket=self.src_bucket_name)
    function_body = self.create_save_function_body(
        self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE, worker_count=3)
    self.deploy_function(function_body)
    # Wait for eventing to catch up with all the expiry mutations and verify results
    self.verify_eventing_results(self.function_name, self.docs_per_day * 2016,
                                 on_delete=True)
    self.undeploy_and_delete_function(function_body)
示例2: _create_access_log
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def _create_access_log(self):
    """Force an access-scanner run on every server for every bucket and wait
    until a new access log has been produced, failing the test otherwise.
    """
    # One stats helper per bucket, keyed by bucket name.
    bucket_stats = {bucket.name: StatsCommon() for bucket in self.buckets}
    for bucket in self.buckets:
        stats = bucket_stats[bucket.name]
        for server in self.servers:
            # Remember how many scanner runs happened so far, so we can
            # detect the next one.
            scanner_runs = stats.get_stats([server], bucket, '', 'ep_num_access_scanner_runs')[server]
            self.log.info("current access scanner run for %s in bucket %s is %s times" % (server.ip, bucket.name, scanner_runs))
            self.log.info("setting access scanner time %s minutes for %s in bucket %s" % (self.access_log_time, server.ip, bucket.name))
            # Schedule the access scanner to fire after access_log_time minutes.
            ClusterOperationHelper.flushctl_set(server, "alog_sleep_time", self.access_log_time, bucket.name)
            if not self._wait_for_access_run(self.access_log_time, scanner_runs, server, bucket, stats):
                self.fail("Not able to create access log within %s minutes" % self.access_log_time)
示例3: test_warmup
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def test_warmup(self):
    """Load data until the bucket reaches a memory threshold / DGM state,
    optionally trigger an access-scanner run, then restart memcached and
    verify the warmup completes (loading more data afterwards as a check).
    """
    # Stat name used as the memory threshold (defaults to the low watermark).
    ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
    # Resident ratio (%) at/above which we keep loading; the default 110
    # can never be reached, so by default only the memory threshold matters.
    active_resident_threshold = int(self.input.param("active_resident_threshold", 110))
    access_log_time = self.input.param("access_log_time", 2)  # minutes
    mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
    stats = mc.stats()
    threshold = int(self.input.param('threshold', stats[ep_threshold]))
    threshold_reached = False
    self.num_items = self.input.param("items", 10000)
    self._load_doc_data_all_buckets('create')
    # load items till reached threshold or mem-ratio is less than resident ratio threshold
    while not threshold_reached :
        mem_used = int(mc.stats()["mem_used"])
        if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
            self.log.info("mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s " % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"]))
            # Not there yet: load another batch on top of what exists.
            items = self.num_items
            self.num_items += self.input.param("items", 10000)
            self._load_doc_data_all_buckets('create', items)
        else:
            threshold_reached = True
            self.log.info("DGM state achieved!!!!")
    # parallel load of data
    items = self.num_items
    self.num_items += 10000
    tasks = self._async_load_doc_data_all_buckets('create', items)
    # wait for draining of data before restart and warm up
    rest = RestConnection(self.servers[0])
    self.nodes_server = rest.get_nodes()
    self._wait_for_stats_all_buckets(self.nodes_server)
    # Snapshot stats so they can be compared after warmup.
    self._stats_befor_warmup()
    for task in tasks:
        task.result()
    # If warmup is done through access log then run access scanner
    if self.access_log :
        scanner_runs = int(mc.stats()["ep_num_access_scanner_runs"])
        self.log.info("setting access scanner time %s minutes" % access_log_time)
        self.log.info("current access scanner run is %s" % scanner_runs)
        ClusterOperationHelper.flushctl_set(self.nodes_server[0], "alog_sleep_time", access_log_time , self.bucket_name)
        if not self._wait_for_access_run(access_log_time, scanner_runs, mc):
            self.fail("Not able to create access log within %s" % access_log_time)
    self._restart_memcache()
    # After a successful warmup, mutate the previously loaded items to
    # confirm the bucket is usable again.
    if self._warmup():
        self._load_doc_data_all_buckets('update', self.num_items - items)
示例4: replication_verification
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def replication_verification(master, bucket_data, replica, test, failed_over=False):
    """Verify, per bucket, that replication has completed and item counts match
    what was inserted.

    master       -- server to connect REST/memcached to.
    bucket_data  -- per-bucket dict with either 'kv_store' or
                    'items_inserted_count' holding the expected item count.
    replica      -- replica count configured on the buckets.
    test         -- the running test case (used for logging and assertions).
    failed_over  -- when True, skip the exact curr_items comparison because a
                    failover changes the active item distribution.

    Failures are collected into `asserts` and reported at the end so every
    bucket gets checked before the test is failed.
    """
    asserts = []
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    nodes = rest.node_statuses()
    test.log.info("expect {0} / {1} replication ? {2}".format(len(nodes),
        (1.0 + replica), len(nodes) / (1.0 + replica)))
    # Kick the expiry pager on every bucket so expired items are purged
    # before counting.
    for bucket in buckets:
        ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
    # Only verify when there are enough nodes left to hold all replicas.
    if len(nodes) / (1.0 + replica) >= 1:
        final_replication_state = RestHelper(rest).wait_for_replication(300)
        msg = "replication state after waiting for up to 5 minutes : {0}"
        test.log.info(msg.format(final_replication_state))
        #run expiry_pager on all nodes before doing the replication verification
        for bucket in buckets:
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
            test.log.info("wait for expiry pager to run on all these nodes")
            time.sleep(30)
            # NOTE(review): the next two calls set exp_pager_stime to 3600 and
            # then immediately back to 30, so the 3600 has no lasting effect —
            # one of them looks like a leftover; confirm the intended value.
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 3600, bucket.name)
            ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 30, bucket.name)
            # windows need more than 15 minutes to get number matched
            replica_match = RebalanceHelper.wait_till_total_numbers_match(bucket=bucket.name,
                                                                          master=master,
                                                                          timeout_in_seconds=600)
            if not replica_match:
                asserts.append("replication was completed but sum(curr_items) don't match the curr_items_total %s" %
                               bucket.name)
            if not failed_over:
                stats = rest.get_bucket_stats(bucket=bucket.name)
                RebalanceHelper.print_taps_from_all_nodes(rest, bucket.name)
                msg = "curr_items : {0} is not equal to actual # of keys inserted : {1} : bucket: {2}"
                # Expected count comes from the kv_store when one was kept,
                # otherwise from the recorded insert counter.
                if bucket_data[bucket.name]['kv_store'] is None:
                    items_inserted = bucket_data[bucket.name]["items_inserted_count"]
                else:
                    items_inserted = len(bucket_data[bucket.name]['kv_store'].valid_items())
                active_items_match = stats["curr_items"] == items_inserted
                if not active_items_match:
                    asserts.append(msg.format(stats["curr_items"], items_inserted, bucket.name))
    # Report every collected failure, then fail once if any occurred.
    if len(asserts) > 0:
        for msg in asserts:
            test.log.error(msg)
        test.assertTrue(len(asserts) == 0, msg=asserts)
示例5: run_expiry_pager
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def run_expiry_pager(self, ts = 15):
    """Set the expiry pager sleep interval to `ts` seconds on the master node
    so expired items get purged promptly.
    """
    ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", ts)
    self.log.info("wait for expiry pager to run on all these nodes")
示例6: _set_checkpoint_timeout
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def _set_checkpoint_timeout(self, servers, bucket, time):
    """Set the checkpoint period ('chk_period') for `bucket`, applied via the
    first server in `servers`.
    """
    target = servers[0]
    ClusterOperationHelper.flushctl_set(target, 'chk_period', time, bucket)
示例7: _set_checkpoint_size
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def _set_checkpoint_size(self, servers, bucket, size):
    """Set the maximum items per checkpoint ('chk_max_items') for `bucket`,
    applied via the first server in `servers`.
    """
    target = servers[0]
    ClusterOperationHelper.flushctl_set(target, 'chk_max_items', size, bucket)
示例8: _expiry_pager
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import flushctl_set [as 别名]
def _expiry_pager(self, master):
    """Set a 10-second expiry pager interval on every bucket of the cluster
    reachable through `master`, then sleep long enough for a sweep to run.
    """
    for bucket in self._get_cluster_buckets(master):
        ClusterOperationHelper.flushctl_set(master, "exp_pager_stime", 10, bucket)
    self._log.info("wait for expiry pager to run on all these nodes")
    time.sleep(30)