This article collects typical usage examples of the Python class membase.helper.rebalance_helper.RebalanceHelper. If you have been wondering what RebalanceHelper is for, or how and where to use it, the hand-picked examples below should help.
15 code examples of the RebalanceHelper class are shown, sorted by popularity by default.
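Before the individual examples, it helps to know the shape of the API they share: RebalanceHelper exposes class-level blocking waits keyed on memcached stats. A minimal orientation sketch, using only signatures that appear in the examples below (`master` is a server object and `bucket` a bucket name, both supplied by the test harness):

from membase.helper.rebalance_helper import RebalanceHelper

# Block until the bucket's disk write queue has drained (Examples 1, 4, 7, 10, 11, 13, 15).
RebalanceHelper.wait_for_persistence(master, bucket)

# Block until a named stat reaches a value on every node (Examples 5, 6, 9, 14).
ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)

# The single-node variant with an explicit deadline (Example 12).
ready = RebalanceHelper.wait_for_stats(master, bucket, 'ep_flusher_todo', 0,
                                       timeout_in_seconds=300, verbose=False)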
Example 1: create_ddocs
def create_ddocs(self, is_dev_view):
mapview = View(
self.map_view_name,
"""function(doc) {
emit(doc.integer, doc.string);
}""",
dev_view=is_dev_view,
)
self.cluster.create_view(self.master, "test", mapview)
redview = View(
self.red_view_name,
"""function(doc) {
emit([doc.integer, doc.string], doc.integer);
}""",
"""_count""",
dev_view=is_dev_view,
)
self.cluster.create_view(self.master, "test", redview)
redview_stats = View(
self.red_view_stats_name,
"""function(doc) {
emit(doc.string, doc.string);
}""",
"""_stats""",
dev_view=is_dev_view,
)
self.cluster.create_view(self.master, "test2", redview_stats)
RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
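Note the trailing `0` passed to wait_for_persistence here and in Example 11; the other examples omit it, so it is presumably a timeout or queue-size override rather than a required argument. In stat terms, persistence means the two counters polled in Example 12 have reached zero; a rough equivalent sketch:

# What wait_for_persistence amounts to, expressed with the stat-level helper
# used in Example 12 (a sketch, not the helper's actual implementation):
drained = RebalanceHelper.wait_for_stats(master, bucket, 'ep_queue_size', 0,
                                         timeout_in_seconds=300, verbose=False) \
      and RebalanceHelper.wait_for_stats(master, bucket, 'ep_flusher_todo', 0,
                                         timeout_in_seconds=300, verbose=False)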
Example 2: replication_verification
def replication_verification(master, bucket, replica, inserted_count, test):
rest = RestConnection(master)
nodes = rest.node_statuses()
if len(nodes) / (1 + replica) >= 1:
final_replication_state = RestHelper(rest).wait_for_replication(900)
msg = "replication state after waiting for up to 15 minutes : {0}"
test.log.info(msg.format(final_replication_state))
        # on Windows, timeout_in_seconds needs to be 15+ minutes
test.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master=master,
bucket=bucket,
timeout_in_seconds=1200),
msg="replication was completed but sum(curr_items) dont match the curr_items_total")
start_time = time.time()
stats = rest.get_bucket_stats()
while time.time() < (start_time + 120) and stats["curr_items"] != inserted_count:
test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
time.sleep(5)
stats = rest.get_bucket_stats()
RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
stats = rest.get_bucket_stats()
msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
test.assertEquals(stats["curr_items"], inserted_count,
msg=msg.format(stats["curr_items"], inserted_count))
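The inline loop above that polls rest.get_bucket_stats() for up to two minutes is a recurring pattern; a hypothetical generic helper (not part of RebalanceHelper) that captures it:

import time

def poll_bucket_stat(rest, stat_name, expected, timeout=120, interval=5):
    # Poll the REST bucket stats until stat_name equals expected, or give up.
    deadline = time.time() + timeout
    stats = rest.get_bucket_stats()
    while time.time() < deadline and stats[stat_name] != expected:
        time.sleep(interval)
        stats = rest.get_bucket_stats()
    return stats[stat_name] == expected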
Example 3: _verify_stats_all_buckets
def _verify_stats_all_buckets(self, servers, timeout=60):
stats_tasks = []
for bucket in self.buckets:
items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
'curr_items', '==', items))
stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
'vb_active_curr_items', '==', items))
        # A node cannot hold a replica of its own active data, so the number of
        # reachable replicas is capped at len(servers) - 1.
        available_replicas = self.num_replicas
        if len(servers) <= self.num_replicas:
            available_replicas = len(servers) - 1
stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
'vb_replica_curr_items', '==', items * available_replicas))
stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
'curr_items_tot', '==', items * (available_replicas + 1)))
try:
for task in stats_tasks:
task.result(timeout)
except Exception as e:
                print(e)
for task in stats_tasks:
task.cancel()
self.log.error("unable to get expected stats for any node! Print taps for all nodes:")
rest = RestConnection(self.master)
for bucket in self.buckets:
RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
raise Exception("unable to get expected stats during {0} sec".format(timeout))
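The stat targets in this example encode a simple invariant: an item is counted once in vb_active_curr_items and once per reachable replica in vb_replica_curr_items. A worked sketch of the arithmetic (values are illustrative):

# 1000 items, 2 configured replicas, but only a 2-node cluster:
items, num_replicas, num_servers = 1000, 2, 2
available_replicas = min(num_replicas, num_servers - 1)    # 1, capped by cluster size
expected_active = items                                    # 1000  (vb_active_curr_items)
expected_replica = items * available_replicas              # 1000  (vb_replica_curr_items)
expected_total = items * (available_replicas + 1)          # 2000  (curr_items_tot)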
Example 4: run_test
def run_test(self):
ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
active_resident_threshold = int(self.input.param("active_resident_threshold", 10))
mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
stats = mc.stats()
threshold = int(self.input.param("threshold", stats[ep_threshold]))
threshold_reached = False
self.num_items = self.input.param("items", 10000)
self._load_doc_data_all_buckets("create")
        # keep loading items until mem_used crosses the threshold and the active resident ratio drops below the target
while not threshold_reached:
mem_used = int(mc.stats()["mem_used"])
if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info(
                    "still loading: mem_used %s (threshold %s), vb_active_perc_mem_resident %s"
                    % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"])
                )
items = self.num_items
self.num_items += self.input.param("items", 10000)
self._load_doc_data_all_buckets("create", items)
else:
threshold_reached = True
self.log.info("DGM state achieved!!!!")
# wait for draining of data before restart and warm up
for bucket in self.buckets:
RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket)
while 1:
# read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])
read_data_task = Thread(target=self._run_get)
read_data_task.start()
            # spawn threads_to_run pairs of "stats all" / "stats reset" workers for 5 minutes
start = time.time()
while (time.time() - start) < 300:
stats_all_thread = []
stats_reset_thread = []
for i in xrange(self.threads_to_run):
stat_str = ""
stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_all_thread[i].start()
stat_str = "reset"
stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
stats_reset_thread[i].start()
for i in xrange(self.threads_to_run):
stats_all_thread[i].join()
stats_reset_thread[i].join()
del stats_all_thread
del stats_reset_thread
# read_data_task.result()
read_data_task.join()
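The exit condition of the loading loop is easy to misread; written as a predicate, the test keeps loading until both parts hold (names as in the stats calls above):

def dgm_reached(stats, threshold, active_resident_threshold):
    # True once memory use has crossed the watermark AND the share of
    # active items still resident in memory has fallen below the target.
    mem_used = int(stats["mem_used"])
    resident_pct = int(stats["vb_active_perc_mem_resident"])
    return mem_used >= threshold and resident_pct < active_resident_threshold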
Example 5: _verify_data
def _verify_data(self, master, rest, inserted_keys):
log = logger.Logger.get_logger()
log.info("Verifying data")
ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_queue_size", 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_flusher_todo", 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
BucketOperationHelper.keys_exist_or_assert(keys=inserted_keys, server=master, bucket_name="default", test=self)
Example 6: verify_data
def verify_data(master, inserted_keys, bucket, test):
test.log.info("Verifying data")
ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
    test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
BucketOperationHelper.keys_exist_or_assert_in_parallel(keys=inserted_keys, server=master, \
bucket_name=bucket, test=test, concurrency=4)
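Examples 5 and 6 repeat the same pair of waits before verifying keys; a hypothetical consolidation:

def wait_for_drain(master, bucket, test):
    # On-disk state is only trustworthy once both queues are empty.
    for stat in ('ep_queue_size', 'ep_flusher_todo'):
        ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, stat, 0)
        test.assertTrue(ready, "wait_for %s == 0 failed" % stat)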
Example 7: test_views_failover
def test_views_failover(self):
num_nodes = self.input.param('num-nodes', 1)
ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
RebalanceHelper.wait_for_persistence(self.master, self.bucket_name)
self.cluster.failover(self.servers,
self.servers[1:num_nodes])
self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
self.perform_ddoc_ops(ddocs)
Example 8: _failover_swap_rebalance
def _failover_swap_rebalance(self):
master = self.servers[0]
rest = RestConnection(master)
creds = self.input.membase_settings
num_initial_servers = self.num_initial_servers
    initial_servers = self.servers[:num_initial_servers]
self.log.info("CREATE BUCKET PHASE")
SwapRebalanceBase.create_buckets(self)
# Cluster all starting set of servers
self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
    self.assertTrue(status, msg="Rebalance failed")
self.log.info("DATA LOAD PHASE")
self.loaders = SwapRebalanceBase.start_load_phase(self, master)
# Wait till load phase is over
SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
self.log.info("DONE LOAD PHASE")
# Start the swap rebalance
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
optNodesIds = [node.id for node in toBeEjectedNodes]
if self.fail_orchestrator:
status, content = ClusterOperationHelper.find_orchestrator(master)
self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
format(status, content))
optNodesIds[0] = content
self.log.info("FAILOVER PHASE")
# Failover selected nodes
for node in optNodesIds:
self.log.info("failover node {0} and rebalance afterwards".format(node))
rest.fail_over(node)
new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
for server in new_swap_servers:
otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
msg = "unable to add node {0} to the cluster"
self.assertTrue(otpNode, msg.format(server.ip))
if self.fail_orchestrator:
rest = RestConnection(new_swap_servers[0])
master = new_swap_servers[0]
self.log.info("DATA ACCESS PHASE")
self.loaders = SwapRebalanceBase.start_access_phase(self, master)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
ejectedNodes=optNodesIds)
self.assertTrue(rest.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(new_swap_servers))
SwapRebalanceBase.verification_phase(self, master)
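Stripped of logging and load scaffolding, the failover-swap flow above reduces to four REST operations; a condensed sketch using the same RestConnection calls as the example:

# node_ids come from RebalanceHelper.pick_nodes(master, howmany=failover_factor)
for node_id in node_ids:
    rest.fail_over(node_id)                          # hard failover each victim
for server in replacement_servers:                   # add the swap-in nodes
    rest.add_node(creds.rest_username, creds.rest_password, server.ip)
rest.rebalance(otpNodes=[n.id for n in rest.node_statuses()],
               ejectedNodes=node_ids)                # rebalance the victims out
assert rest.monitorRebalance(), "rebalance failed"   # block until it completes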
Example 9: _test_delete_key_and_backup_and_restore_body
def _test_delete_key_and_backup_and_restore_body(self):
bucket = "default"
BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
client = MemcachedClientHelper.direct_client(self.master, "default")
expiry = 2400
test_uuid = uuid.uuid4()
keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
self.log.info("pushing keys with expiry set to {0}".format(expiry))
for key in keys:
try:
client.set(key, expiry, 0, "1")
except mc_bin_client.MemcachedError as error:
msg = "unable to push key : {0} to bucket : {1} error : {2}"
self.log.error(msg.format(key, client.vbucketId, error.status))
self.fail(msg.format(key, client.vbucketId, error.status))
self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
client.delete(keys[0])
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    # let's create a unique folder in the remote location
for server in self.servers:
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(self.perm_command)
shell.log_command_output(output, error)
node = RestConnection(server).get_nodes_self()
BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
shell.disconnect()
for server in self.servers:
BackupHelper(server, self).restore(self.remote_tmp_folder)
time.sleep(10)
self.log.info('verifying that all those keys...')
missing_keys = []
verify_keys = []
for key in keys:
vBucketId = crc32.crc32_hash(key) & 1023 # or & 0x3FF
client.vbucketId = vBucketId
if key == keys[0]:
missing_keys.append(key)
else:
verify_keys.append(key)
        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "deleted keys still exist after restore")
self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
"Missing keys")
Example 10: test_parallel_enable_DB_compaction
def test_parallel_enable_DB_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, parallelDBAndVC="true", dbFragmentThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
self._compaction_thread()
if self.thread_crashed.is_set():
self.log.info("View Compaction is not started as expected")
Example 11: create_ddocs
def create_ddocs(self):
mapview = View(self.map_view_name, '''function(doc) {
emit(doc.integer, doc.string);
}''', dev_view=self.is_dev_view)
self.cluster.create_view(self.master, 'test', mapview)
redview = View(self.red_view_name, '''function(doc) {
emit([doc.integer, doc.string], doc.integer);
}''', '''_count''', dev_view=self.is_dev_view)
self.cluster.create_view(self.master, 'test', redview)
RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
Example 12: _monitor_drain_queue
def _monitor_drain_queue(self):
    # invoked once the drain queue is > 0; waits for it to empty and records the elapsed time
rest = RestConnection(self.master)
start = time.time()
stats = rest.get_bucket_stats(self.bucket)
self.log.info("current ep_queue_size: {0}".format(stats["ep_queue_size"]))
verified = RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_queue_size', 0, timeout_in_seconds=300, verbose=False)\
and RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_flusher_todo', 0, timeout_in_seconds=300, verbose=False)
self.drained = verified
self.drained_in_seconds = time.time() - start
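_monitor_drain_queue records both the outcome and the elapsed time; a hypothetical call site that turns those fields into a pass/fail check:

self._monitor_drain_queue()
if self.drained:
    self.log.info("drain queue emptied in %.1f seconds" % self.drained_in_seconds)
else:
    self.fail("ep_queue_size / ep_flusher_todo did not reach 0 within 300 seconds")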
Example 13: test_parallel_DB_views_compaction
def test_parallel_DB_views_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, parallelDBAndVC="true", viewFragmntThresholdPercentage=self.fragmentation_value, dbFragmentThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
self._compaction_thread()
if self.thread_crashed.is_set():
self.fail("Error occurred during run")
Example 14: wait_until_warmed_up
def wait_until_warmed_up(self, master=None):
if not master:
master = self.input.servers[0]
    bucket = self.param("bucket", "default")  # note: shadowed by the loop variable below
fn = RebalanceHelper.wait_for_mc_stats_no_timeout
for bucket in self.buckets:
RebalanceHelper.wait_for_stats_on_all(master, bucket,
'ep_warmup_thread',
'complete', fn=fn)
Example 15: test_observe_with_warmup
def test_observe_with_warmup(self):
self._load_doc_data_all_buckets('create', 0, self.num_items)
    # Persist all the loaded data items
self.log.info("Nodes in cluster: %s" % self.servers[:self.nodes_init])
for bucket in self.buckets:
RebalanceHelper.wait_for_persistence(self.master, bucket)
self._stats_befor_warmup(bucket.name)
self._restart_memcache(bucket.name)
# for bucket in self.buckets:
ClusterOperationHelper._wait_warmup_completed(self, self.servers[:self.nodes_init], bucket.name)
self._run_observe(self)