本文整理汇总了Python中membase.helper.rebalance_helper.RebalanceHelper.pick_node方法的典型用法代码示例。如果您正苦于以下问题:Python RebalanceHelper.pick_node方法的具体用法?Python RebalanceHelper.pick_node怎么用?Python RebalanceHelper.pick_node使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.rebalance_helper.RebalanceHelper
的用法示例。
在下文中一共展示了RebalanceHelper.pick_node方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _common_test_body
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import pick_node [as 别名]
def _common_test_body(self):
    """Incrementally rebalance ``howMany`` nodes out of a cluster of
    ``cluster_size`` nodes, with a key-value load running throughout and
    optional resident-ratio checks / KV-store verification per step.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    bucket_data = RebalanceBaseTest.bucket_data_init(rest)
    cluster_size = self.input.param("cluster_size", len(self.servers))
    howMany = self.input.param("howMany", cluster_size - 1)
    # A cluster can never lose as many nodes as it has — sanity-check input.
    if howMany >= cluster_size:
        self.fail(
            "Input error! howMany {0} rebalance-outs should be lesser than cluster_size {1}".format(howMany, \
            cluster_size))
    # Grow the cluster to full size before we start removing nodes.
    self.log.info("Rebalancing In with cluster size {0}".format(cluster_size))
    RebalanceTaskHelper.add_rebalance_task(self.task_manager,
                                           [master],
                                           self.servers[1:cluster_size],
                                           [])
    self.log.info("Initial Load with key-count {0}".format(self.keys_count))
    RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager,
                                            bucket_data,
                                            ram_load_ratio=self.load_ratio,
                                            keys_count=self.keys_count)
    # One iteration per requested rebalance-out; stop early if only the
    # master would remain.
    for _ in range(howMany):
        if len(rest.node_statuses()) < 2:
            break
        if self.checkResidentRatio:
            self.log.info("Getting the resident ratio stats before failover/rebalancing out the nodes")
            RebalanceBaseTest.check_resident_ratio(self, master)
        # pick_node is guaranteed never to return the master node.
        victim = RebalanceHelper.pick_node(master)
        self.log.info(
            "Incrementally rebalancing out node {0}:{1}".format(victim.ip, victim.port))
        # Rebalance the chosen node out of the cluster.
        RebalanceTaskHelper.add_rebalance_task(self.task_manager,
                                               [master],
                                               [],
                                               [victim], do_stop=self.do_stop)
        # Block until the in-flight loading tasks complete.
        RebalanceBaseTest.finish_all_bucket_tasks(rest, bucket_data)
        self.log.info("Completed Loading and Rebalacing out")
        if self.checkResidentRatio:
            self.log.info("Getting the resident ratio stats after rebalancing out the nodes")
            RebalanceBaseTest.check_resident_ratio(self, master)
        if self.do_verify:
            self.log.info("Verifying with KV store")
            RebalanceBaseTest.do_kv_and_replica_verification(master, self.task_manager,
                                                             bucket_data, self.replica, self)
        else:
            self.log.info("No Verification with KV store")
示例2: test_rebalance_out
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import pick_node [as 别名]
def test_rebalance_out(self):
    """Rebalance nodes out one at a time from a fully-built cluster while
    concurrently adding, setting and deleting keys in every bucket, then
    verify replication at the end.

    Fixes over the original:
    - the "replica" test-input default was 100000, a copy-paste of the
      num_of_docs default; replication is set up with replica=1 in
      common_setup, so the default now matches that.
    - ``len(self.servers)`` mixed naming with ``self._servers`` in the same
      expression; ``self._servers`` (the name used for ``master``) is now
      used consistently.
    - ``add_items_count / 5`` uses floor division so the delete count stays
      an int on Python 3 (identical result on Python 2).
    """
    RebalanceBaseTest.common_setup(self._input, self, replica=1)
    log = logger.Logger().get_logger()
    master = self._servers[0]
    num_of_docs = TestInputSingleton.input.param("num_of_docs", 100000)
    # Default must agree with the replica count used in common_setup above.
    replica = TestInputSingleton.input.param("replica", 1)
    add_items_count = TestInputSingleton.input.param("num_of_creates", 30000)
    size = TestInputSingleton.input.param("item_size", 256)
    params = {"sizes": [size], "count": num_of_docs, "seed": str(uuid.uuid4())[:7]}
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    bucket_data = {}
    generators = {}
    # One client-side KV mirror per bucket for later verification.
    for bucket in buckets:
        bucket_data[bucket.name] = {"kv_store": ClientKeyValueStore()}
    # Grow the cluster to full size first, then start ejecting nodes.
    rebalanced_in, which_servers = RebalanceBaseTest.rebalance_in(self._servers, len(self._servers) - 1)
    self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding nodes {0}".format(
                        [node.id for node in rest.node_statuses()]))
    while len(rest.node_statuses()) > 1:
        # pick a node that is not the master node
        toBeEjectedNode = RebalanceHelper.pick_node(master)
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                       ejectedNodes=[toBeEjectedNode.id])
        self.assertTrue(rest.monitorRebalance(),
                        msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
        for bucket in buckets:
            kv_store = bucket_data[bucket.name]["kv_store"]
            add_items_seed = str(uuid.uuid4())[:7]
            self._add_items(add_items_seed, bucket, add_items_count, kv_store)
            errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
            if errors:
                log.error("verification returned {0} errors".format(len(errors)))
            load_set_ops = {"ops": "set", "bucket": bucket.name}
            load_set_ops.update(params)
            # Delete a fifth of what was just added, using the same seed so
            # the deletes target keys that exist.
            load_delete_ops = {"ops": "delete", "bucket": bucket.name,
                               "sizes": [size], "count": add_items_count // 5, "seed": add_items_seed}
            thread = RebalanceDataGenerator.start_load(rest, bucket.name,
                                                       RebalanceDataGenerator.create_loading_tasks(load_set_ops),
                                                       kv_store)
            generators["set"] = {"thread": thread}
            generators["set"]["thread"].start()
            thread = RebalanceDataGenerator.start_load(rest, bucket.name,
                                                       RebalanceDataGenerator.create_loading_tasks(load_delete_ops),
                                                       kv_store)
            generators["delete"] = {"thread": thread}
            generators["delete"]["thread"].start()
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        for bucket in buckets:
            kv_store = bucket_data[bucket.name]["kv_store"]
            errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
            if errors:
                log.error("verification returned {0} errors".format(len(errors)))
        # Wait for the concurrent set/delete loads before the next ejection.
        generators["set"]["thread"].join()
        generators["delete"]["thread"].join()
    for bucket in buckets:
        kv_store = bucket_data[bucket.name]["kv_store"]
        bucket_data[bucket.name]["items_inserted_count"] = len(kv_store.valid_items())
    RebalanceBaseTest.replication_verification(master, bucket_data, replica, self)
示例3: test_incremental_rebalance_out_continuous_bidirectional_sets_deletes
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import pick_node [as 别名]
def test_incremental_rebalance_out_continuous_bidirectional_sets_deletes(self):
cluster_ref_a = "cluster_ref_a"
master_a = self._input.clusters.get(0)[0]
rest_conn_a = RestConnection(master_a)
cluster_ref_b = "cluster_ref_b"
master_b = self._input.clusters.get(1)[0]
rest_conn_b = RestConnection(master_b)
# Setup bi-directional continuous replication
replication_type = "continuous"
rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
master_b.rest_username,
master_b.rest_password, cluster_ref_b)
rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
master_a.rest_username,
master_a.rest_password, cluster_ref_a)
(rep_database_a, rep_id_a) = rest_conn_a.start_replication(
replication_type, self._buckets[0],
cluster_ref_b)
(rep_database_b, rep_id_b) = rest_conn_b.start_replication(
replication_type, self._buckets[0],
cluster_ref_a)
self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))
load_thread_list = []
# Start load
kvstore = ClientKeyValueStore()
self._params["ops"] = "set"
task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
self._buckets[0],
task_def, kvstore)
load_thread.start()
load_thread.join()
# Do some deletes
self._params["ops"] = "delete"
self._params["count"] = self._num_items/5
task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
self._buckets[0],
task_def, kvstore)
load_thread_list.append(load_thread)
# Start all loads concurrently
for lt in load_thread_list:
lt.start()
# Trigger rebalance on both source and destination clusters
servers_a = self._input.clusters.get(0)
servers_b = self._input.clusters.get(1)
rebalanced_servers_a = []
rebalanced_servers_b = []
which_servers_a = []
which_servers_b = []
# Rebalance all the nodes together
RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)
RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
rebalanced_servers_a.extend(servers_a)
rebalanced_servers_b.extend(servers_b)
nodes_a = rest_conn_a.node_statuses()
nodes_b = rest_conn_b.node_statuses()
# Incremental rebalance out one node in cluster_a, then cluster_b
while len(nodes_a) > 1:
toBeEjectedNode = RebalanceHelper.pick_node(master_a)
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
ejectedNodes=[toBeEjectedNode.id])
self.assertTrue(rest_conn_a.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
while len(nodes_b) > 1:
toBeEjectedNode = RebalanceHelper.pick_node(master_b)
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()],\
ejectedNodes=[toBeEjectedNode.id])
self.assertTrue(rest_conn_b.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
break
for node in nodes_b:
for rebalanced_server in rebalanced_servers_b:
if rebalanced_server.ip.find(node.ip) != -1:
rebalanced_servers_b.remove(rebalanced_server)
break
nodes_b = rest_conn_a.node_statuses()
for node in nodes_a:
for rebalanced_server in rebalanced_servers_a:
if rebalanced_server.ip.find(node.ip) != -1:
#.........这里部分代码省略.........
示例4: test_failover_source_sets
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import pick_node [as 别名]
def test_failover_source_sets(self):
    """Start a continuous unidirectional XDCR replication from cluster A to
    cluster B, fail over nodes on the source cluster one at a time while a
    set load is running, then verify replicated data and revisions on both
    clusters.

    Fixes over the original:
    - the load thread was started but never appended to load_thread_list,
      so the join loop below never waited for it and verification could
      race the still-running load; the thread is now tracked and joined.
    - verbatim-duplicated statements (the repeated replication_type
      assignment and "Force initial rebalance." log line) are removed.
    """
    # This test starts with a 2-2 unidirectional replication from cluster a
    # to cluster b; during the replication, we trigger failover of one node
    # on source cluster , resulting a 1-2 replication.
    # After all loading finish, verify data and rev on both clusters.
    replication_type = "continuous"
    self.log.info("Force initial rebalance.")
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    self.log.info("START XDC replication...")
    # Start replication A -> B.
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                           self._buckets[0],
                                                           cluster_ref_b)
    # Track the replication so teardown can clean it up.
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))
    # Start load
    self.log.info("START loading data...")
    load_thread_list = []
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    # FIX: track the thread so the join loop below actually waits for it.
    load_thread_list.append(load_thread)
    # sleep a while to allow more data loaded
    time.sleep(5)
    self.log.info("current nodes on source cluster: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
    # Trigger failover; fail over one node each time until only one remains.
    self.log.info("DURING replication, start failover...")
    self.log.info("FAILOVER nodes on Cluster A ...")
    nodes_a = rest_conn_a.node_statuses()
    while len(nodes_a) > 1:
        # pick_node never returns the master node.
        toBeFailedOverNode = RebalanceHelper.pick_node(master_a)
        self.log.info("failover node {0}".format(toBeFailedOverNode.id))
        rest_conn_a.fail_over(toBeFailedOverNode)
        self.log.info("rebalance after failover")
        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()],
                              ejectedNodes=[toBeFailedOverNode.id])
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation failed after removing node {0}".format(toBeFailedOverNode.id))
        nodes_a = rest_conn_a.node_statuses()
    self.log.info("ALL failed over done...")
    # Wait for loading threads to finish before verifying anything.
    for lt in load_thread_list:
        lt.join()
    self.log.info("All loading threads finished")
    # Verify replicated data on both clusters, then revisions.
    self.log.info("START data verification at cluster A...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START data verification at cluster B...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START revision verification on both clusters...")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")