This article collects typical usage examples of the Python method membase.helper.rebalance_helper.RebalanceHelper.getOtpNodeIds. If you are trying to work out what RebalanceHelper.getOtpNodeIds does, how to call it, or where to find working examples, the curated code samples below should help. You can also read more about the class it belongs to, membase.helper.rebalance_helper.RebalanceHelper.
Nine code examples of RebalanceHelper.getOtpNodeIds are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
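Before diving into the examples, here is a minimal sketch of the call pattern they all share. It is a hypothetical helper, not part of the original examples: `master` is assumed to be any server object already in the cluster, and the print-based logging stands in for the tests' self.log calls.

# Minimal usage sketch (hypothetical helper; `master` is a server object from the test framework).
from membase.helper.rebalance_helper import RebalanceHelper

def log_current_otp_nodes(master):
    """Return and log the OTP node ids of the cluster that `master` belongs to."""
    otp_node_ids = RebalanceHelper.getOtpNodeIds(master)  # e.g. ['ns_1@10.1.2.1', 'ns_1@10.1.2.2']
    print("current nodes : {0}".format(otp_node_ids))
    return otp_node_ids

In the examples below, the returned ids are logged before each add-node, rebalance, or failover step; the same kind of OTP node ids (node.id from rest.node_statuses()) are what the REST rebalance calls take as otpNodes and ejectedNodes.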
Example 1: _common_test_body
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _common_test_body(self, moxi=False):
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    bucket_data = RebalanceBaseTest.bucket_data_init(rest)

    # Incrementally add each remaining server to the cluster and rebalance
    for server in self.servers[1:]:
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        self.log.info("adding node {0}:{1} and rebalance afterwards".format(server.ip, server.port))
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster {1}"
        self.assertTrue(otpNode, msg.format(server.ip, master.ip))

        # Load data into each bucket, rebalance, then read the keys back in parallel
        for name in bucket_data:
            inserted_keys, rejected_keys = \
                MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.servers[0]],
                                                                      name=name,
                                                                      ram_load_ratio=-1,
                                                                      number_of_items=self.keys_count,
                                                                      number_of_threads=1,
                                                                      write_only=True)
            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
            self.assertTrue(rest.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(server.ip))
            self.log.info("completed rebalancing in server {0}".format(server))
            IncrementalRebalanceWithParallelReadTests._reader_thread(self, inserted_keys, bucket_data, moxi=moxi)
            self.assertTrue(rest.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(server.ip))
            break
Example 2: _failover_swap_rebalance
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _failover_swap_rebalance(self):
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    num_initial_servers = self.num_initial_servers
    intial_severs = self.servers[:num_initial_servers]

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.fail_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        optNodesIds[0] = content

    self.log.info("FAILOVER PHASE")
    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.fail_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(new_swap_servers))

    SwapRebalanceBase.verification_phase(self, master)
Example 3: _add_back_failed_node
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _add_back_failed_node(self, do_node_cleanup=False):
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
    self.assertTrue(status, msg="Rebalance was failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]

    # List of servers that will not be failed over
    not_failed_over = []
    for server in self.servers:
        if server.ip not in [node.ip for node in toBeEjectedNodes]:
            not_failed_over.append(server)
            self.log.info("Node %s not failed over" % server.ip)

    if self.fail_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
        master = not_failed_over[-1]

    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)

    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))

    # Add back the same failed over nodes
    # Cleanup the node, somehow
    # TODO: cluster_run?
    if do_node_cleanup:
        pass

    # Make rest connection with node part of cluster
    rest = RestConnection(master)

    # Given the optNode, find ip
    add_back_servers = []
    nodes = rest.get_nodes()
    for server in [node.ip for node in nodes]:
        if isinstance(server, unicode):
            add_back_servers.append(server)
    final_add_back_servers = []
    for server in self.servers:
        if server.ip not in add_back_servers:
            final_add_back_servers.append(server)

    for server in final_add_back_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(add_back_servers))

    SwapRebalanceBase.verification_phase(self, master)
Example 4: _common_test_body_failed_swap_rebalance
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _common_test_body_failed_swap_rebalance(self):
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content

    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.swap_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        self.log.info("Latest logs from UI:")
        for i in rest.get_logs():
            self.log.error(i)
        self.fail("rebalance failed even before killing memcached")

    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
        pid = o[0]
        shell.disconnect()
    else:
        for i in xrange(2):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except EOFError as e:
                self.log.error("{0}.Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 1)
    if pid is None:
        self.fail("impossible to get a PID")
    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)
    # we can't get stats for the new node when rebalance fails
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect that the rebalance will fail
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        # retry rebalance if it failed
        self.log.warn("Rebalance failed but it's expected")
        # ... the rest of this example is omitted ...
Example 5: _common_test_body_swap_rebalance
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _common_test_body_swap_rebalance(self, do_stop_start=False):
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content

    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.swap_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    if self.do_access:
        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)

    if do_stop_start:
        # Rebalance is stopped at 20%, 40% and 60% completion
        retry = 0
        for expected_progress in (20, 40, 60):
            self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
                          format(expected_progress))
            while True:
                progress = rest._rebalance_progress()
                if progress < 0:
                    self.log.error("rebalance progress code : {0}".format(progress))
                    break
                elif progress == 100:
                    self.log.warn("Rebalance has already reached 100%")
                    break
                elif progress >= expected_progress:
                    self.log.info("Rebalance will be stopped with {0}%".format(progress))
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                    SwapRebalanceBase.sleep(self, 20)
                    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                                   ejectedNodes=optNodesIds)
                    break
                elif retry > 100:
                    break
                else:
                    retry += 1
                    SwapRebalanceBase.sleep(self, 1)

    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))
    SwapRebalanceBase.verification_phase(self, master)
Example 6: _common_test_body_failed_swap_rebalance
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def _common_test_body_failed_swap_rebalance(self):
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    self.log.info("DATA LOAD PHASE")
    loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content

    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.swap_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    self.log.info("DATA ACCESS PHASE")
    loaders = SwapRebalanceBase.start_access_phase(self, master)

    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)

    # Rebalance is failed at 20%, 40% and 60% completion
    for i in [1, 2, 3]:
        expected_progress = 20 * i
        self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
        reached = RestHelper(rest).rebalance_reached(expected_progress)
        command = "[erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)]."
        memcached_restarted = rest.diag_eval(command)
        self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
        time.sleep(20)
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                       ejectedNodes=optNodesIds)

    # Stop loaders
    SwapRebalanceBase.stop_load(loaders)

    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
    self.log.info("DONE DATA ACCESS PHASE")
    #for bucket in rest.get_buckets():
    #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'), \
    #        bucket.name, self)
    #    RebalanceHelper.wait_for_persistence(master, bucket.name)

    self.log.info("VERIFICATION PHASE")
    SwapRebalanceBase.items_verification(master, self)
Example 7: test_failover_continuous_bidirectional_sets_deletes
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def test_failover_continuous_bidirectional_sets_deletes(self):
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)

    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)

    # Rebalance all the nodes together
    servers_a = self._input.clusters.get(0)
    servers_b = self._input.clusters.get(1)
    rebalanced_servers_a = []
    rebalanced_servers_b = []
    RebalanceHelper.rebalance_in(servers_a, len(servers_a) - 1)
    RebalanceHelper.rebalance_in(servers_b, len(servers_b) - 1)
    rebalanced_servers_a.extend(servers_a)
    rebalanced_servers_b.extend(servers_b)

    # Setup bi-directional continuous replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                   master_a.rest_username,
                                   master_a.rest_password, cluster_ref_a)
    (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_b)
    (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_a)

    load_thread_list = []
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    load_thread.join()
    RebalanceHelper.wait_for_persistence(master_a, self._buckets[0])

    # Do some deletes
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items / 5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread_list.append(load_thread)

    # Start all loads concurrently
    for lt in load_thread_list:
        lt.start()

    # Do the failover of nodes on both clusters
    self.log.info("Failing over nodes")
    self.log.info("current nodes on cluster 1: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
    self.log.info("current nodes on cluster 2: {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))

    # Find nodes to be failed over
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master_a, howmany=self._failover_factor)
    optNodesIds_a = [node.id for node in toBeEjectedNodes]
    if self._fail_orchestrator_a:
        status, content = ClusterOperationHelper.find_orchestrator(master_a)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        optNodesIds_a[0] = content
        master_a = self._input.clusters.get(0)[-1]
        rest_conn_a = RestConnection(master_a)

    # Failover selected nodes
    for node in optNodesIds_a:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest_conn_a.fail_over(node)

    toBeEjectedNodes = RebalanceHelper.pick_nodes(master_b, howmany=self._failover_factor)
    optNodesIds_b = [node.id for node in toBeEjectedNodes]
    if self._fail_orchestrator_b:
        status, content = ClusterOperationHelper.find_orchestrator(master_b)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
        optNodesIds_b[0] = content
        master_b = self._input.clusters.get(1)[-1]
        rest_conn_b = RestConnection(master_b)

    self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
    self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

    # Failover selected nodes
    for node in optNodesIds_b:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest_conn_b.fail_over(node)
    # ... the rest of this example is omitted ...
Example 8: test_incremental_rebalance_out_continuous_bidirectional_sets_deletes
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def test_incremental_rebalance_out_continuous_bidirectional_sets_deletes(self):
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)

    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)

    # Setup bi-directional continuous replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                   master_a.rest_username,
                                   master_a.rest_password, cluster_ref_a)
    (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_b)
    (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_a)
    self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
    self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

    load_thread_list = []
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    load_thread.join()

    # Do some deletes
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items / 5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread_list.append(load_thread)

    # Start all loads concurrently
    for lt in load_thread_list:
        lt.start()

    # Trigger rebalance on both source and destination clusters
    servers_a = self._input.clusters.get(0)
    servers_b = self._input.clusters.get(1)
    rebalanced_servers_a = []
    rebalanced_servers_b = []
    which_servers_a = []
    which_servers_b = []

    # Rebalance all the nodes together
    RebalanceHelper.rebalance_in(servers_a, len(servers_a) - 1)
    RebalanceHelper.rebalance_in(servers_b, len(servers_b) - 1)
    rebalanced_servers_a.extend(servers_a)
    rebalanced_servers_b.extend(servers_b)

    nodes_a = rest_conn_a.node_statuses()
    nodes_b = rest_conn_b.node_statuses()

    # Incremental rebalance out one node in cluster_a, then cluster_b
    while len(nodes_a) > 1:
        toBeEjectedNode = RebalanceHelper.pick_node(master_a)
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
        self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
                              ejectedNodes=[toBeEjectedNode.id])
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))

        while len(nodes_b) > 1:
            toBeEjectedNode = RebalanceHelper.pick_node(master_b)
            self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
            self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
            rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()], \
                                  ejectedNodes=[toBeEjectedNode.id])
            self.assertTrue(rest_conn_b.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
            break

        for node in nodes_b:
            for rebalanced_server in rebalanced_servers_b:
                if rebalanced_server.ip.find(node.ip) != -1:
                    rebalanced_servers_b.remove(rebalanced_server)
                    break
        nodes_b = rest_conn_a.node_statuses()

        for node in nodes_a:
            for rebalanced_server in rebalanced_servers_a:
                if rebalanced_server.ip.find(node.ip) != -1:
                    # ... the rest of this example is omitted ...
Example 9: test_failover_source_sets
# Required import: from membase.helper.rebalance_helper import RebalanceHelper [as alias]
# Or: from membase.helper.rebalance_helper.RebalanceHelper import getOtpNodeIds [as alias]
def test_failover_source_sets(self):
    replication_type = "continuous"
    self.log.info("Force initial rebalance.")

    # This test starts with a 2-2 unidirectional replication from cluster a
    # to cluster b; during the replication, we trigger failover of one node
    # on the source cluster, resulting in a 1-2 replication.
    # After all loading finishes, verify data and revs on both clusters.
    replication_type = "continuous"
    self.log.info("Force initial rebalance.")

    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)

    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)

    self.log.info("START XDC replication...")
    # Start replication
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                           self._buckets[0],
                                                           cluster_ref_b)
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

    # Start load
    self.log.info("START loading data...")
    load_thread_list = []
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    # sleep a while to allow more data to be loaded
    time.sleep(5)

    self.log.info("current nodes on source cluster: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))

    # Trigger failover; we fail over one node each time until there is only one node remaining
    self.log.info("DURING replication, start failover...")
    self.log.info("FAILOVER nodes on Cluster A ...")
    nodes_a = rest_conn_a.node_statuses()
    while len(nodes_a) > 1:
        toBeFailedOverNode = RebalanceHelper.pick_node(master_a)
        self.log.info("failover node {0}".format(toBeFailedOverNode.id))
        rest_conn_a.fail_over(toBeFailedOverNode)
        self.log.info("rebalance after failover")
        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
                              ejectedNodes=[toBeFailedOverNode.id])
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation failed after removing node {0}".format(toBeFailedOverNode.id))
        nodes_a = rest_conn_a.node_statuses()
    self.log.info("ALL failed over done...")

    # Wait for loading threads to finish
    for lt in load_thread_list:
        lt.join()
    self.log.info("All loading threads finished")

    # Verify replication
    self.log.info("START data verification at cluster A...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")

    self.log.info("START data verification at cluster B...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")

    self.log.info("START revision verification on both clusters...")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")