本文整理汇总了Python中membase.helper.rebalance_helper.RebalanceHelper.rebalance_in方法的典型用法代码示例。如果您正苦于以下问题:Python RebalanceHelper.rebalance_in方法的具体用法?Python RebalanceHelper.rebalance_in怎么用?Python RebalanceHelper.rebalance_in使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.rebalance_helper.RebalanceHelper
的用法示例。
在下文中一共展示了RebalanceHelper.rebalance_in方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _failover_swap_rebalance
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def _failover_swap_rebalance(self):
    """Fail over ``failover_factor`` nodes, add the same number of fresh
    nodes, then rebalance the failed nodes out and verify the data.

    Flow: create buckets -> rebalance in the initial server set -> load
    data -> pick nodes to fail over (optionally forcing the orchestrator
    into the set) -> fail them over -> add replacement nodes -> rebalance
    with the failed nodes ejected -> verification phase.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    num_initial_servers = self.num_initial_servers
    intial_severs = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.fail_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # Force the orchestrator node into the fail-over set
        optNodesIds[0] = content
    self.log.info("FAILOVER PHASE")
    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.fail_orchestrator:
        # The original master may have been failed over above, so drive
        # the rest of the test through one of the newly added nodes.
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(new_swap_servers))
    SwapRebalanceBase.verification_phase(self, master)
示例2: rebalance_nodes
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def rebalance_nodes(self, num_nodes):
"""Rebalance cluster(s) if more than 1 node provided"""
if len(self.input.servers) == 1 or num_nodes == 1:
print "WARNING: running on single node cluster"
return
else:
print "[perf.setUp] rebalancing nodes: num_nodes = {0}".\
format(num_nodes)
if self.input.clusters:
for cluster in self.input.clusters.values():
status, _ = RebalanceHelper.rebalance_in(cluster,
num_nodes - 1,
do_shuffle=False)
self.assertTrue(status)
else:
status, _ = RebalanceHelper.rebalance_in(self.input.servers,
num_nodes - 1,
do_shuffle=False)
self.assertTrue(status)
示例3: delayed_rebalance_worker
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def delayed_rebalance_worker(servers, num_nodes, delay_seconds, sc,
max_retries=PerfDefaults.reb_max_retries):
time.sleep(delay_seconds)
gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
print "[delayed_rebalance_worker] rebalance started: %s" % gmt_now
if not sc:
print "[delayed_rebalance_worker] invalid stats collector"
return
status = False
retries = 0
while not status and retries <= max_retries:
start_time = time.time()
status, nodes = RebalanceHelper.rebalance_in(servers,
num_nodes - 1,
do_check=(not retries))
end_time = time.time()
print "[delayed_rebalance_worker] status: {0}, nodes: {1}, retries: {2}"\
.format(status, nodes, retries)
if not status:
retries += 1
time.sleep(delay_seconds)
sc.reb_stats(start_time, end_time - start_time)
示例4: _add_back_failed_node
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def _add_back_failed_node(self, do_node_cleanup=False):
    """Fail over nodes, rebalance them out, then add the same nodes back
    into the cluster, rebalance again and verify data.

    :param do_node_cleanup: placeholder flag for cleaning up a failed
        node before re-adding it; currently a no-op (see TODO below).
    """
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    # List of servers that will not be failed over
    not_failed_over = []
    for server in self.servers:
        if server.ip not in [node.ip for node in toBeEjectedNodes]:
            not_failed_over.append(server)
            self.log.info("Node %s not failed over" % server.ip)
    if self.fail_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
        # The orchestrator will go down, so drive the rest of the test
        # from a node that survives the failover.
        master = not_failed_over[-1]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))
    # Add back the same failed over nodes
    # Cleanup the node, somehow
    # TODO: cluster_run?
    if do_node_cleanup:
        pass
    # Make rest connection with node part of cluster
    rest = RestConnection(master)
    # Given the optNode, find ip
    add_back_servers = []
    nodes = rest.get_nodes()
    # Collect the ips of nodes still known to the cluster (REST returns
    # them as unicode strings).
    for server in [node.ip for node in nodes]:
        if isinstance(server, unicode):
            add_back_servers.append(server)
    # Servers NOT currently in the cluster are the ones to add back.
    final_add_back_servers = []
    for server in self.servers:
        if server.ip not in add_back_servers:
            final_add_back_servers.append(server)
    for server in final_add_back_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(add_back_servers))
    SwapRebalanceBase.verification_phase(self, master)
示例5: _common_test_body_failed_swap_rebalance
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def _common_test_body_failed_swap_rebalance(self):
    """Start a swap rebalance, then kill memcached once the rebalance
    reaches ``percentage_progress`` so the rebalance fails; the failure
    is expected and handled at the end.

    NOTE(review): the source listing truncates the tail of this example.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # Orchestrator is being swapped out: talk to one of the new nodes
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        self.log.info("Latest logs from UI:")
        for i in rest.get_logs(): self.log.error(i)
        self.fail("rebalance failed even before killing memcached")
    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
        pid = o[0]
        shell.disconnect()
    else:
        # Ask memcached itself for its PID, retrying once on EOFError.
        for i in xrange(2):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except EOFError as e:
                self.log.error("{0}.Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 1)
    if pid is None:
        self.fail("impossible to get a PID")
    # Kill memcached through ns_server's Erlang shell so the kill runs
    # on the node itself.
    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)
    # we can't get stats for new node when rebalance falls
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect that rebalance will be failed
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        # retry rebalance if it failed
        self.log.warn("Rebalance failed but it's expected")
        # ... remainder of this example omitted in the source listing ...
#.........这里部分代码省略.........
示例6: _common_test_body_swap_rebalance
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def _common_test_body_swap_rebalance(self, do_stop_start=False):
    """Perform a swap rebalance: eject ``num_swap`` nodes while adding the
    same number of fresh ones, then verify the data.

    :param do_stop_start: when True the rebalance is stopped and restarted
        at roughly 20%, 40% and 60% completion before being allowed to
        finish.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes, the orchestrator is ejected too
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # Orchestrator is being swapped out: talk to one of the new nodes
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    if self.do_access:
        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    if do_stop_start:
        # Rebalance is stopped at 20%, 40% and 60% completion
        retry = 0
        for expected_progress in (20, 40, 60):
            self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
                          format(expected_progress))
            while True:
                progress = rest._rebalance_progress()
                if progress < 0:
                    # negative progress is an error code from the REST layer
                    self.log.error("rebalance progress code : {0}".format(progress))
                    break
                elif progress == 100:
                    self.log.warn("Rebalance has already reached 100%")
                    break
                elif progress >= expected_progress:
                    # Stop, pause, and immediately restart the rebalance
                    self.log.info("Rebalance will be stopped with {0}%".format(progress))
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                    SwapRebalanceBase.sleep(self, 20)
                    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                                   ejectedNodes=optNodesIds)
                    break
                elif retry > 100:
                    break
                else:
                    retry += 1
                    SwapRebalanceBase.sleep(self, 1)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))
    SwapRebalanceBase.verification_phase(self, master)
示例7: _common_test_body_failed_swap_rebalance
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def _common_test_body_failed_swap_rebalance(self):
    """Swap-rebalance variant that kills the ns_server-supervised port
    processes (memcached/moxi) at ~20/40/60% progress via diag/eval, then
    restarts the rebalance each time and verifies items at the end."""
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    RebalanceHelper.rebalance_in(intial_severs, len(intial_severs)-1)
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    self.log.info("DATA LOAD PHASE")
    loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers+self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # Orchestrator is being swapped out: talk to one of the new nodes
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
                   ejectedNodes=optNodesIds)
    # Rebalance is failed at 20%, 40% and 60% completion
    for i in [1, 2, 3]:
        expected_progress = 20*i
        self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
        # NOTE(review): 'reached' is never checked; the kill proceeds
        # regardless of whether the target progress was actually reached.
        reached = RestHelper(rest).rebalance_reached(expected_progress)
        # Kill every ns_port_sup child (memcached/moxi); the supervisor
        # restarts them, which fails the in-flight rebalance.
        command = "[erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)]."
        memcached_restarted = rest.diag_eval(command)
        self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
        time.sleep(20)
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
                       ejectedNodes=optNodesIds)
    # Stop loaders
    SwapRebalanceBase.stop_load(loaders)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
    self.log.info("DONE DATA ACCESS PHASE")
    #for bucket in rest.get_buckets():
    #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'),\
    #        bucket.name, self)
    #    RebalanceHelper.wait_for_persistence(master, bucket.name)
    self.log.info("VERIFICATION PHASE")
    SwapRebalanceBase.items_verification(master, self)
示例8: rebalance_in
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def rebalance_in(servers, how_many):
    """Thin wrapper: delegate to RebalanceHelper.rebalance_in."""
    result = RebalanceHelper.rebalance_in(servers, how_many)
    return result
示例9: rebalance_in
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def rebalance_in(servers, how_many, monitor=True):
    """Thin wrapper: delegate to RebalanceHelper.rebalance_in,
    forwarding the monitor flag."""
    outcome = RebalanceHelper.rebalance_in(servers, how_many, monitor)
    return outcome
示例10: test_failover_continuous_bidirectional_sets_deletes
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def test_failover_continuous_bidirectional_sets_deletes(self):
    """Set up bi-directional continuous XDCR between two clusters, run a
    synchronous "set" load followed by concurrent deletes, then fail over
    nodes on both clusters while replication is in flight.

    NOTE(review): the source listing truncates the tail of this example.
    """
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    # Rebalance all the nodes together
    servers_a = self._input.clusters.get(0)
    servers_b = self._input.clusters.get(1)
    rebalanced_servers_a = []
    rebalanced_servers_b = []
    RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)
    RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
    rebalanced_servers_a.extend(servers_a)
    rebalanced_servers_b.extend(servers_b)
    # Setup bi-directional continuous replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                   master_a.rest_username,
                                   master_a.rest_password, cluster_ref_a)
    (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_b)
    (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_a)
    load_thread_list = []
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    # Run the "set" load synchronously so data exists before the deletes
    load_thread.start()
    load_thread.join()
    RebalanceHelper.wait_for_persistence(master_a, self._buckets[0])
    # Do some deletes
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items/5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread_list.append(load_thread)
    # Start all loads concurrently
    for lt in load_thread_list:
        lt.start()
    # Do the failover of nodes on both clusters
    self.log.info("Failing over nodes")
    self.log.info("current nodes on cluster 1: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
    self.log.info("current nodes on cluster 2: {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
    # Find nodes to be failed_over
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master_a, howmany=self._failover_factor)
    optNodesIds_a = [node.id for node in toBeEjectedNodes]
    if self._fail_orchestrator_a:
        status, content = ClusterOperationHelper.find_orchestrator(master_a)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        optNodesIds_a[0] = content
        # The orchestrator goes down: drive cluster A from its last node
        master_a = self._input.clusters.get(0)[-1]
        rest_conn_a = RestConnection(master_a)
    # Failover selected nodes
    for node in optNodesIds_a:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest_conn_a.fail_over(node)
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master_b, howmany=self._failover_factor)
    optNodesIds_b = [node.id for node in toBeEjectedNodes]
    if self._fail_orchestrator_b:
        status, content = ClusterOperationHelper.find_orchestrator(master_b)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        optNodesIds_b[0] = content
        master_b = self._input.clusters.get(1)[-1]
        rest_conn_b = RestConnection(master_b)
    # Record replication state for teardown/cleanup
    self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
    self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))
    # Failover selected nodes
    for node in optNodesIds_b:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest_conn_b.fail_over(node)
    # ... remainder of this example omitted in the source listing ...
#.........这里部分代码省略.........
示例11: test_incremental_rebalance_out_continuous_bidirectional_sets_deletes
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def test_incremental_rebalance_out_continuous_bidirectional_sets_deletes(self):
    """Bi-directional XDCR with concurrent sets/deletes while nodes are
    incrementally rebalanced OUT of both clusters.

    NOTE(review): the source listing truncates the tail of this example.
    """
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    # Setup bi-directional continuous replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                   master_a.rest_username,
                                   master_a.rest_password, cluster_ref_a)
    (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_b)
    (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_a)
    # Record replication state for teardown/cleanup
    self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
    self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))
    load_thread_list = []
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    # Run the "set" load synchronously so data exists before the deletes
    load_thread.start()
    load_thread.join()
    # Do some deletes
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items/5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread_list.append(load_thread)
    # Start all loads concurrently
    for lt in load_thread_list:
        lt.start()
    # Trigger rebalance on both source and destination clusters
    servers_a = self._input.clusters.get(0)
    servers_b = self._input.clusters.get(1)
    rebalanced_servers_a = []
    rebalanced_servers_b = []
    which_servers_a = []
    which_servers_b = []
    # Rebalance all the nodes together
    RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)
    RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
    rebalanced_servers_a.extend(servers_a)
    rebalanced_servers_b.extend(servers_b)
    nodes_a = rest_conn_a.node_statuses()
    nodes_b = rest_conn_b.node_statuses()
    # Incremental rebalance out one node in cluster_a, then cluster_b
    while len(nodes_a) > 1:
        toBeEjectedNode = RebalanceHelper.pick_node(master_a)
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
        self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
                              ejectedNodes=[toBeEjectedNode.id])
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
        # Eject at most one node from cluster_b per iteration (hence break)
        while len(nodes_b) > 1:
            toBeEjectedNode = RebalanceHelper.pick_node(master_b)
            self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
            self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
            rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()],\
                                  ejectedNodes=[toBeEjectedNode.id])
            self.assertTrue(rest_conn_b.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
            break
        # Drop ejected nodes from the tracked server lists
        for node in nodes_b:
            for rebalanced_server in rebalanced_servers_b:
                if rebalanced_server.ip.find(node.ip) != -1:
                    rebalanced_servers_b.remove(rebalanced_server)
                    break
        # NOTE(review): nodes_b is refreshed from rest_conn_a (cluster A);
        # this looks like it should be rest_conn_b — confirm upstream.
        nodes_b = rest_conn_a.node_statuses()
        for node in nodes_a:
            for rebalanced_server in rebalanced_servers_a:
                if rebalanced_server.ip.find(node.ip) != -1:
                    # ... remainder omitted in the source listing ...
#.........这里部分代码省略.........
示例12: test_incremental_rebalance_in_continuous_bidirectional_sets_deletes
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def test_incremental_rebalance_in_continuous_bidirectional_sets_deletes(self):
    """Bi-directional XDCR with concurrent sets/deletes while nodes are
    incrementally rebalanced INTO both clusters, then verify replicated
    data and revisions on both sides."""
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    # Setup bi-directional continuous replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                   master_a.rest_username,
                                   master_a.rest_password, cluster_ref_a)
    (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_b)
    (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
        replication_type, self._buckets[0],
        cluster_ref_a)
    # Record replication state for teardown/cleanup
    self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
    self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))
    load_thread_list = []
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    # Run the "set" load synchronously so data exists before the deletes
    load_thread.start()
    load_thread.join()
    # Do some deletes
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items/5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread_list.append(load_thread)
    # Start all loads concurrently
    for lt in load_thread_list:
        lt.start()
    # Trigger rebalance on both source and destination clusters
    servers_a = self._input.clusters.get(0)
    servers_b = self._input.clusters.get(1)
    nodes_a = rest_conn_a.node_statuses()
    nodes_b = rest_conn_b.node_statuses()
    rebalanced_servers_a = [master_a]
    rebalanced_servers_b = [master_b]
    which_servers_a = []
    which_servers_b = []
    # Incremental rebalance in one node in cluster_a, then cluster_b
    while len(nodes_a) < len(servers_a):
        self.log.info("current nodes : {0}".format([node.id for node in rest_conn_a.node_statuses()]))
        rebalanced_in, which_servers_a = RebalanceHelper.rebalance_in(servers_a, 1, monitor=False)
        self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation on cluster {0}".format(nodes_a))
        # Add at most one node to cluster_b per iteration (hence break)
        while len(nodes_b) < len(servers_b):
            self.log.info("current nodes : {0}".format([node.id for node in rest_conn_b.node_statuses()]))
            rebalanced_in, which_servers_b = RebalanceHelper.rebalance_in(servers_b, 1, monitor=False)
            self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
            break
        self.assertTrue(rest_conn_b.monitorRebalance(),
                        msg="rebalance operation on cluster {0}".format(nodes_b))
        rebalanced_servers_b.extend(which_servers_b)
        nodes_b = rest_conn_b.node_statuses()
        rebalanced_servers_a.extend(which_servers_a)
        nodes_a = rest_conn_a.node_statuses()
    # Wait for loading threads to finish
    for lt in load_thread_list:
        lt.join()
    self.log.info("All loading threads finished")
    # Verify replication
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")
示例13: test_rebalance_in_dest_sets
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def test_rebalance_in_dest_sets(self):
    """Start 1-1 unidirectional XDCR from cluster A to cluster B; while the
    replication runs, rebalance the remaining nodes into destination
    cluster B (making it a 1-2 topology). After loading finishes, verify
    the replicated data and revisions on both clusters.
    """
    replication_type = "continuous"
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    self.log.info("START XDC replication...")
    # Start replication
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                           self._buckets[0],
                                                           cluster_ref_b)
    # Record replication state for teardown/cleanup
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))
    # Start load
    self.log.info("START loading data...")
    load_thread_list = []
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    # Trigger rebalance
    self.log.info("DURING replication, start rebalancing...")
    # BUGFIX: the original assigned servers_a (cluster 0) but then used
    # an undefined name servers_b below, raising NameError; the rebalance
    # targets the destination cluster, so fetch cluster 1's servers.
    servers_b = self._input.clusters.get(1)
    self.log.info("REBALANCING IN Cluster B ...")
    RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1, monitor=False)
    self.assertTrue(rest_conn_b.monitorRebalance(),
                    msg="rebalance operation on cluster {0}".format(servers_b))
    self.log.info("ALL rebalancing done...")
    # Wait for loading to finish
    load_thread.join()
    self.log.info("All deleting threads finished")
    # Verify replication
    self.log.info("START data verification at cluster A...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START data verification at cluster B...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START revision verification on both clusters...")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")
示例14: cluster_rebalance_in
# 需要导入模块: from membase.helper.rebalance_helper import RebalanceHelper [as 别名]
# 或者: from membase.helper.rebalance_helper.RebalanceHelper import rebalance_in [as 别名]
def cluster_rebalance_in(testcase, servers, monitor=True):
    """Rebalance all remaining nodes into both of the testcase's clusters.

    :param testcase: test instance exposing ``_input.clusters``.
    :param servers: legacy/unused parameter, kept for interface
        compatibility with existing callers.
    :param monitor: whether to block until each rebalance completes.
    """
    servers_a = testcase._input.clusters.get(0)
    servers_b = testcase._input.clusters.get(1)
    # BUGFIX: forward the caller's monitor flag instead of hard-coding
    # monitor=True, which silently ignored the parameter.
    RebalanceHelper.rebalance_in(servers_a, len(servers_a) - 1, monitor=monitor)
    RebalanceHelper.rebalance_in(servers_b, len(servers_b) - 1, monitor=monitor)