This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.add_back_node. If you are wondering what RestConnection.add_back_node does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, membase.api.rest_client.RestConnection.
Fifteen code examples of RestConnection.add_back_node are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
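Before the individual examples, here is a minimal sketch of the call sequence most of them follow: fail over a node, add it back with add_back_node, choose a recovery type, then rebalance. The helper name fail_over_and_add_back and its arguments are hypothetical and introduced only for illustration; the RestConnection calls themselves mirror the ones used in the examples below.
from membase.api.rest_client import RestConnection

def fail_over_and_add_back(master, failed_server, recovery_type="full"):
    """Hypothetical helper: fail a node over, add it back, and rebalance (a sketch, not the library's API)."""
    rest = RestConnection(master)
    # Find the otpNode entry (node.id) that corresponds to the server we want to fail over.
    node = [n for n in rest.node_statuses() if n.ip == failed_server.ip][0]
    rest.fail_over(node.id, graceful=True)
    # A graceful failover runs like a rebalance, so wait for it to finish.
    rest.monitorRebalance(stop_if_loop=True)
    # Mark the failed-over node for add-back and pick "full" or "delta" recovery.
    rest.add_back_node(node.id)
    rest.set_recovery_type(otpNode=node.id, recoveryType=recovery_type)
    # Rebalance with no ejected nodes so the node rejoins the cluster.
    rest.rebalance(otpNodes=[n.id for n in rest.node_statuses()], ejectedNodes=[])
    rest.monitorRebalance(stop_if_loop=True)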
Example 1: test_failover_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_add_back(self):
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        nodes_all = rest.node_statuses()
        tasks = self.async_check_and_run_operations(buckets=self.buckets, before=True)
        for task in tasks:
            task.result()
        failover_task = self.cluster.async_failover([self.master], failover_nodes=servr_out, graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        self._run_aync_tasks()
        rebalance.result()
        self.run_after_operations()
    except Exception, ex:
        raise
Example 2: test_add_remove_add_back_node_with_cert
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_add_remove_add_back_node_with_cert(self, rebalance=None):
    rebalance = self.input.param('rebalance')
    rest = RestConnection(self.master)
    servs_inout = self.servers[1:3]
    serv_out = 'ns_1@' + servs_inout[1].ip
    known_nodes = ['ns_1@' + self.master.ip]
    x509main(self.master).setup_master()
    x509main().setup_cluster_nodes_ssl(servs_inout)
    for server in servs_inout:
        rest.add_node('Administrator', 'password', server.ip)
        known_nodes.append('ns_1@' + server.ip)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
    rest.fail_over(serv_out, graceful=False)
    if (rebalance):
        rest.rebalance(known_nodes, [serv_out])
        self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
        rest.add_node('Administrator', 'password', servs_inout[1].ip)
    else:
        rest.add_back_node(serv_out)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        response = x509main(server)._validate_ssl_login()
        self.assertEqual(response, 200, "Not able to login via SSL code")
Example 3: test_add_remove_graceful_add_back_node_with_cert
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_add_remove_graceful_add_back_node_with_cert(self, recovery_type=None):
    recovery_type = self.input.param('recovery_type')
    rest = RestConnection(self.master)
    known_nodes = ['ns_1@' + self.master.ip]
    progress = None
    count = 0
    servs_inout = self.servers[1:]
    serv_out = 'ns_1@' + servs_inout[1].ip
    rest.create_bucket(bucket='default', ramQuotaMB=100)
    x509main(self.master).setup_master()
    x509main().setup_cluster_nodes_ssl(servs_inout)
    for server in servs_inout:
        rest.add_node('Administrator', 'password', server.ip)
        known_nodes.append('ns_1@' + server.ip)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
    rest.fail_over(serv_out, graceful=True)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    rest.set_recovery_type(serv_out, recovery_type)
    rest.add_back_node(serv_out)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
Example 4: test_capi_with_failover
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_capi_with_failover(self):
    repl_id = self._start_es_replication()
    rest_conn = RestConnection(self.src_master)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')
    gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}', xrange(100), start=0, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')
    graceful = self._input.param("graceful", False)
    self.recoveryType = self._input.param("recoveryType", None)
    self.src_cluster.failover(graceful=graceful)
    self.sleep(30)
    if self.recoveryType:
        server_nodes = rest_conn.node_statuses()
        for node in server_nodes:
            if node.ip == self._input.servers[1].ip:
                rest_conn.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
                self.sleep(30)
                rest_conn.add_back_node(otpNode=node.id)
        rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        rebalance.result()
    self._verify_es_results()
Example 5: test_failover_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_add_back(self):
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        self._run_initial_index_tasks()
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out, graceful=self.graceful)
        failover_task.result()
        kvOps_tasks = self._run_kvops_tasks()
        before_index_ops = self._run_before_index_tasks()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        in_between_index_ops = self._run_in_between_tasks()
        rebalance.result()
        self.sleep(120)
        self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops])
        self._run_after_index_tasks()
    except Exception, ex:
        raise
Example 6: test_failover_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_add_back(self):
    try:
        self.run_async_data()
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        nodes_all = rest.node_statuses()
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out, graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        self.run_mutation_operations_for_situational_tests()
        self.sleep(120, "Wait for rebalance")
        for t in self.load_thread_list:
            if t.is_alive():
                if t != None:
                    t.signal = False
    except Exception, ex:
        raise
Example 7: test_failover_indexer_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_indexer_add_back(self):
    """
    Indexer add back scenarios
    :return:
    """
    rest = RestConnection(self.master)
    recoveryType = self.input.param("recoveryType", "full")
    indexer_out = int(self.input.param("nodes_out", 0))
    nodes = self.get_nodes_from_services_map(service_type="index",
                                             get_all_nodes=True)
    self.assertGreaterEqual(len(nodes), indexer_out,
                            "Existing Indexer Nodes less than Indexer out nodes")
    pre_recovery_tasks = self.async_run_operations(phase="before")
    self._run_tasks([pre_recovery_tasks])
    self.get_dgm_for_plasma()
    kvOps_tasks = self._run_kvops_tasks()
    try:
        self.use_replica = False
        self._create_replica_indexes()
        servr_out = nodes[:indexer_out]
        failover_task = self.cluster.async_failover(
            [self.master], failover_nodes=servr_out,
            graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            log.info("Adding back {0} with recovery type {1}...".format(
                node.ip, recoveryType))
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id,
                                   recoveryType=recoveryType)
        log.info("Rebalancing nodes in...")
        mid_recovery_tasks = self.async_run_operations(phase="in_between")
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], [], [])
        rebalance.result()
        self._run_tasks([mid_recovery_tasks, kvOps_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self._run_tasks([post_recovery_tasks])
    except Exception, ex:
        log.info(str(ex))
        raise
Example 8: test_online_upgrade_with_failover
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_online_upgrade_with_failover(self):
    upgrade_nodes = self.servers[:self.nodes_init]
    if self.disable_plasma_upgrade:
        self._install(self.nodes_in_list, version=self.upgrade_to)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                 [self.nodes_in_list[0]], [],
                                                 services=["index"])
        rebalance.result()
        self.sleep(100)
        self.disable_upgrade_to_plasma(self.nodes_in_list[0])
    for node in upgrade_nodes:
        node_rest = RestConnection(node)
        node_info = "{0}:{1}".format(node.ip, node.port)
        node_services_list = node_rest.get_nodes_services()[node_info]
        if "index" in node_services_list:
            self._create_equivalent_indexes(node)
        failover_task = self.cluster.async_failover([self.master], failover_nodes=[node], graceful=False)
        failover_task.result()
        self.sleep(100)
        log.info("Node Failed over...")
        upgrade_th = self._async_update(self.upgrade_to, [node])
        for th in upgrade_th:
            th.join()
        log.info("==== Upgrade Complete ====")
        self.sleep(120)
        rest = RestConnection(self.master)
        nodes_all = rest.node_statuses()
        for cluster_node in nodes_all:
            if cluster_node.ip == node.ip:
                log.info("Adding Back: {0}".format(node))
                rest.add_back_node(cluster_node.id)
                rest.set_recovery_type(otpNode=cluster_node.id, recoveryType="full")
        log.info("Adding node back to cluster...")
        active_nodes = [srvr for srvr in self.servers if srvr.ip != node.ip]
        rebalance = self.cluster.async_rebalance(active_nodes, [], [])
        rebalance.result()
        self.sleep(100)
        self._remove_equivalent_indexes(node)
        self.sleep(60)
    msg = "Cluster is not healthy after upgrade"
    self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
    log.info("Cluster is healthy")
    self.add_built_in_server_user()
    self.sleep(20)
    if self.initial_version.split("-")[0] in UPGRADE_VERS:
        self.multi_drop_index()
        self.sleep(100)
        self._create_indexes()
        self.sleep(100)
    self.assertTrue(self.wait_until_indexes_online(), "Some indexes are not online")
    log.info("All indexes are online")
    self._query_index("post_upgrade")
    self._verify_post_upgrade_results()
    self._update_int64_dataset()
    self._query_for_long_num()
Example 9: test_failover_indexer_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_indexer_add_back(self):
    """
    Indexer add back scenarios
    :return:
    """
    self._calculate_scan_vector()
    rest = RestConnection(self.master)
    recoveryType = self.input.param("recoveryType", "full")
    indexer_out = int(self.input.param("nodes_out", 0))
    nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
    self.assertGreaterEqual(len(nodes), indexer_out,
                            "Existing Indexer Nodes less than Indexer out nodes")
    log.info("Running kv Mutations...")
    kvOps_tasks = self.kv_mutations()
    servr_out = nodes[:indexer_out]
    failover_task = self.cluster.async_failover([self.master],
                                                failover_nodes=servr_out, graceful=self.graceful)
    self._run_tasks([[failover_task], kvOps_tasks])
    before_index_ops = self._run_before_index_tasks()
    nodes_all = rest.node_statuses()
    nodes = []
    if servr_out[0].ip == "127.0.0.1":
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if (str(node.port) == failover_node.port)])
    else:
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if node.ip == failover_node.ip])
    for node in nodes:
        log.info("Adding back {0} with recovery type {1}...".format(node.ip, recoveryType))
        rest.add_back_node(node.id)
        rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
    log.info("Rebalancing nodes in...")
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    log.info("Running KV mutations...")
    kvOps_tasks = self.kv_mutations()
    self._run_tasks([[rebalance], kvOps_tasks])
    self.sleep(100)
    self._verify_bucket_count_with_index_count(self.load_query_definitions)
    self.multi_query_using_index(buckets=self.buckets,
                                 query_definitions=self.load_query_definitions)
Example 10: test_failover_add_back
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def test_failover_add_back(self):
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out, graceful=self.graceful)
        failover_task.result()
        pre_recovery_tasks = self.async_run_operations(phase="before")
        self._run_tasks([pre_recovery_tasks])
        self.get_dgm_for_plasma()
        kvOps_tasks = self._run_kvops_tasks()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            log.info("Adding Back: {0}".format(node))
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id,
                                   recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], [], [])
        mid_recovery_tasks = self.async_run_operations(phase="in_between")
        rebalance.result()
        self._run_tasks([kvOps_tasks, mid_recovery_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self._run_tasks([post_recovery_tasks])
    except Exception, ex:
        log.info(str(ex))
        raise
Example 11: online_upgrade_with_failover
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def online_upgrade_with_failover(self, upgrade_servers):
    self.log.info("online upgrade servers: {0}".format(str(upgrade_servers)))
    for server in upgrade_servers:
        self.log.info("upgrading: {0}".format(str(server)))
        participating_servers = [s for s in self.servers]
        failover_task = self.cluster.async_failover([self.master], failover_nodes=[server], graceful=False)
        failover_task.result()
        upgrade_th = self._async_update(self.upgrade_versions[0], [server])
        for th in upgrade_th:
            th.join()
        rest = RestConnection(self.master)
        nodes_all = rest.node_statuses()
        for cluster_node in nodes_all:
            if cluster_node.ip == server.ip:
                rest.add_back_node(cluster_node.id)
                rest.set_recovery_type(otpNode=cluster_node.id, recoveryType="full")
        participating_servers.remove(server)
        self.log.info("participating servers: {0}".format(str(participating_servers)))
        rebalance = self.cluster.async_rebalance(participating_servers, [], [])
        rebalance.result()
Example 12: perform_failover
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def perform_failover(self):
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    failover_servers = self.servers[:self.nodes_init][-self.failover_factor:]
    failover_nodes = []
    for server in failover_servers:
        for node in nodes:
            if node.ip == server.ip and str(node.port) == server.port:
                failover_nodes.append(node)
    for node in failover_nodes:
        rest.fail_over(node.id)
        self.sleep(5)
    if self.failover == GetrTests.FAILOVER_REBALANCE:
        self.cluster.rebalance(self.servers[:self.nodes_init],
                               [], failover_servers)
    if self.failover == GetrTests.FAILOVER_ADD_BACK:
        for node in failover_nodes:
            rest.add_back_node(node.id)
        self.cluster.rebalance(self.servers[:self.nodes_init],
                               [], [])
Example 13: test_volume_with_rebalance
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
#......... part of the code omitted here .........
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*5)))
for t in load_thread:
    t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
# Rebalance out of 1 node and rebalance in 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*6)
self.sleep(30)
# load more documents
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*6)))
for t in load_thread:
    t.start()
new_server_list = list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
# Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*7)
self.sleep(30)
# load more documents
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*7)))
for t in load_thread:
    t.start()
new_server_list = list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
# Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*8)
self.sleep(30)
# load more documents
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*8)))
for t in load_thread:
    t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info("======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
# Rebalance in 4 nodes (8 nodes), wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*9)
self.sleep(30)
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 9)))
for t in load_thread:
    t.start()
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*10)
self.sleep(30)
load_thread = []
for b in bucket:
    load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 10)))
for t in load_thread:
    t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
# Graceful failover 1 KV node and add back (Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('ns_1@' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('ns_1@' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
    t.join()
for b in bucket:
    self.check_dataloss(self.master, b, self.num_items*11)
self.sleep(30)
Example 14: FailoverTests
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
#......... part of the code omitted here .........
            self.data_analysis_all(record_static_data_set, _servers_, self.buckets, path=None)
        # Check Cluster Stats and Data as well if max_verify > 0
        self.verify_cluster_stats(_servers_, self.referenceNode)
        # If views were created they can be verified
        if self.runViews:
            if self.runViewsDuringFailover:
                self.monitor_view_tasks(_servers_)
            self.verify_query_task()
        # Check Failover logs :: Not sure about this logic, currently not checking, will update code once confirmed
        # Currently, only for checking case where we have graceful failover
        if self.version_greater_than_2_5 and self.graceful and self.upr_check:
            new_failover_stats = self.compare_failovers_logs(prev_failover_stats, _servers_, self.buckets)
            new_vbucket_stats = self.compare_vbucket_seqnos(prev_vbucket_stats, _servers_, self.buckets)
            self.compare_vbucketseq_failoverlogs(new_vbucket_stats, new_failover_stats)
        self.log.info("End VERIFICATION for Rebalance after Failover Only")
    finally:
        if self.during_ops:
            if self.during_ops == "change_password":
                self.change_password(new_password=old_pass)
            elif self.during_ops == "change_port":
                self.change_port(new_port='8091',
                                 current_port=self.input.param("new_port", "9090"))

def run_add_back_operation_and_verify(self, chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats):
    """
    Method to run add-back operation with recovery type = (delta/full)
    It also verifies if the operations are correct with data verification steps
    """
    serverMap = self.get_server_map(self.servers)
    recoveryTypeMap = self.define_maps_during_failover(self.recoveryType)
    fileMapsForVerification = self.create_file(chosen, self.buckets, serverMap)
    index = 0
    for node in chosen:
        self.rest.add_back_node(node.id)
        self.sleep(5)
        if self.recoveryType:
            # define precondition for recoverytype
            self.rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType[index])
            index += 1
    self.sleep(20, "After failover before invoking rebalance...")
    self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                        ejectedNodes=[])
    msg = "rebalance failed while removing failover nodes {0}".format(chosen)
    # Run operations if required during rebalance after failover
    if self.withOps:
        for task in self.ops_tasks:
            task.result()
    self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
    # Drain ep_queue and make sure that intra-cluster replication is complete
    self._verify_stats_all_buckets(self.servers, timeout=120)
    self._wait_for_stats_all_buckets(self.servers)
    self.log.info("Begin VERIFICATION for Add-back and rebalance")
    # Verify recovery type succeeded if we added-back nodes
    self.verify_for_recovery_type(chosen, serverMap, self.buckets,
                                  recoveryTypeMap, fileMapsForVerification)
    # Comparison of all data if required
    if not self.withOps:
        self.data_analysis_all(record_static_data_set, self.servers, self.buckets, path=None)
    # Verify Stats of cluster and Data if max_verify > 0
    self.verify_cluster_stats(self.servers, self.referenceNode)
Example 15: common_test_body
# Module to import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import add_back_node [as alias]
def common_test_body(self, keys_count, failover_reason):
    log = logger.Logger.get_logger()
    log.info("keys_count : {0}".format(keys_count))
    log.info("replicas : {0}".format(self.num_replicas))
    log.info("failover_reason : {0}".format(failover_reason))
    log.info('picking server : {0} as the master'.format(self.master))
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=10000, pause_secs=5, timeout_secs=180)
    self._wait_for_stats_all_buckets(self.servers)
    _servers_ = self.servers
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    RebalanceHelper.wait_for_replication(self.servers, self.cluster)
    chosen = RebalanceHelper.pick_nodes(self.master, howmany=self.num_replicas)
    for node in chosen:
        # let's do op
        if failover_reason == 'stop_server':
            self.stop_server(node)
            log.info("10 seconds delay to wait for membase-server to shutdown")
            # wait for 5 minutes until node is down
            self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                            msg="node status is not unhealthy even after waiting for 5 minutes")
        elif failover_reason == "firewall":
            server = [srv for srv in self.servers if node.ip == srv.ip][0]
            RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
            status = RestHelper(rest).wait_for_node_status(node, "unhealthy", 300)
            if status:
                log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
            else:
                # verify iptables on the node if something wrong
                for server in self.servers:
                    if server.ip == node.ip:
                        shell = RemoteMachineShellConnection(server)
                        info = shell.extract_remote_info()
                        if info.type.lower() == "windows":
                            o, r = shell.execute_command("netsh advfirewall show allprofiles")
                        else:
                            o, r = shell.execute_command("/sbin/iptables --list")
                        shell.log_command_output(o, r)
                        shell.disconnect()
                for i in rest.get_logs(): self.log.error(i)
                api = rest.baseUrl + 'nodeStatuses'
                status, content, header = rest._http_request(api)
                json_parsed = json.loads(content)
                self.log.info("nodeStatuses: {0}".format(json_parsed))
                self.fail("node status is not unhealthy even after waiting for 5 minutes")
        failed_over = rest.fail_over(node.id)
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in 60 seconds..")
            # try again in 75 seconds
            time.sleep(75)
            failed_over = rest.fail_over(node.id)
        self.assertTrue(failed_over, "unable to failover node after {0}".format(failover_reason))
        log.info("failed over node : {0}".format(node.id))
        self._failed_nodes.append(node)
    if self.add_back_flag:
        for node in self._failed_nodes:
            rest.add_back_node(node.id)
            time.sleep(5)
        log.info("10 seconds sleep after failover before invoking rebalance...")
        time.sleep(10)
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[])
        msg = "rebalance failed while removing failover nodes {0}".format(chosen)
        self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
    else:
        # Need a delay > min because MB-7168
        log.info("60 seconds sleep after failover before invoking rebalance...")
        time.sleep(60)
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[node.id for node in chosen])
        if self.during_ops:
            self.sleep(5, "Wait for some progress in rebalance")
            if self.during_ops == "change_password":
                old_pass = self.master.rest_password
                self.change_password(new_password=self.input.param("new_password", "new_pass"))
                rest = RestConnection(self.master)
            elif self.during_ops == "change_port":
                self.change_port(new_port=self.input.param("new_port", "9090"))
                rest = RestConnection(self.master)
        try:
            msg = "rebalance failed while removing failover nodes {0}".format(chosen)
            self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
            for failed in chosen:
                for server in _servers_:
                    if server.ip == failed.ip:
                        _servers_.remove(server)
                        self._cleanup_nodes.append(server)
            log.info("Begin VERIFICATION ...")
            RebalanceHelper.wait_for_replication(_servers_, self.cluster)
            self.verify_cluster_stats(_servers_, self.master)
        finally:
            if self.during_ops:
                if self.during_ops == "change_password":
                    #......... part of the code omitted here .........