本文整理汇总了Python中membase.api.rest_client.RestConnection.set_recovery_type方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.set_recovery_type方法的具体用法?Python RestConnection.set_recovery_type怎么用?Python RestConnection.set_recovery_type使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection的用法示例。
在下文中一共展示了RestConnection.set_recovery_type方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_add_back(self):
    """Fail over the configured nodes, add them back with the requested
    recovery type (full/delta), rebalance, and run mutations meanwhile.

    Test params: recoveryType ("full"/"delta", default "full").
    """
    try:
        self.run_async_data()
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        nodes_all = rest.node_statuses()
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out,
                                                    graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            # cluster_run: all nodes share the loopback IP, so match on port
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            # recovery type must be set after add-back, before rebalance
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        self.run_mutation_operations_for_situational_tests()
        self.sleep(120, "Wait for rebalance")
        # signal the background loader threads to stop; guard against None
        # entries before touching the thread object
        for t in self.load_thread_list:
            if t is not None and t.is_alive():
                t.signal = False
    except Exception:
        # re-raise so the framework records the failure
        raise
示例2: test_capi_with_failover
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_capi_with_failover(self):
    """XDCR-to-Elasticsearch with failover: pause replication, load docs,
    resume, fail over a node, optionally recover it (recoveryType param),
    rebalance, and verify the ES side received the data."""
    repl_id = self._start_es_replication()
    rest_conn = RestConnection(self.src_master)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'true')
    gen = DocumentGenerator('es', '{{"key":"value","mutated":0}}', xrange(100),
                            start=0, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    rest_conn.pause_resume_repl_by_id(repl_id, REPL_PARAM.PAUSE_REQUESTED, 'false')
    graceful = self._input.param("graceful", False)
    self.recoveryType = self._input.param("recoveryType", None)
    self.src_cluster.failover(graceful=graceful)
    self.sleep(30)
    if self.recoveryType:
        server_nodes = rest_conn.node_statuses()
        for node in server_nodes:
            # recover only the node that was failed over (servers[1])
            if node.ip == self._input.servers[1].ip:
                rest_conn.set_recovery_type(otpNode=node.id,
                                            recoveryType=self.recoveryType)
                self.sleep(30)
                rest_conn.add_back_node(otpNode=node.id)
        rebalance = self.cluster.async_rebalance(self.src_cluster.get_nodes(), [], [])
        rebalance.result()
    self._verify_es_results()
示例3: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_add_back(self):
    """Index-service variant: fail over nodes while index tasks run, add the
    nodes back with the requested recovery type, rebalance, then run the
    remaining KV/index task phases.

    Test params: recoveryType ("full"/"delta", default "full").
    """
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        self._run_initial_index_tasks()
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out,
                                                    graceful=self.graceful)
        failover_task.result()
        kvOps_tasks = self._run_kvops_tasks()
        before_index_ops = self._run_before_index_tasks()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            # cluster_run: all nodes share the loopback IP, so match on port
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        in_between_index_ops = self._run_in_between_tasks()
        rebalance.result()
        self.sleep(120)
        self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops])
        self._run_after_index_tasks()
    except Exception:
        # re-raise so the framework records the failure
        raise
示例4: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_add_back(self):
    """Fail over nodes with bucket operations running before/after, add the
    nodes back with the requested recovery type, and rebalance.

    Test params: recoveryType ("full"/"delta", default "full").
    """
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        nodes_all = rest.node_statuses()
        tasks = self.async_check_and_run_operations(buckets=self.buckets, before=True)
        for task in tasks:
            task.result()
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out,
                                                    graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            # cluster_run: all nodes share the loopback IP, so match on port
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            self.log.info(node)
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
        self._run_aync_tasks()
        rebalance.result()
        self.run_after_operations()
    except Exception:
        # re-raise so the framework records the failure
        raise
示例5: test_add_remove_graceful_add_back_node_with_cert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_add_remove_graceful_add_back_node_with_cert(self, recovery_type=None):
    """Add nodes to an x509-enabled cluster, graceful-failover one node, add
    it back with the requested recovery type, rebalance, and verify SSL
    login works on every node before and after.

    Test params: recovery_type ("full"/"delta"); the keyword argument is
    kept for interface compatibility but is overridden by the test param.
    """
    recovery_type = self.input.param('recovery_type')
    rest = RestConnection(self.master)
    # otpNode names use the "ns_1@<ip>" convention
    known_nodes = ['ns_1@' + self.master.ip]
    servs_inout = self.servers[1:]
    serv_out = 'ns_1@' + servs_inout[1].ip
    rest.create_bucket(bucket='default', ramQuotaMB=100)
    x509main(self.master).setup_master()
    x509main().setup_cluster_nodes_ssl(servs_inout)
    for server in servs_inout:
        rest.add_node('Administrator', 'password', server.ip)
        known_nodes.append('ns_1@' + server.ip)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
    rest.fail_over(serv_out, graceful=True)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    # recovery type must be set before the node is added back / rebalanced in
    rest.set_recovery_type(serv_out, recovery_type)
    rest.add_back_node(serv_out)
    rest.rebalance(known_nodes)
    self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
    for server in servs_inout:
        status = x509main(server)._validate_ssl_login()
        self.assertEqual(status, 200, "Not able to login via SSL code")
示例6: test_failover_indexer_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_indexer_add_back(self):
    """Indexer add-back scenario: fail over `nodes_out` index nodes, add them
    back with the requested recovery type, rebalance, and verify the cluster
    is healthy and all bucket items are indexed.

    Test params: recoveryType ("full"/"delta", default "full"),
    nodes_out (number of index nodes to fail over, default 0).
    """
    rest = RestConnection(self.master)
    recoveryType = self.input.param("recoveryType", "full")
    indexer_out = int(self.input.param("nodes_out", 0))
    nodes = self.get_nodes_from_services_map(service_type="index",
                                             get_all_nodes=True)
    self.assertGreaterEqual(len(nodes), indexer_out,
                            "Existing Indexer Nodes less than Indexer out nodes")
    pre_recovery_tasks = self.async_run_operations(phase="before")
    self._run_tasks([pre_recovery_tasks])
    self.get_dgm_for_plasma()
    kvOps_tasks = self._run_kvops_tasks()
    try:
        self.use_replica = False
        self._create_replica_indexes()
        servr_out = nodes[:indexer_out]
        failover_task = self.cluster.async_failover(
            [self.master], failover_nodes=servr_out,
            graceful=self.graceful)
        failover_task.result()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            # cluster_run: all nodes share the loopback IP, so match on port
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            log.info("Adding back {0} with recovery type {1}...".format(
                node.ip, recoveryType))
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id,
                                   recoveryType=recoveryType)
        log.info("Rebalancing nodes in...")
        mid_recovery_tasks = self.async_run_operations(phase="in_between")
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], [], [])
        rebalance.result()
        self._run_tasks([mid_recovery_tasks, kvOps_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self._run_tasks([post_recovery_tasks])
    except Exception as ex:
        log.info(str(ex))
        raise
示例7: test_online_upgrade_with_failover
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_online_upgrade_with_failover(self):
    """Online upgrade of each node via hard failover -> binary upgrade ->
    add-back with full recovery -> rebalance; afterwards verify cluster
    health, rebuild indexes if needed, and run post-upgrade queries.

    NOTE(review): original indentation was lost in the source; the loop
    structure below (per-node upgrade inside the for loop, verification
    after it) is reconstructed — confirm against upstream testrunner.
    """
    upgrade_nodes = self.servers[:self.nodes_init]
    if self.disable_plasma_upgrade:
        # bring in an already-upgraded index node and pin it to the old
        # (non-plasma) storage mode before touching the rest of the cluster
        self._install(self.nodes_in_list, version=self.upgrade_to)
        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                                 [self.nodes_in_list[0]], [],
                                                 services=["index"])
        rebalance.result()
        self.sleep(100)
        self.disable_upgrade_to_plasma(self.nodes_in_list[0])
    for node in upgrade_nodes:
        node_rest = RestConnection(node)
        node_info = "{0}:{1}".format(node.ip, node.port)
        node_services_list = node_rest.get_nodes_services()[node_info]
        if "index" in node_services_list:
            # keep index availability while this index node is down
            self._create_equivalent_indexes(node)
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=[node],
                                                    graceful=False)
        failover_task.result()
        self.sleep(100)
        log.info("Node Failed over...")
        upgrade_th = self._async_update(self.upgrade_to, [node])
        for th in upgrade_th:
            th.join()
        log.info("==== Upgrade Complete ====")
        self.sleep(120)
        rest = RestConnection(self.master)
        nodes_all = rest.node_statuses()
        for cluster_node in nodes_all:
            if cluster_node.ip == node.ip:
                log.info("Adding Back: {0}".format(node))
                rest.add_back_node(cluster_node.id)
                rest.set_recovery_type(otpNode=cluster_node.id,
                                       recoveryType="full")
        log.info("Adding node back to cluster...")
        active_nodes = [srvr for srvr in self.servers if srvr.ip != node.ip]
        rebalance = self.cluster.async_rebalance(active_nodes, [], [])
        rebalance.result()
        self.sleep(100)
        self._remove_equivalent_indexes(node)
        self.sleep(60)
    msg = "Cluster is not healthy after upgrade"
    self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
    log.info("Cluster is healthy")
    self.add_built_in_server_user()
    self.sleep(20)
    if self.initial_version.split("-")[0] in UPGRADE_VERS:
        self.multi_drop_index()
        self.sleep(100)
        self._create_indexes()
        self.sleep(100)
    self.assertTrue(self.wait_until_indexes_online(), "Some indexes are not online")
    log.info("All indexes are online")
    self._query_index("post_upgrade")
    self._verify_post_upgrade_results()
    self._update_int64_dataset()
    self._query_for_long_num()
示例8: test_failover_indexer_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_indexer_add_back(self):
    """Indexer add-back scenario with concurrent KV mutations: fail over
    `nodes_out` index nodes, add them back with the requested recovery type,
    rebalance, then verify bucket/index counts and query the indexes.

    Test params: recoveryType ("full"/"delta", default "full"),
    nodes_out (number of index nodes to fail over, default 0).
    """
    self._calculate_scan_vector()
    rest = RestConnection(self.master)
    recoveryType = self.input.param("recoveryType", "full")
    indexer_out = int(self.input.param("nodes_out", 0))
    nodes = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
    self.assertGreaterEqual(len(nodes), indexer_out,
                            "Existing Indexer Nodes less than Indexer out nodes")
    log.info("Running kv Mutations...")
    kvOps_tasks = self.kv_mutations()
    servr_out = nodes[:indexer_out]
    failover_task = self.cluster.async_failover([self.master],
                                                failover_nodes=servr_out,
                                                graceful=self.graceful)
    self._run_tasks([[failover_task], kvOps_tasks])
    before_index_ops = self._run_before_index_tasks()
    nodes_all = rest.node_statuses()
    nodes = []
    if servr_out[0].ip == "127.0.0.1":
        # cluster_run: all nodes share the loopback IP, so match on port
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if (str(node.port) == failover_node.port)])
    else:
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if node.ip == failover_node.ip])
    for node in nodes:
        log.info("Adding back {0} with recovery type {1}...".format(node.ip, recoveryType))
        rest.add_back_node(node.id)
        rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
    log.info("Rebalancing nodes in...")
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    log.info("Running KV mutations...")
    kvOps_tasks = self.kv_mutations()
    self._run_tasks([[rebalance], kvOps_tasks])
    self.sleep(100)
    self._verify_bucket_count_with_index_count(self.load_query_definitions)
    self.multi_query_using_index(buckets=self.buckets,
                                 query_definitions=self.load_query_definitions)
示例9: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_failover_add_back(self):
    """Fail over nodes, run operation phases (before / in_between / after)
    and KV ops around an add-back with the requested recovery type, then
    verify cluster health and that all bucket items are indexed.

    Test params: recoveryType ("full"/"delta", default "full").
    """
    try:
        rest = RestConnection(self.master)
        recoveryType = self.input.param("recoveryType", "full")
        servr_out = self.nodes_out_list
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=servr_out,
                                                    graceful=self.graceful)
        failover_task.result()
        pre_recovery_tasks = self.async_run_operations(phase="before")
        self._run_tasks([pre_recovery_tasks])
        self.get_dgm_for_plasma()
        kvOps_tasks = self._run_kvops_tasks()
        nodes_all = rest.node_statuses()
        nodes = []
        if servr_out[0].ip == "127.0.0.1":
            # cluster_run: all nodes share the loopback IP, so match on port
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if (str(node.port) == failover_node.port)])
        else:
            for failover_node in servr_out:
                nodes.extend([node for node in nodes_all
                              if node.ip == failover_node.ip])
        for node in nodes:
            log.info("Adding Back: {0}".format(node))
            rest.add_back_node(node.id)
            rest.set_recovery_type(otpNode=node.id,
                                   recoveryType=recoveryType)
        rebalance = self.cluster.async_rebalance(
            self.servers[:self.nodes_init], [], [])
        mid_recovery_tasks = self.async_run_operations(phase="in_between")
        rebalance.result()
        self._run_tasks([kvOps_tasks, mid_recovery_tasks])
        # check if the nodes in cluster are healthy
        msg = "Cluster not in Healthy state"
        self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
        log.info("==== Cluster in healthy state ====")
        self._check_all_bucket_items_indexed()
        post_recovery_tasks = self.async_run_operations(phase="after")
        self._run_tasks([post_recovery_tasks])
    except Exception as ex:
        log.info(str(ex))
        raise
示例10: online_upgrade_with_failover
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def online_upgrade_with_failover(self, upgrade_servers):
    """Upgrade each server in `upgrade_servers` one at a time: hard failover,
    run the binary upgrade, add the node back with full recovery, and
    rebalance the remaining participating servers."""
    self.log.info("online upgrade servers: {0}".format(str(upgrade_servers)))
    for server in upgrade_servers:
        self.log.info("upgrading: {0}".format(str(server)))
        participating_servers = [s for s in self.servers]
        failover_task = self.cluster.async_failover([self.master],
                                                    failover_nodes=[server],
                                                    graceful=False)
        failover_task.result()
        upgrade_th = self._async_update(self.upgrade_versions[0], [server])
        for th in upgrade_th:
            th.join()
        rest = RestConnection(self.master)
        nodes_all = rest.node_statuses()
        for cluster_node in nodes_all:
            if cluster_node.ip == server.ip:
                rest.add_back_node(cluster_node.id)
                rest.set_recovery_type(otpNode=cluster_node.id, recoveryType="full")
        participating_servers.remove(server)
        self.log.info("participating servers: {0}".format(str(participating_servers)))
        rebalance = self.cluster.async_rebalance(participating_servers, [], [])
        rebalance.result()
示例11: replicate_correct_data_after_rollback
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def replicate_correct_data_after_rollback(self):
    """Verify that after a rollback the replica carries the pre-rollback data.

    Flow: load docs -> snapshot the values active on node 1 -> stop
    persistence everywhere -> mutate some keys (unpersisted) -> kill
    memcached on node 1 so those mutations are lost -> restart persistence
    on node 2, graceful-failover node 2 -> assert the surviving values match
    the pre-mutation snapshot -> add node 2 back with delta recovery.

    @attention: This test case has some issue with docker runs. It
    passes without any issue on VMs.
    """
    NUMBER_OF_DOCS = 10000
    # populate the kvs; each value looks like
    # { "mutated": 0, "_id": "keyname-x", "val-field-name": "serial-vals-100" }
    vals = ['serial-vals-' + str(i) for i in xrange(NUMBER_OF_DOCS)]
    template = '{{ "val-field-name": "{0}" }}'
    gen_load = DocumentGenerator('keyname', template, vals, start=0,
                                 end=NUMBER_OF_DOCS)
    rc = self.cluster.load_gen_docs(self.servers[0], self.buckets[0].name, gen_load,
                                    self.buckets[0].kvs[1], "create", exp=0, flag=0,
                                    batch_size=1000)
    # store the KVs which were modified and active on node 1
    modified_kvs_active_on_node1 = {}
    vbucket_client = VBucketAwareMemcached(RestConnection(self.master), 'default')
    client = MemcachedClientHelper.direct_client(self.servers[0], 'default')
    for i in range(NUMBER_OF_DOCS / 100):
        keyname = 'keyname-' + str(i)
        # same vbucket-hash formula the SDKs use (crc32 high bits mod vbuckets)
        vbId = ((zlib.crc32(keyname) >> 16) & 0x7fff) & (self.vbuckets - 1)
        if vbucket_client.vBucketMap[vbId].split(':')[0] == self.servers[0].ip:
            rc = client.get(keyname)
            modified_kvs_active_on_node1[keyname] = rc[2]
    # stop persistence so the next round of mutations never hits disk
    for bucket in self.buckets:
        for s in self.servers[:self.nodes_init]:
            client = MemcachedClientHelper.direct_client(s, bucket)
            try:
                client.stop_persistence()
            except MemcachedError as e:
                if self.bucket_type == 'ephemeral':
                    # ephemeral buckets have no flusher: expected error, test ends
                    self.assertTrue(
                        "Memcached error #4 'Invalid': Flusher not running. for vbucket :0 to mc " in e.message)
                    return
                else:
                    raise
    # modify less than 1/2 of the keys
    vals = ['modified-serial-vals-' + str(i) for i in xrange(NUMBER_OF_DOCS / 100)]
    template = '{{ "val-field-name": "{0}" }}'
    gen_load = DocumentGenerator('keyname', template, vals, start=0,
                                 end=NUMBER_OF_DOCS / 100)
    rc = self.cluster.load_gen_docs(self.servers[0], self.buckets[0].name, gen_load,
                                    self.buckets[0].kvs[1], "create", exp=0, flag=0,
                                    batch_size=1000)
    # kill memcached; since persistence is disabled, the second set of
    # mutations is lost when it restarts
    shell = RemoteMachineShellConnection(self.servers[0])
    shell.kill_memcached()
    time.sleep(10)
    # start persistence on the second node
    client = MemcachedClientHelper.direct_client(self.servers[1], 'default')
    client.start_persistence()
    time.sleep(5)
    # failover to the second node
    rc = self.cluster.failover(self.servers, self.servers[1:2], graceful=True)
    time.sleep(30)  # give time for the failover to complete
    # check the values: they should be what they were prior to the second update
    client = MemcachedClientHelper.direct_client(self.servers[0], 'default')
    for k, v in modified_kvs_active_on_node1.iteritems():
        rc = client.get(k)
        self.assertTrue(v == rc[2], 'Expected {0}, actual {1}'.format(v, rc[2]))
    # rebalance the failed-over node back into the cluster with delta recovery
    rest_obj = RestConnection(self.servers[0])
    nodes_all = rest_obj.node_statuses()
    for node in nodes_all:
        if node.ip == self.servers[1].ip:
            break
    node_id_for_recovery = node.id
    status = rest_obj.add_back_node(node_id_for_recovery)
    if status:
        rest_obj.set_recovery_type(node_id_for_recovery,
                                   recoveryType='delta')
    rc = self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
示例12: FailoverTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
#.........这里部分代码省略.........
if not self.withMutationOps:
self.sleep(60)
self.data_analysis_all(record_static_data_set, _servers_, self.buckets, path = None, addedItems = None)
# Check Cluster Stats and Data as well if max_verify > 0
# Check Failover logs :: Not sure about this logic, currently not checking, will update code once confirmed
# Currently, only for checking case where we have graceful failover
if self.version_greater_than_2_5 and self.graceful and self.upr_check:
new_failover_stats = self.compare_failovers_logs(prev_failover_stats, _servers_, self.buckets)
new_vbucket_stats = self.compare_vbucket_seqnos(prev_vbucket_stats, _servers_, self.buckets)
self.compare_vbucketseq_failoverlogs(new_vbucket_stats, new_failover_stats)
# Verify Active and Replica Bucket Count
if self.num_replicas > 0:
nodes = self.get_nodes_in_cluster(self.master)
self.vb_distribution_analysis(servers = nodes, buckets = self.buckets, std = 20.0 , total_vbuckets = self.total_vbuckets)
self.log.info("End VERIFICATION for Rebalance after Failover Only")
def run_add_back_operation_and_verify(self, chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats):
    """
    Method to run add-back operation with recovery type = (delta/full).
    It also verifies if the operations are correct with data verification steps.
    """
    _servers_ = self.filter_servers(self.servers, chosen)
    self._wait_for_stats_all_buckets(_servers_, check_ep_items_remaining=True)
    serverMap = self.get_server_map(self.servers)
    recoveryTypeMap = self.define_maps_during_failover(self.recoveryType)
    fileMapsForVerification = self.create_file(chosen, self.buckets, serverMap)
    index = 0
    for node in chosen:
        self.rest.add_back_node(node.id)
        self.sleep(5)
        if self.recoveryType:
            # recovery type is a precondition: set it before rebalance
            self.rest.set_recovery_type(otpNode=node.id,
                                        recoveryType=self.recoveryType[index])
            index += 1
    self.sleep(20, "After failover before invoking rebalance...")
    self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                        ejectedNodes=[],
                        deltaRecoveryBuckets=self.deltaRecoveryBuckets)
    # Perform Compaction
    if self.compact:
        for bucket in self.buckets:
            self.cluster.compact_bucket(self.master, bucket)
    # Perform View Validation if Supported
    nodes = self.filter_servers(self.servers, chosen)
    if self.withViewsOps:
        self.query_and_monitor_view_tasks(nodes)
    # Run operations if required during rebalance after failover
    if self.withMutationOps:
        self.run_mutation_operations_after_failover()
    # Kill or restart operations
    if self.killNodes or self.stopNodes or self.firewallOnNodes:
        self.victim_node_operations(node=chosen[0])
        self.log.info(" Start Rebalance Again !")
        self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                            ejectedNodes=[],
                            deltaRecoveryBuckets=self.deltaRecoveryBuckets)
    # Monitor Rebalance
    msg = "rebalance failed while removing failover nodes {0}".format(chosen)
    self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
    # Drain ep_queue and make sure that intra-cluster replication is complete
    self._wait_for_stats_all_buckets(self.servers, check_ep_items_remaining=True)
示例13: test_volume_with_rebalance
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
#.........这里部分代码省略.........
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*5)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance out of 1 nodes and Rebalance In 2 nodes=========")
#Rebalance out of 1 nodes and Rebalance In 2 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:2]
servers_out = list(set(new_server_list) - set([self.master]))[0:1]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*6)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*6)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in) - set(servers_out))
self.log.info("==========Rebalance in 4 nodes =========")
#Rebalance in 4 nodes
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*7)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*7)))
for t in load_thread:
t.start()
new_server_list=list(set(new_server_list + servers_in))
self.log.info("==========Rebalance out 4 nodes =========")
#Rebalance out 4 nodes
servers_out = list(set(new_server_list) - set([self.master]))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, [], servers_out)
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*8)
self.sleep(30)
# load more document
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items*8)))
for t in load_thread:
t.start()
new_server_list = list(set(new_server_list) - set(servers_out))
self.log.info("======Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups=========")
#Rebalance in 4 nodes (8 nodes) wait for rebalance to finish and move between server groups
servers_in = list(set(self.servers) - set(new_server_list))[0:4]
rebalance = self.cluster.async_rebalance(servers_init, servers_in, [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*9)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 9)))
for t in load_thread:
t.start()
self.shuffle_nodes_between_zones_and_rebalance()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*10)
self.sleep(30)
load_thread = []
for b in bucket:
load_thread.append(Thread(target=self.load, args=(self.master, self.num_items, b, self.num_items * 10)))
for t in load_thread:
t.start()
self.log.info("======Graceful failover 1 KV node and add back(Delta and Full)=========")
#Graceful failover 1 KV node and add back(Delta and Full)
kv_server = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=False)
fail_over_task = self.cluster.async_failover([self.master], failover_nodes=[kv_server], graceful=True)
fail_over_task.result()
self.sleep(120)
# do a recovery and rebalance
rest.set_recovery_type('[email protected]' + kv_server.ip, recoveryType=self.recoveryType)
rest.add_back_node('[email protected]' + kv_server.ip)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
for t in load_thread:
t.join()
for b in bucket:
self.check_dataloss(self.master, b,self.num_items*11)
self.sleep(30)
示例14: test_clusterOps
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
def test_clusterOps(self):
    """Validate the audit-log record emitted for a cluster operation chosen
    by the 'ops' test param (add node KV/N1QL/Index, remove node, rebalance
    in/out, failover, node recovery).

    NOTE(review): the otpNode prefix 'ns_1@' was mangled by the source
    scraper; it is restored here per Couchbase's node-naming convention.
    """
    Audit = audit(eventID=self.eventID, host=self.master)
    ops = self.input.param('ops', None)
    servs_inout = self.servers[1:self.nodes_in + 1]
    source = 'ns_server'
    if ops in ['addNodeKV']:
        self.cluster.rebalance(self.servers, servs_inout, [])
        print(servs_inout)
        print(servs_inout[0].ip)
        expectedResults = {"services": ['kv'], 'port': 8091, 'hostname': servs_inout[0].ip,
                           'groupUUID': "0", 'node': 'ns_1@' + servs_inout[0].ip, 'source': source,
                           'user': self.master.rest_username, "ip": self.ipAddress, "remote:port": 57457}
    if ops in ['addNodeN1QL']:
        rest = RestConnection(self.master)
        rest.add_node(user=self.master.rest_username, password=self.master.rest_password,
                      remoteIp=servs_inout[0].ip, services=['n1ql'])
        expectedResults = {"services": ['n1ql'], 'port': 8091, 'hostname': servs_inout[0].ip,
                           'groupUUID': "0", 'node': 'ns_1@' + servs_inout[0].ip, 'source': source,
                           'user': self.master.rest_username, "ip": self.ipAddress, "remote:port": 57457}
    if ops in ['addNodeIndex']:
        rest = RestConnection(self.master)
        rest.add_node(user=self.master.rest_username, password=self.master.rest_password,
                      remoteIp=servs_inout[0].ip, services=['index'])
        expectedResults = {"services": ['index'], 'port': 8091, 'hostname': servs_inout[0].ip,
                           'groupUUID': "0", 'node': 'ns_1@' + servs_inout[0].ip, 'source': source,
                           'user': self.master.rest_username, "ip": self.ipAddress, "remote:port": 57457}
    if ops in ['removeNode']:
        self.cluster.rebalance(self.servers, [], servs_inout)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info("OS type is {0}".format(os_type))
        # windows reports known_nodes in the opposite order
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes': ['ns_1@' + servs_inout[0].ip], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
        else:
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes': ['ns_1@' + servs_inout[0].ip], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
    if ops in ['rebalanceIn']:
        self.cluster.rebalance(self.servers, servs_inout, [])
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info("OS type is {0}".format(os_type))
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes': [], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
        else:
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes': [], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
    if ops in ['rebalanceOut']:
        self.cluster.rebalance(self.servers, [], servs_inout)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info("OS type is {0}".format(os_type))
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes': ['ns_1@' + servs_inout[0].ip], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
        else:
            expectedResults = {"delta_recovery_buckets": "all",
                               'known_nodes': ["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes': ['ns_1@' + servs_inout[0].ip], 'source': source,
                               'user': self.master.rest_username, "ip": self.ipAddress, "port": 57457}
    if ops in ['failover']:
        # avoid shadowing the builtin `type`
        failover_type = self.input.param('type', None)
        self.cluster.failover(self.servers, servs_inout)
        self.cluster.rebalance(self.servers, [], [])
        expectedResults = {'source': source, 'user': self.master.rest_username,
                           "ip": self.ipAddress, "port": 57457, 'type': failover_type,
                           'node': 'ns_1@' + servs_inout[0].ip}
    if ops == 'nodeRecovery':
        expectedResults = {'node': 'ns_1@' + servs_inout[0].ip, 'type': 'delta',
                           'source': source, 'user': self.master.rest_username,
                           "ip": self.ipAddress, "port": 57457}
        self.cluster.failover(self.servers, servs_inout)
        rest = RestConnection(self.master)
        rest.set_recovery_type(expectedResults['node'], 'delta')
    # Pending of failover - soft
    self.checkConfig(self.eventID, self.master, expectedResults)
示例15: FailoverTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_recovery_type [as 别名]
#.........这里部分代码省略.........
if self.runViews:
if self.runViewsDuringFailover:
self.monitor_view_tasks(_servers_)
self.verify_query_task()
# Check Failover logs :: Not sure about this logic, currently not checking, will update code once confirmed
# Currently, only for checking case where we have graceful failover
if self.version_greater_than_2_5 and self.graceful and self.upr_check:
new_failover_stats = self.compare_failovers_logs(prev_failover_stats, _servers_, self.buckets)
new_vbucket_stats = self.compare_vbucket_seqnos(prev_vbucket_stats, _servers_, self.buckets)
self.compare_vbucketseq_failoverlogs(new_vbucket_stats, new_failover_stats)
self.log.info("End VERIFICATION for Rebalance after Failover Only")
finally:
if self.during_ops:
if self.during_ops == "change_password":
self.change_password(new_password=old_pass)
elif self.during_ops == "change_port":
self.change_port(new_port='8091',
current_port=self.input.param("new_port", "9090"))
def run_add_back_operation_and_verify(self, chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats):
"""
Method to run add-back operation with recovery type = (delta/full)
It also verifies if the operations are correct with data verificaiton steps
"""
serverMap = self.get_server_map(self.servers)
recoveryTypeMap = self.define_maps_during_failover(self.recoveryType)
fileMapsForVerification = self.create_file(chosen, self.buckets, serverMap)
index = 0
for node in chosen:
self.rest.add_back_node(node.id)
self.sleep(5)
if self.recoveryType:
# define precondition for recoverytype
self.rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType[index])
index += 1
self.sleep(20, "After failover before invoking rebalance...")
self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
ejectedNodes=[])
msg = "rebalance failed while removing failover nodes {0}".format(chosen)
# Run operations if required during rebalance after failover
if self.withOps:
for task in self.ops_tasks:
task.result()
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
# Drain ep_queue and make sure that intra-cluster replication is complete
self._verify_stats_all_buckets(self.servers,timeout = 120)
self._wait_for_stats_all_buckets(self.servers)
self.log.info("Begin VERIFICATION for Add-back and rebalance")
# Verify recovery Type succeeded if we added-back nodes
self.verify_for_recovery_type(chosen, serverMap, self.buckets,
recoveryTypeMap, fileMapsForVerification)
# Comparison of all data if required
if not self.withOps:
self.data_analysis_all(record_static_data_set,self.servers, self.buckets, path = None)
# Verify Stats of cluster and Data is max_verify > 0
self.verify_cluster_stats(self.servers, self.referenceNode)
# Verify if vbucket sequence numbers and failover logs are as expected
# We will check only for version > 2.5.* and if the failover is graceful
if self.version_greater_than_2_5 and self.graceful and self.upr_check:
new_vbucket_stats = self.compare_vbucket_seqnos(prev_vbucket_stats, self.servers, self.buckets,perNode= False)