本文整理汇总了Python中membase.api.rest_client.RestConnection.reset_autofailover方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.reset_autofailover方法的具体用法?Python RestConnection.reset_autofailover怎么用?Python RestConnection.reset_autofailover使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.reset_autofailover方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _cluster_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def _cluster_setup(self):
    """Initialise the cluster used by the auto-failover tests.

    Reads replica/key/bucket counts from the test input, initialises the
    master node over REST, resets any auto-failover count left by a
    previous test, rebalances all servers in, creates the bucket(s),
    waits for memcached and loads ``keys_count`` items into each bucket.

    Fix: the original assigned unused locals ``master`` (it used
    ``self.master`` everywhere) and ``credentials``; both removed.
    """
    replicas = self.input.param("replicas", 1)
    keys_count = self.input.param("keys-count", 0)
    num_buckets = self.input.param("num-buckets", 1)
    bucket_name = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    # clear any auto-failover count carried over from an earlier test
    rest.reset_autofailover()
    ClusterOperationHelper.add_and_rebalance(self.servers, True)
    if num_buckets == 1:
        # single bucket gets two thirds of the node's memory quota
        bucket_ram = info.memoryQuota * 2 / 3
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replicas,
                           proxyPort=info.moxi)
    else:
        created = BucketOperationHelper.create_multiple_buckets(
            self.master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
示例2: test_reset_count
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def test_reset_count(self):
    """AUTOFAIL_TEST_8: after one auto-failover, resetting the count via
    REST must allow a second node to be auto failed over as well."""
    timeout = 30
    self._cluster_setup()
    master = self._servers[0]
    rest = RestConnection(master)
    rest.update_autofailover_settings(True, timeout)

    def stop_and_verify(victim, expected_failovers):
        # Stop couchbase on the victim and check the failover fires within
        # MAX_FAIL_DETECT_TIME of the configured timeout.
        begin = time.time()
        self._stop_couchbase(victim)
        AutoFailoverBaseTest.wait_for_failover_or_assert(
            master, expected_failovers, timeout, self)
        elapsed = time.time() - begin
        self.assertTrue(
            abs(elapsed - timeout) <= AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME,
            "{0} != {1}".format(elapsed, timeout))
        self.log.info("expected failover in {0} seconds, actual time {1} "
                      "seconds".format(timeout, elapsed))

    self.log.info("stopping the first server")
    stop_and_verify(self._servers[1], 1)
    self.log.info("resetting the autofailover count")
    rest.reset_autofailover()
    self.log.info("stopping the second server")
    stop_and_verify(self._servers[2], 2)
示例3: setUp
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def setUp(self):
    """Read the cbrecovery test parameters, then clear autofailover
    state and flush iptables rules on every server in the cluster."""
    super(cbrecovery, self).setUp()
    self._failover_count = self._input.param("fail_count", 0)
    self._add_count = self._input.param("add_count", 0)
    # failover_reason may also be "firewall_block"
    self.failover_reason = self._input.param("failover_reason", "stop_server")
    self.flag_val = self._input.param("setflag", 0)
    self.failed_nodes = []
    self._ifautofail = False
    for node in self._servers:
        RestConnection(node).reset_autofailover()
        shell = RemoteMachineShellConnection(node)
        out, err = shell.execute_command("iptables -F")
        shell.log_command_output(out, err)
        shell.disconnect()
示例4: _cluster_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def _cluster_setup(self):
    """Initialise the master node over REST, reset the autofailover
    count, and create the default bucket with two thirds of the quota."""
    master = self._servers[0]
    rest = RestConnection(master)
    node_info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=node_info.mcdMemoryReserved)
    rest.reset_autofailover()
    default_bucket = "default"
    rest.create_bucket(bucket=default_bucket,
                       ramQuotaMB=node_info.memoryQuota * 2 / 3,
                       proxyPort=node_info.moxi)
    self.assertTrue(
        BucketOperationHelper.wait_for_memcached(master, default_bucket),
        "wait_for_memcached failed")
示例5: _cluster_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def _cluster_setup(self):
    """Build the cluster: init the master, add all nodes, create one or
    more buckets, rebalance and load ``keys_count`` items per bucket.

    NOTE(review): the original indentation was lost; nesting below was
    reconstructed to match the sibling setups in this file -- confirm
    against the upstream source.
    """
    log = logger.Logger.get_logger()
    replicas = self._input.param("replicas", 1)
    keys_count = self._input.param("keys-count", 0)
    num_buckets = self._input.param("num-buckets", 1)
    bucket_name = "default"
    master = self._servers[0]
    credentials = self._input.membase_settings
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    # clear any auto-failover count from a previous test
    rest.reset_autofailover()
    ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
    # each bucket gets two thirds of the node quota
    bucket_ram = info.memoryQuota * 2 / 3
    if num_buckets == 1:
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replicas,
                           proxyPort=info.moxi)
        ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        buckets = rest.get_buckets()
    else:
        created = BucketOperationHelper.create_multiple_buckets(master, replicas, howmany=num_buckets)
        self.assertTrue(created, "unable to create multiple buckets")
        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
    # self.load_data(master, bucket_name, keys_count)
    for bucket in buckets:
        inserted_keys_cnt = self.load_data(master, bucket.name, keys_count)
        log.info('inserted {0} keys'.format(inserted_keys_cnt))
    msg = "rebalance failed after adding these nodes {0}".format(nodes)
    self.assertTrue(rest.monitorRebalance(), msg=msg)
    # NOTE(review): 'ready' here holds only the value from the last
    # wait_for_memcached call above -- appears redundant.
    self.assertTrue(ready, "wait_for_memcached failed")
示例6: auto_fail_over
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def auto_fail_over(self, master):
    """Fail every node in ``self.failed_nodes``, one at a time.

    For the first ``self._num_replicas`` nodes the failure is induced
    (server stop or firewall block) and the method waits for autofailover,
    then resets the autofailover counter so the next node can be auto
    failed over too.  Nodes beyond the replica count are force failed
    over via REST, since autofailover refuses when data loss could occur.

    NOTE(review): original indentation was lost; nesting reconstructed --
    confirm against upstream.  The two branches differ only in the
    failure mechanism (and self.sleep vs time.sleep).
    """
    _count_ = 1
    rest = RestConnection(master)
    if "stop_server" in self.failover_reason:
        for node in self.failed_nodes:
            """
            Autofailover will not auto failover nodes, if it could
            result in data loss, so force failover
            """
            if _count_ > self._num_replicas:
                # past the replica limit: force failover manually
                self.sleep(10)
                for item in rest.node_statuses():
                    if node.ip == item.ip:
                        rest.fail_over(item.id)
                        break
                _count_ += 1
                continue
            shell = RemoteMachineShellConnection(node)
            shell.stop_couchbase()
            shell.disconnect()
            self.wait_for_failover_or_assert(master, _count_, self.wait_timeout)
            # reset counter so autofailover can fire for the next node
            rest.reset_autofailover()
            _count_ += 1
    elif "firewall_block" in self.failover_reason:
        for node in self.failed_nodes:
            """
            Autofailover will not auto failover nodes, if it could
            result in data loss, so force failover
            """
            if _count_ > self._num_replicas:
                time.sleep(10)
                for item in rest.node_statuses():
                    if node.ip == item.ip:
                        rest.fail_over(item.id)
                        break
                _count_ += 1
                continue
            shell = RemoteMachineShellConnection(node)
            # reject all non-privileged TCP ports so the node appears down
            o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:65535 -j REJECT")
            shell.disconnect()
            self.wait_for_failover_or_assert(master, _count_, self.wait_timeout)
            rest.reset_autofailover()
            _count_ += 1
示例7: _cluster_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def _cluster_setup(self):
    """Initialise the master, add all servers to the cluster, create the
    default bucket and rebalance everything in."""
    master = self._servers[0]
    creds = self._input.membase_settings
    rest = RestConnection(master)
    node_info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=node_info.mcdMemoryReserved)
    rest.reset_autofailover()
    ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers,
                                                   creds, self)
    rest.create_bucket(bucket="default",
                       ramQuotaMB=node_info.memoryQuota * 2 / 3,
                       proxyPort=node_info.moxi)
    ready = BucketOperationHelper.wait_for_memcached(master, "default")
    nodes = rest.node_statuses()
    rest.rebalance(otpNodes=[n.id for n in nodes], ejectedNodes=[])
    self.assertTrue(
        rest.monitorRebalance(),
        msg="rebalance failed after adding these nodes {0}".format(nodes))
    self.assertTrue(ready, "wait_for_memcached failed")
示例8: test_settingsCluster
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def test_settingsCluster(self):
ops = self.input.param("ops", None)
source = 'ns_server'
user = self.master.rest_username
password = self.master.rest_password
rest = RestConnection(self.master)
if (ops == 'memoryQuota'):
expectedResults = {'memory_quota':512, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':12345, 'cluster_name':'', 'index_memory_quota':256}
rest.init_cluster_memoryQuota(expectedResults['user'], password, expectedResults['memory_quota'])
elif (ops == 'loadSample'):
expectedResults = {'name':'gamesim-sample', 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.addSamples()
#Get a REST Command for loading sample
elif (ops == 'enableAutoFailover'):
expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.update_autofailover_settings(True, expectedResults['timeout'])
elif (ops == 'disableAutoFailover'):
expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.update_autofailover_settings(False, expectedResults['timeout'])
elif (ops == 'resetAutoFailover'):
expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.reset_autofailover()
elif (ops == 'enableClusterAlerts'):
expectedResults = {"encrypt":False, "email_server:port":25, "host":"localhost", "email_server:user":"ritam", "alerts":["auto_failover_node", "auto_failover_maximum_reached"], \
"recipients":["[email protected]"], "sender":"[email protected]", "source":"ns_server", "user":"Administrator", 'ip':self.ipAddress, 'port':1234}
rest.set_alerts_settings('[email protected]', '[email protected]', 'ritam', 'password',)
elif (ops == 'disableClusterAlerts'):
rest.set_alerts_settings('[email protected]', '[email protected]', 'ritam', 'password',)
expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':1234}
rest.disable_alerts()
elif (ops == 'modifyCompactionSettingsPercentage'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:percentage":50,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50, viewFragmntThresholdPercentage=50)
elif (ops == 'modifyCompactionSettingsPercentSize'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"database_fragmentation_threshold:size":10,
"view_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:size":10,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
viewFragmntThresholdPercentage=50,
dbFragmentThreshold=10,
viewFragmntThreshold=10)
elif (ops == 'modifyCompactionSettingsTime'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"database_fragmentation_threshold:size":10,
"view_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:size":10,
"allowed_time_period:abort_outside":True,
"allowed_time_period:to_minute":15,
"allowed_time_period:from_minute":12,
"allowed_time_period:to_hour":1,
"allowed_time_period:from_hour":1,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234,
}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
viewFragmntThresholdPercentage=50,
dbFragmentThreshold=10,
viewFragmntThreshold=10,
allowedTimePeriodFromHour=1,
allowedTimePeriodFromMin=12,
allowedTimePeriodToHour=1,
allowedTimePeriodToMin=15,
allowedTimePeriodAbort='true')
elif (ops == "AddGroup"):
expectedResults = {'group_name':'add group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
#.........这里部分代码省略.........
示例9: AutoFailoverTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
class AutoFailoverTests(unittest.TestCase):
def setUp(self):
    """Collect the test input, connect to the master node and build the
    test cluster before each test case."""
    self._input = TestInputSingleton.input
    self.case_number = self._input.param("case_number", 0)
    self._servers = self._input.servers
    self.log = logger.Logger().get_logger()
    # first server from the test input acts as master / REST endpoint
    self.master = self._servers[0]
    self.rest = RestConnection(self.master)
    # base autofailover timeout in seconds; tests use it as-is or halved
    self.timeout = 60
    AutoFailoverBaseTest.common_setup(self._input, self)
    self._cluster_setup()
def tearDown(self):
    """Delegate all cleanup to the shared AutoFailoverBaseTest teardown."""
    AutoFailoverBaseTest.common_tearDown(self._servers, self)
def sleep(self, timeout=1, message=""):
self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
time.sleep(timeout)
def test_enable(self):
    """Enabling autofailover (30s timeout) must succeed and be reflected
    by the settings endpoint.

    Fix: use assertEqual -- assertEquals is a long-deprecated alias.
    """
    status = self.rest.update_autofailover_settings(True, self.timeout / 2)
    if not status:
        self.fail('failed to change autofailover_settings! See MB-7282')
    #read settings and verify
    settings = self.rest.get_autofailover_settings()
    self.assertEqual(settings.enabled, True)
def test_disable(self):
    """Disabling autofailover must succeed and be reflected by the
    settings endpoint.

    Fix: use assertEqual -- assertEquals is a long-deprecated alias.
    """
    status = self.rest.update_autofailover_settings(False, self.timeout)
    if not status:
        self.fail('failed to change autofailover_settings! See MB-7282')
    #read settings and verify
    settings = self.rest.get_autofailover_settings()
    self.assertEqual(settings.enabled, False)
def test_valid_timeouts(self):
    """Every timeout in the accepted range must be stored verbatim."""
    for timeout in (30, 31, 300, 3600):
        if not self.rest.update_autofailover_settings(True, timeout):
            self.fail('failed to change autofailover_settings! See MB-7282')
        # read the settings back and verify the timeout stuck
        settings = self.rest.get_autofailover_settings()
        self.assertTrue(settings.timeout == timeout)
def test_30s_timeout_firewall(self):
    """Firewall off node 1 with a 30 second autofailover timeout and
    expect exactly one failover within the detection window."""
    timeout = self.timeout / 2
    victim = self._servers[1]
    if not self.rest.update_autofailover_settings(True, timeout):
        self.fail('failed to change autofailover_settings! See MB-7282')
    self.sleep(5)
    RemoteUtilHelper.enable_firewall(victim)
    AutoFailoverBaseTest.wait_for_failover_or_assert(
        self.master, 1,
        timeout + AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, self)
def test_60s_timeout_firewall(self):
    """Firewall off node 1 with a 60 second autofailover timeout and
    expect exactly one failover within the detection window."""
    timeout = self.timeout
    victim = self._servers[1]
    if not self.rest.update_autofailover_settings(True, timeout):
        self.fail('failed to change autofailover_settings! See MB-7282')
    self.sleep(5)
    RemoteUtilHelper.enable_firewall(victim)
    AutoFailoverBaseTest.wait_for_failover_or_assert(
        self.master, 1,
        timeout + AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, self)
def test_30s_timeout_stop(self):
    """Stop couchbase on node 1 with a 30 second autofailover timeout.

    Fix: the original set ``timeout = self.timeout`` (60s), a copy-paste
    from test_60s_timeout_stop that made the two tests identical; the
    "30s" variant should halve the base timeout exactly as
    test_30s_timeout_firewall does.
    """
    timeout = self.timeout / 2
    server_fail = self._servers[1]
    status = self.rest.update_autofailover_settings(True, timeout)
    if not status:
        self.fail('failed to change autofailover_settings! See MB-7282')
    self.sleep(5)
    self._stop_couchbase(server_fail)
    AutoFailoverBaseTest.wait_for_failover_or_assert(
        self.master, 1,
        timeout + AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, self)
def test_60s_timeout_stop(self):
    """Stop couchbase on node 1 with a 60 second autofailover timeout
    and expect exactly one failover within the detection window."""
    timeout = self.timeout
    victim = self._servers[1]
    if not self.rest.update_autofailover_settings(True, timeout):
        self.fail('failed to change autofailover_settings! See MB-7282')
    self.sleep(5)
    self._stop_couchbase(victim)
    AutoFailoverBaseTest.wait_for_failover_or_assert(
        self.master, 1,
        timeout + AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, self)
def test_reset_count(self):
timeout = self.timeout / 2
server_fail1 = self._servers[1]
server_fail2 = self._servers[2]
status = self.rest.update_autofailover_settings(True, timeout)
if not status:
self.fail('failed to change autofailover_settings! See MB-7282')
self.sleep(5)
self.log.info("stopping the first server")
self._stop_couchbase(server_fail1)
AutoFailoverBaseTest.wait_for_failover_or_assert(self.master, 1, timeout + AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, self)
self.log.info("resetting the autofailover count")
if not self.rest.reset_autofailover():
self.fail('failed to reset autofailover count!')
#.........这里部分代码省略.........
示例10: AutoFailoverBaseTest
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
class AutoFailoverBaseTest(BaseTestCase):
MAX_FAIL_DETECT_TIME = 120
ORCHESTRATOR_TIMEOUT_BUFFER = 60
def setUp(self):
    """Prepare an autofailover test run: start the task managers, build
    the create/update/delete load generators, kick off background
    mutations, and pick the servers to fail/add/remove."""
    super(AutoFailoverBaseTest, self).setUp()
    self._get_params()
    self.rest = RestConnection(self.orchestrator)
    self.task_manager = TaskManager("Autofailover_thread")
    self.task_manager.start()
    self.node_failure_task_manager = TaskManager(
        "Nodes_failure_detector_thread")
    self.node_failure_task_manager.start()
    # generators share the 'auto-failover' key prefix; update/delete
    # ranges overlap the initially created items
    self.initial_load_gen = BlobGenerator('auto-failover',
                                          'auto-failover-',
                                          self.value_size,
                                          end=self.num_items)
    self.update_load_gen = BlobGenerator('auto-failover',
                                         'auto-failover-',
                                         self.value_size,
                                         end=self.update_items)
    self.delete_load_gen = BlobGenerator('auto-failover',
                                         'auto-failover-',
                                         self.value_size,
                                         start=self.update_items,
                                         end=self.delete_items)
    # initial load is synchronous; update/delete run in the background
    self._load_all_buckets(self.servers[0], self.initial_load_gen,
                           "create", 0)
    self._async_load_all_buckets(self.orchestrator,
                                 self.update_load_gen, "update", 0)
    self._async_load_all_buckets(self.orchestrator,
                                 self.delete_load_gen, "delete", 0)
    self.server_to_fail = self._servers_to_fail()
    self.servers_to_add = self.servers[self.nodes_init:self.nodes_init +
                                       self.nodes_in]
    self.servers_to_remove = self.servers[self.nodes_init -
                                          self.nodes_out:self.nodes_init]
    # self.node_monitor_task = self.start_node_monitors_task()
def tearDown(self):
    """Undo the node failures (restart servers, drop firewall rules),
    reset autofailover state, clean the cluster, then surface any
    exception raised by the node monitor task."""
    self.log.info("============AutoFailoverBaseTest teardown============")
    self._get_params()
    # fresh task manager: the one from setUp may have been shut down
    self.task_manager = TaskManager("Autofailover_thread")
    self.task_manager.start()
    self.server_to_fail = self._servers_to_fail()
    self.start_couchbase_server()
    self.sleep(10)
    self.disable_firewall()
    self.rest = RestConnection(self.orchestrator)
    self.rest.reset_autofailover()
    self.disable_autofailover()
    self._cleanup_cluster()
    super(AutoFailoverBaseTest, self).tearDown()
    if hasattr(self, "node_monitor_task"):
        # fail the test late if the background monitor hit an exception
        if self.node_monitor_task._exception:
            self.fail("{}".format(self.node_monitor_task._exception))
        self.node_monitor_task.stop = True
    self.task_manager.shutdown(force=True)
def enable_autofailover(self):
    """Turn autofailover on using the configured ``self.timeout``.

    :return: True if the setting was applied, otherwise False.
    """
    return self.rest.update_autofailover_settings(True, self.timeout)
def disable_autofailover(self):
    """Turn autofailover off (timeout parameter reset to 120 seconds).

    :return: True if the setting was applied, otherwise False.
    """
    return self.rest.update_autofailover_settings(False, 120)
def enable_autofailover_and_validate(self):
    """Enable autofailover, then read the settings back and check that
    it is enabled with exactly the requested timeout."""
    self.assertTrue(self.enable_autofailover(),
                    "Failed to enable autofailover_settings!")
    self.sleep(5)
    settings = self.rest.get_autofailover_settings()
    self.assertTrue(settings.enabled, "Failed to enable "
                                      "autofailover_settings!")
    self.assertEqual(self.timeout, settings.timeout,
                     "Incorrect timeout set. Expected timeout : {0} "
                     "Actual timeout set : {1}".format(self.timeout,
                                                       settings.timeout))
def disable_autofailover_and_validate(self):
"""
Disable autofailover setting and then validate if the setting was
disabled.
:return: Nothing
#.........这里部分代码省略.........
示例11: common_test_body
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import reset_autofailover [as 别名]
def common_test_body(self, replica, failover_reason, load_ratio, age, max_nodes):
log = logger.Logger.get_logger()
bucket_name = "default"
log.info("replica : {0}".format(replica))
log.info("failover_reason : {0}".format(failover_reason))
log.info("load_ratio : {0}".format(load_ratio))
log.info("age : {0}".format(age))
log.info("max_nodes : {0}".format(max_nodes))
master = self._servers[0]
log.info('picking server : {0} as the master'.format(master))
rest = RestConnection(master)
info = rest.get_nodes_self()
rest.init_cluster(username=master.rest_username,
password=master.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
rest.update_autofailover_settings(True, age, max_nodes)
rest.reset_autofailover()
bucket_ram = info.memoryQuota * 2 / 3
rest.create_bucket(bucket=bucket_name,
ramQuotaMB=bucket_ram,
replicaNumber=replica,
proxyPort=info.moxi)
ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
self.assertTrue(ready, "wait_for_memcached failed")
credentials = self._input.membase_settings
log.info("inserting some items in the master before adding any nodes")
distribution = {512: 0.4, 1 * 1024: 0.59, 5 * 1024: 0.01}
if load_ratio > 10:
distribution = {5 * 1024: 0.4, 10 * 1024: 0.5, 20 * 1024: 0.1}
ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
msg = "rebalance failed after adding these nodes {0}".format(nodes)
self.assertTrue(rest.monitorRebalance(), msg=msg)
inserted_count, rejected_count =\
MemcachedClientHelper.load_bucket(servers=self._servers,
ram_load_ratio=load_ratio,
value_size_distribution=distribution,
number_of_threads=1)
log.info('inserted {0} keys'.format(inserted_count))
nodes = rest.node_statuses()
# why are we in this while loop?
while (len(nodes) - replica) >= 1:
final_replication_state = RestHelper(rest).wait_for_replication(900)
msg = "replication state after waiting for up to 15 minutes : {0}"
self.log.info(msg.format(final_replication_state))
chosen = AutoFailoverBaseTest.choose_nodes(master, nodes, replica)
for node in chosen:
#let's do op
if failover_reason == 'stop_membase':
self.stop_membase(node)
log.info("10 seconds delay to wait for membase-server to shutdown")
#wait for 5 minutes until node is down
self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
msg="node status is not unhealthy even after waiting for 5 minutes")
elif failover_reason == "firewall":
self.enable_firewall(node)
self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
msg="node status is not unhealthy even after waiting for 5 minutes")
# list pre-autofailover stats
stats = rest.get_bucket_stats()
self.log.info("pre-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
AutoFailoverBaseTest.wait_for_failover_or_assert(master, replica, age, self)
# manually fail over any unhealthy:active nodes left, max that we should need to manually failover is replica-max_nodes
manual_failover_count = replica - max_nodes
for node in chosen:
self.log.info("checking {0}".format(node.ip))
if node.status.lower() == "unhealthy" and node.clusterMembership == "active":
msg = "node {0} not failed over and we are over out manual failover limit of {1}"
self.assertTrue(manual_failover_count > 0, msg.format(node.ip, (replica - max_nodes)))
self.log.info("manual failover {0}".format(node.ip))
rest.fail_over(node.id)
manual_failover_count -= 1
stats = rest.get_bucket_stats()
self.log.info("post-autofail - curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
self.assertTrue(stats["curr_items"] == inserted_count, "failover completed but curr_items ({0}) does not match inserted items ({1})".format(stats["curr_items"], inserted_count))
log.info("10 seconds sleep after autofailover before invoking rebalance...")
time.sleep(10)
rest.rebalance(otpNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in chosen])
msg="rebalance failed while removing failover nodes {0}".format(chosen)
self.assertTrue(rest.monitorRebalance(), msg=msg)
nodes = rest.node_statuses()
if len(nodes) / (1 + replica) >= 1:
final_replication_state = RestHelper(rest).wait_for_replication(900)
msg = "replication state after waiting for up to 15 minutes : {0}"
self.log.info(msg.format(final_replication_state))
self.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master,bucket_name,600),
msg="replication was completed but sum(curr_items) dont match the curr_items_total")
start_time = time.time()
stats = rest.get_bucket_stats()
#.........这里部分代码省略.........