本文整理汇总了Python中membase.api.rest_client.RestConnection.cluster_status方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.cluster_status方法的具体用法?Python RestConnection.cluster_status怎么用?Python RestConnection.cluster_status使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.cluster_status方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def setUp(self):
    """Prepare the XDCR fixture: wait for every node to leave warmup, then
    wipe all buckets and create fresh 'default' buckets on source/destination."""
    super(XDCRTests, self).setUp()
    self.bucket = Bucket()
    self._initialize_nodes()
    self.master = self.servers[0]
    for server in self.servers:
        rest = RestConnection(server)
        status = rest.cluster_status()
        self.log.info("Initial status of {0} cluster is {1}".format(server.ip,
                      status['nodes'][0]['status']))
        # Poll until the first node of this server's cluster is out of warmup.
        while status['nodes'][0]['status'] == 'warmup':
            self.log.info("Waiting for cluster to become healthy")
            self.sleep(5)
            status = rest.cluster_status()
        self.log.info("current status of {0} is {1}".format(server.ip,
                      status['nodes'][0]['status']))
    # Delete all buckets before creating new buckets
    self.log.info("Deleting all existing buckets")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    self.log.info("Creating new buckets")
    src_bucket = self.input.param('src_bucket', self.bucket)
    dest_bucket = self.input.param('dest_bucket', self.bucket)
    if src_bucket:
        RestConnection(self.servers[0]).create_bucket(bucket='default', ramQuotaMB=500)
    if dest_bucket:
        RestConnection(self.servers[1]).create_bucket(bucket='default', ramQuotaMB=500)
    login_helper = BaseHelper(self)
    login_helper.login()
示例2: get_indexer_mem_quota
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def get_indexer_mem_quota(self):
    """Read the indexer memory quota from the cluster status.

    :return:
    int indexer memory quota
    """
    status = RestConnection(self.oomServer).cluster_status()
    return int(status['indexMemoryQuota'])
示例3: get_failover_count
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def get_failover_count(master):
    """Return how many cluster nodes are in the 'inactiveFailed' membership state."""
    nodes = RestConnection(master).cluster_status()['nodes']
    # check for inactiveFailed
    return sum(1 for node in nodes
               if node['clusterMembership'] == "inactiveFailed")
示例4: get_failover_count
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def get_failover_count(self, master):
    """Count nodes in the 'inactiveFailed' state, logging each node's membership."""
    status = RestConnection(master).cluster_status()
    failed = 0
    # check for inactiveFailed
    for entry in status['nodes']:
        self.log.info("'clusterMembership' for node {0} is {1}".format(entry["otpNode"], entry['clusterMembership']))
        if entry['clusterMembership'] == "inactiveFailed":
            failed += 1
    return failed
示例5: get_failover_count
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def get_failover_count(master):
    """Count nodes in the 'inactiveFailed' state, logging each node's membership."""
    log = logger.Logger.get_logger()
    status = RestConnection(master).cluster_status()
    failed = 0
    # check for inactiveFailed
    for entry in status["nodes"]:
        log.info("'clusterMembership' for node {0} is {1}".format(entry["otpNode"], entry["clusterMembership"]))
        if entry["clusterMembership"] == "inactiveFailed":
            failed += 1
    return failed
示例6: wait_for_failover_or_assert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def wait_for_failover_or_assert(master, autofailover_count, age, testcase):
    """Sleep past the autofailover window (age + 30s), then assert that exactly
    `autofailover_count` nodes are in the 'inactiveFailed' state."""
    wait_seconds = age + 30
    testcase.log.info("waiting for {0} seconds for autofailover".format(wait_seconds))
    time.sleep(wait_seconds)
    status = RestConnection(master).cluster_status()
    failed = 0
    # check for inactiveFailed
    for node in status['nodes']:
        testcase.log.info("{0} is in state {1} and {2}".format(node['hostname'], node['status'], node['clusterMembership']))
        if node['clusterMembership'] == "inactiveFailed":
            failed += 1
    testcase.assertTrue(failed == autofailover_count, "{0} nodes failed over, expected {1}".format(failed, autofailover_count))
示例7: wait_for_failover_or_assert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def wait_for_failover_or_assert(self, master, autofailover_count, timeout):
    """Poll until the expected number of nodes have failed over, or fail the test.

    Polls ``get_failover_count`` every 30 seconds for up to ``timeout`` seconds.

    :param master: server whose REST endpoint reports cluster status
    :param autofailover_count: number of nodes expected to be failed over
    :param timeout: maximum seconds to wait for the failover to be observed
    """
    time_start = time.time()
    # BUG FIX: `timeout` was accepted but ignored — the deadline was
    # hard-coded to time_start + 300.  Honor the caller's timeout.
    time_max_end = time_start + timeout
    failover_count = 0
    while time.time() < time_max_end:
        failover_count = self.get_failover_count(master)
        if failover_count == autofailover_count:
            break
        self.sleep(30)
    if failover_count != autofailover_count:
        # Dump pools/default for post-mortem diagnosis before failing.
        rest = RestConnection(master)
        self.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
        self.fail("{0} node(s) failed over, expected {1} in {2} seconds".
                  format(failover_count, autofailover_count, time.time() - time_start))
    else:
        self.log.info("{0} node(s) failed over as expected".format(failover_count))
示例8: _get_current_auto_compaction_percentage
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def _get_current_auto_compaction_percentage(self):
    """ check at bucket level and cluster level for compaction percentage """
    rest = RestConnection(self.server)
    settings = rest.get_bucket_json(self.bucket)["autoCompactionSettings"]
    if settings == False:
        # No bucket-level override: fall back to cluster-wide compaction settings.
        settings = rest.cluster_status()["autoCompactionSettings"]
    return settings["viewFragmentationThreshold"]["percentage"]
示例9: wait_for_failover_or_assert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
    """Poll every 2s until `autofailover_count` nodes have failed over;
    on timeout, dump UI logs and pools/default, then fail the test."""
    started = time.time()
    deadline = started + timeout
    failed_over = 0
    while time.time() < deadline:
        failed_over = AutoFailoverBaseTest.get_failover_count(master)
        if failed_over == autofailover_count:
            testcase.log.info("{0} nodes failed over as expected".format(failed_over))
            testcase.log.info("expected failover in {0} seconds, actual time {1} seconds".format(
                timeout - AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, time.time() - started))
            return
        time.sleep(2)
    # Timed out: gather diagnostics before failing.
    rest = RestConnection(master)
    rest.print_UI_logs()
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail("{0} nodes failed over, expected {1} in {2} seconds".
                  format(failed_over, autofailover_count, time.time() - started))
示例10: wait_for_failover_or_assert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
    """Wait up to ``timeout`` + 60s grace for the expected autofailover count.

    :param master: node whose REST API is polled for cluster status and logs
    :param autofailover_count: expected number of failed-over nodes
    :param timeout: base number of seconds to wait (a 60s grace is added)
    :param testcase: the running test, used for logging and for failing
    """
    time_start = time.time()
    time_max_end = time_start + timeout + 60
    failover_count = 0
    while time.time() < time_max_end:
        failover_count = AutoFailoverBaseTest.get_failover_count(master)
        if failover_count == autofailover_count:
            break
        time.sleep(2)
    if failover_count != autofailover_count:
        # Dump the UI log and pools/default for diagnosis before failing.
        rest = RestConnection(master)
        testcase.log.info("Latest logs from UI:")
        for i in rest.get_logs():
            testcase.log.error(i)
        testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
        testcase.fail("{0} nodes failed over, expected {1} in {2} seconds".
                      format(failover_count, autofailover_count, time.time() - time_start))
    else:
        # BUG FIX: message was the literal "{O} nodes failed over as expected"
        # (letter O, no .format() call), so the count was never interpolated.
        testcase.log.info("{0} nodes failed over as expected".format(failover_count))
示例11: wait_for_warmup_or_assert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def wait_for_warmup_or_assert(master, warmup_count, timeout, testcase):
    """Poll every 2s until exactly `warmup_count` nodes of the first bucket
    report 'warmup' status; on timeout, dump diagnostics and fail the test."""
    started = time.time()
    deadline = started + timeout
    bucket_name = testcase.rest.get_buckets()[0].name
    while time.time() < deadline:
        warming = 0
        for node in testcase.rest.get_bucket(bucket_name).nodes:
            if node.status == 'warmup':
                warming += 1
        if warming == warmup_count:
            testcase.log.info("{0} nodes warmup as expected".format(warming))
            testcase.log.info("expected warmup in {0} seconds, actual time {1} seconds".format(
                timeout - AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                time.time() - started))
            return
        time.sleep(2)
    # Timed out: gather diagnostics before failing.
    rest = RestConnection(master)
    rest.print_UI_logs()
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail("{0} nodes warmup, expected {1} in {2} seconds".
                  format(warming, warmup_count, time.time() - started))
示例12: testSettingCompacttion
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import cluster_status [as 别名]
def testSettingCompacttion(self):
    '''setting-compacttion OPTIONS:
    --compaction-db-percentage=PERCENTAGE at which point database compaction is triggered
    --compaction-db-size=SIZE[MB] at which point database compaction is triggered
    --compaction-view-percentage=PERCENTAGE at which point view compaction is triggered
    --compaction-view-size=SIZE[MB] at which point view compaction is triggered
    --compaction-period-from=HH:MM allow compaction time period from
    --compaction-period-to=HH:MM allow compaction time period to
    --enable-compaction-abort=[0|1] allow compaction abort when time expires
    --enable-compaction-parallel=[0|1] allow parallel compaction for database and view'''
    # CLI flags share their names with the test-input parameters, so read
    # them table-driven (None means the flag is omitted from the command).
    option_names = (
        "compaction-db-percentage",
        "compaction-db-size",
        "compaction-view-percentage",
        "compaction-view-size",
        "compaction-period-from",
        "compaction-period-to",
        "enable-compaction-abort",
        "enable-compaction-parallel",
    )
    option_values = dict((name, self.input.param(name, None)) for name in option_names)
    bucket = self.input.param("bucket", "default")
    output = self.input.param("output", '')
    rest = RestConnection(self.master)
    remote_client = RemoteMachineShellConnection(self.master)
    self.testBucketCreation()
    # NOTE(review): "compacttion" spelling is preserved — it is the command
    # string this test exercises; confirm against the CLI under test.
    cli_command = "setting-compacttion"
    options = "--bucket={0}".format(bucket)
    for name in option_names:
        if option_values[name] is not None:
            options += " --{0}={1}".format(name, option_values[name])
    output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options,
                                                        cluster_host="localhost",
                                                        user="Administrator", password="password")
    self.assertEqual(output, ['SUCCESS: bucket-edit'])
    cluster_status = rest.cluster_status()
    remote_client.disconnect()