This article collects typical usage examples of the Python method lib.membase.api.rest_client.RestConnection.node_statuses. If you are unsure what RestConnection.node_statuses does, how to call it, or want to see it used in real code, the hand-picked method examples below may help. You can also look further into the containing class, lib.membase.api.rest_client.RestConnection, for more context.
The following shows 1 code example of the RestConnection.node_statuses method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
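Before the full example, a minimal sketch of the typical call pattern may help (this sketch is not taken from the example itself): node_statuses() is called on a RestConnection built from one of the cluster's servers and returns one entry per node currently in the cluster. The master variable and the id/status attributes used below are illustrative assumptions, not details guaranteed by this page.

from lib.membase.api.rest_client import RestConnection

# 'master' stands for a server object carrying the usual ip/port/REST credentials
# (an assumption for this sketch, not defined here).
rest = RestConnection(master)
otp_nodes = rest.node_statuses()   # one entry per node already joined to the cluster
for node in otp_nodes:
    # 'id' and 'status' are assumed attribute names on the returned node objects,
    # e.g. "ns_1@10.17.1.2 healthy"
    print(node.id, node.status)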
Example 1: CBASBaseTest
# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Alternatively: from lib.membase.api.rest_client.RestConnection import node_statuses [as alias]
class CBASBaseTest(BaseTestCase):
    def setUp(self, add_default_cbas_node=True):
        self.log = logger.Logger.get_logger()
        if self._testMethodDoc:
            self.log.info("\n\nStarting Test: %s \n%s" % (self._testMethodName, self._testMethodDoc))
        else:
            self.log.info("\n\nStarting Test: %s" % (self._testMethodName))
        super(CBASBaseTest, self).setUp()
        self.cbas_node = self.input.cbas
        # Split the provided servers by service so cbas and kv nodes can be handled separately.
        self.cbas_servers = []
        self.kv_servers = []
        for server in self.servers:
            if "cbas" in server.services:
                self.cbas_servers.append(server)
            if "kv" in server.services:
                self.kv_servers.append(server)
        self.analytics_helper = AnalyticsHelper()
        self._cb_cluster = self.cluster
        self.travel_sample_docs_count = 31591
        self.beer_sample_docs_count = 7303
        invalid_ip = '10.111.151.109'
        self.cb_bucket_name = self.input.param('cb_bucket_name', 'travel-sample')
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.expected_error = self.input.param("error", None)
        if self.expected_error:
            self.expected_error = self.expected_error.replace("INVALID_IP", invalid_ip)
            self.expected_error = self.expected_error.replace("PORT", self.master.port)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP', invalid_ip) if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name", 'travel_ds')
        self.cbas_bucket_name_invalid = self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset', False)
        self.disconnect_if_connected = self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = self.input.param('cbas_dataset_name_invalid', self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection', False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.index_name = self.input.param('index_name', None)
        self.index_fields = self.input.param('index_fields', None)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.otpNodes = []
        self.rest = RestConnection(self.master)
        self.log.info("Setting the min possible memory quota so that adding more nodes to the cluster wouldn't be a problem.")
        self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=MIN_KV_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=FTS_QUOTA)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota', memoryQuota=CBAS_QUOTA)
        # Drop any existing buckets and datasets
        if self.cbas_node:
            self.cleanup_cbas()
        if not self.cbas_node and len(self.cbas_servers) >= 1:
            self.cbas_node = self.cbas_servers[0]
            if "cbas" in self.master.services:
                self.cleanup_cbas()
            if add_default_cbas_node:
                if self.master.ip != self.cbas_node.ip:
                    self.otpNodes.append(self.add_node(self.cbas_node))
                else:
                    # The chosen cbas node is the master itself, so it is already part of
                    # the cluster; just record the current node statuses.
                    self.otpNodes = self.rest.node_statuses()
                    ''' This cbas cleanup is actually not needed.
                        When a node is added to the cluster, it is automatically cleaned up.'''
                    self.cleanup_cbas()
                self.cbas_servers.remove(self.cbas_node)
        self.log.info("============== CBAS_BASE setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))

    def create_default_bucket(self):
        node_info = self.rest.get_nodes_self()
        if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
            ram_available = node_info.memoryQuota
        self.bucket_size = ram_available - 1
        default_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                    replicas=self.num_replicas, bucket_type=self.bucket_type,
                                                    enable_replica_index=self.enable_replica_index,
                                                    eviction_policy=self.eviction_policy, lww=self.lww)
        self.cluster.create_default_bucket(default_params)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                   num_replicas=self.num_replicas, bucket_size=self.bucket_size,
                                   eviction_policy=self.eviction_policy, lww=self.lww,
                                   type=self.bucket_type))
        if self.enable_time_sync:
            self._set_time_sync_on_buckets(['default'])
#......... part of the code omitted here .........