本文整理汇总了Python中lib.membase.api.rest_client.RestConnection.get_nodes_self方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_nodes_self方法的具体用法?Python RestConnection.get_nodes_self怎么用?Python RestConnection.get_nodes_self使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lib.membase.api.rest_client.RestConnection的用法示例。
在下文中一共展示了RestConnection.get_nodes_self方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: start
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def start(self, nodes, bucket, pnames, name, client_id='',
          collect_server_stats=True, ddoc=None, clusters=None):
    """Launch all stat-collection daemon threads against the cluster.

    Records the task bookkeeping dict in ``self._task``, snapshots the data
    path of the first node, then spawns one daemon thread per collector.
    Build/machine stats and atop are kicked off once at the end.
    """
    self._task = {"state": "running", "threads": [], "name": name,
                  "time": time.time(), "ops": [], "totalops": [],
                  "ops-temp": [], "latency": {}, "data_size_stats": []}
    # Data path is read from a single node; assumes it is representative.
    node_info = RestConnection(nodes[0]).get_nodes_self()
    self.data_path = node_info.storage[0].get_data_path()
    self.client_id = str(client_id)
    self.nodes = nodes
    self.bucket = bucket

    # (target, thread name, positional args) for each collector to launch.
    collectors = []
    if collect_server_stats:
        collectors += [
            (self.membase_stats, "membase", ()),
            (self.system_stats, "system", (pnames, )),
            (self.iostats, "iostats", ()),
            (self.ns_server_stats, "ns_server", ()),
            (self.get_bucket_size, "bucket_size", ()),
            (self.rebalance_progress, "rebalance_progress", ()),
        ]
    if ddoc is not None:
        # View/index collectors only make sense when a design doc is given.
        collectors.append((self.indexing_time_stats, "index_time", (ddoc, )))
        collectors.append((self.indexing_throughput_stats, "index_thr", ()))
    if clusters:
        self.clusters = clusters
        collectors.append((self.xdcr_lag_stats, "xdcr_lag_stats", ()))

    for target, thread_name, args in collectors:
        worker = Thread(target=target, name=thread_name, args=args)
        worker.daemon = True
        self._task["threads"].append(worker)
        worker.start()

    # Build/machine stats are captured once, from one node of the cluster.
    self.build_stats(nodes)
    self.machine_stats(nodes)
    # Start atop monitoring.
    self.start_atop()
示例2: start
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def start(self, nodes, bucket, pnames, name, frequency, client_id='',
          collect_server_stats=True, ddoc=None):
    """This function starts collecting stats from all nodes with the given
    frequency.

    Spawns one thread per collector (membase, system, iostats, ns_server,
    bucket size, and optionally view/indexing stats when ``ddoc`` is given)
    and records them in ``self._task["threads"]`` so they can be joined on
    stop. Build/machine stats and atop are started once at the end.
    """
    self._task = {"state": "running", "threads": [], "name": name,
                  "time": time.time(), "ops": [], "totalops": [],
                  "ops-temp": [], "latency": {}, "data_size_stats": []}
    rest = RestConnection(nodes[0])
    # Data path is read from a single node; assumes it is representative.
    info = rest.get_nodes_self()
    self.data_path = info.storage[0].get_data_path()
    self.client_id = str(client_id)
    self.nodes = nodes
    if collect_server_stats:
        mbstats_thread = Thread(target=self.membase_stats,
                                args=(nodes, bucket, 60, self._verbosity))
        mbstats_thread.start()
        sysstats_thread = Thread(target=self.system_stats,
                                 args=(nodes, pnames, frequency, self._verbosity))
        sysstats_thread.start()
        iostats_thread = Thread(target=self.iostats,
                                args=(nodes, 10, self._verbosity))
        iostats_thread.start()
        ns_server_stats_thread = Thread(target=self.ns_server_stats,
                                        args=(nodes, bucket, 60))
        ns_server_stats_thread.start()
        bucket_size_thread = Thread(target=self.get_bucket_size,
                                    args=(bucket, nodes, frequency))
        bucket_size_thread.start()
        # BUG FIX: iostats_thread was started but never tracked, so it was
        # never joined on stop; include it with the other collectors.
        self._task["threads"] = [sysstats_thread, ns_server_stats_thread,
                                 bucket_size_thread, mbstats_thread,
                                 iostats_thread]
    if ddoc is not None:
        view_stats_thread = Thread(target=self.collect_indexing_stats,
                                   args=(nodes, bucket, ddoc, frequency))
        indexing_stats_thread = Thread(target=self.measure_indexing_throughput,
                                       args=(nodes, ))
        view_stats_thread.start()
        indexing_stats_thread.start()
        self._task["threads"].append(view_stats_thread)
        self._task["threads"].append(indexing_stats_thread)
    # Getting build/machine stats from only one node in the cluster
    self.build_stats(nodes)
    self.machine_stats(nodes)
    # Start atop
    self.start_atop()
示例3: CBASBaseTest
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
class CBASBaseTest(BaseTestCase):
def setUp(self, add_defualt_cbas_node = True):
    """Prepare a cluster for CBAS (analytics) tests.

    Partitions the configured servers by service, reads all test parameters
    from ``self.input``, shrinks the service memory quotas so extra nodes
    can be added, cleans up any pre-existing CBAS state, and (optionally)
    adds a dedicated CBAS node to the cluster.

    NOTE(review): the parameter name ``add_defualt_cbas_node`` is misspelled
    ("defualt" -> "default") but is part of the public interface, so it is
    left unchanged.
    """
    self.log = logger.Logger.get_logger()
    # Log the test docstring when present so each run is self-describing.
    if self._testMethodDoc:
        self.log.info("\n\nStarting Test: %s \n%s"%(self._testMethodName,self._testMethodDoc))
    else:
        self.log.info("\n\nStarting Test: %s"%(self._testMethodName))
    super(CBASBaseTest, self).setUp()
    # Partition the configured servers by the services they advertise.
    self.cbas_node = self.input.cbas
    self.cbas_servers = []
    self.kv_servers = []
    for server in self.servers:
        if "cbas" in server.services:
            self.cbas_servers.append(server)
        if "kv" in server.services:
            self.kv_servers.append(server)
    self.analytics_helper = AnalyticsHelper()
    self._cb_cluster = self.cluster
    # Expected document counts of the stock sample buckets.
    self.travel_sample_docs_count = 31591
    self.beer_sample_docs_count = 7303
    # Deliberately unreachable address, substituted into negative-test params.
    invalid_ip = '10.111.151.109'
    self.cb_bucket_name = self.input.param('cb_bucket_name', 'travel-sample')
    self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
    self.cb_bucket_password = self.input.param('cb_bucket_password', None)
    self.expected_error = self.input.param("error", None)
    if self.expected_error:
        # Fill in the INVALID_IP / PORT placeholders of the error template.
        # NOTE(review): assumes self.master.port is a str — str.replace
        # would raise TypeError on an int; confirm against the framework.
        self.expected_error = self.expected_error.replace("INVALID_IP",invalid_ip)
        self.expected_error = self.expected_error.replace("PORT",self.master.port)
    self.cb_server_ip = self.input.param("cb_server_ip", None)
    self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP',invalid_ip) if self.cb_server_ip is not None else None
    self.cbas_dataset_name = self.input.param("cbas_dataset_name", 'travel_ds')
    # "invalid" variants default to the valid names unless overridden.
    self.cbas_bucket_name_invalid = self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
    self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
    self.skip_create_dataset = self.input.param('skip_create_dataset', False)
    self.disconnect_if_connected = self.input.param('disconnect_if_connected', False)
    self.cbas_dataset_name_invalid = self.input.param('cbas_dataset_name_invalid', self.cbas_dataset_name)
    self.skip_drop_connection = self.input.param('skip_drop_connection',False)
    self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
    self.query_id = self.input.param('query_id',None)
    self.mode = self.input.param('mode',None)
    self.num_concurrent_queries = self.input.param('num_queries', 5000)
    self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100)
    self.compiler_param = self.input.param('compiler_param', None)
    self.compiler_param_val = self.input.param('compiler_param_val', None)
    self.expect_reject = self.input.param('expect_reject', False)
    self.expect_failure = self.input.param('expect_failure', False)
    self.index_name = self.input.param('index_name', None)
    self.index_fields = self.input.param('index_fields', None)
    if self.index_fields:
        # Index fields arrive as a single "-"-separated string.
        self.index_fields = self.index_fields.split("-")
    self.otpNodes = []
    self.rest = RestConnection(self.master)
    # Shrink every service quota to its minimum so adding more nodes to the
    # cluster cannot fail on insufficient memory.
    self.log.info("Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
    self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=MIN_KV_QUOTA)
    self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=FTS_QUOTA)
    self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
    self.rest.set_service_memoryQuota(service='cbasMemoryQuota', memoryQuota=CBAS_QUOTA)
    # Drop any existing buckets and datasets
    if self.cbas_node:
        self.cleanup_cbas()
    # No explicit CBAS node configured: fall back to the first server that
    # advertises the cbas service.
    if not self.cbas_node and len(self.cbas_servers)>=1:
        self.cbas_node = self.cbas_servers[0]
        if "cbas" in self.master.services:
            self.cleanup_cbas()
        if add_defualt_cbas_node:
            if self.master.ip != self.cbas_node.ip:
                self.otpNodes.append(self.add_node(self.cbas_node))
            else:
                self.otpNodes = self.rest.node_statuses()
                ''' This cbas cleanup is actually not needed.
                When a node is added to the cluster, it is automatically cleaned-up.'''
                self.cleanup_cbas()
            # The chosen node is now in the cluster; don't treat it as spare.
            self.cbas_servers.remove(self.cbas_node)
    self.log.info("============== CBAS_BASE setup was finished for test #{0} {1} ==============" \
                  .format(self.case_number, self._testMethodName))
def create_default_bucket(self):
    """Create the "default" bucket, sized just under the node memory quota.

    Reads the node's memoryQuota via REST, sizes the bucket 1 MB below it
    when a positive quota is reported, creates the bucket through the
    cluster helper, records it in ``self.buckets``, and optionally enables
    time sync on it.
    """
    node_info = self.rest.get_nodes_self()
    quota = node_info.memoryQuota
    if quota and int(quota) > 0:
        # Leave a 1 MB margin below the reported quota.
        self.bucket_size = quota - 1
    bucket_params = self._create_bucket_params(
        server=self.master, size=self.bucket_size,
        replicas=self.num_replicas, bucket_type=self.bucket_type,
        enable_replica_index=self.enable_replica_index,
        eviction_policy=self.eviction_policy, lww=self.lww)
    self.cluster.create_default_bucket(bucket_params)
    self.buckets.append(
        Bucket(name="default", authType="sasl", saslPassword="",
               num_replicas=self.num_replicas, bucket_size=self.bucket_size,
               eviction_policy=self.eviction_policy, lww=self.lww,
               type=self.bucket_type))
    if self.enable_time_sync:
        self._set_time_sync_on_buckets(['default'])
#.........这里部分代码省略.........