This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.get_nodes. If you have been wondering how exactly RestConnection.get_nodes is used in practice, the curated code samples below should help. You can also explore the other methods of the membase.api.rest_client.RestConnection class.
The following section presents 15 code examples of RestConnection.get_nodes, ordered by popularity by default.
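Before the numbered examples, here is a minimal sketch of the pattern they all share: open a RestConnection against any node of the cluster and call get_nodes() to enumerate the members. The `master` variable here is a hypothetical testrunner-style server descriptor (anything carrying the REST host and credentials), and the node attributes used below (ip, port, memcached) are the ones the examples themselves rely on.

from membase.api.rest_client import RestConnection

# Minimal sketch: list the nodes of the cluster that `master` belongs to.
rest = RestConnection(master)   # connect to one node's REST endpoint
nodes = rest.get_nodes()        # one node object per cluster member
for node in nodes:
    # each node object carries the management and memcached ports
    print("{0}:{1} (memcached on {2})".format(node.ip, node.port, node.memcached))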
Example 1: direct_mc_bin_client
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
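This helper builds a raw memcached binary-protocol client for a server; when the node cannot be resolved via get_nodes_self(), it falls back to scanning the full node list returned by get_nodes().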
def direct_mc_bin_client(self, server, bucket, timeout=30):
    # use the MC bin client when not using the SDK client
    rest = RestConnection(server)
    node = None
    try:
        node = rest.get_nodes_self()
    except ValueError as e:
        self.log.info("could not connect to server {0}, will try scanning all nodes".format(server))
    if not node:
        nodes = rest.get_nodes()
        for n in nodes:
            if n.ip == server.ip and n.port == server.port:
                node = n
    if isinstance(server, dict):
        self.log.info("dict:{0}".format(server))
        self.log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
    else:
        self.log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vBuckets = RestConnection(server).get_vbuckets(bucket)
    if isinstance(server, dict):
        client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
    else:
        client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
    if vBuckets is not None:
        client.vbucket_count = len(vBuckets)
    else:
        client.vbucket_count = 0
    bucket_info = rest.get_bucket(bucket)
    return client
Example 2: persistence_verification
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
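This helper uses get_nodes() to determine which of the given servers are actually cluster members, then runs a persistence-verification thread per member and collects pass/fail results through a queue.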
def persistence_verification(servers, bucket, timeout_in_seconds=1260):
    log = logger.Logger.get_logger()
    verification_threads = []
    queue = Queue.Queue()
    rest = RestConnection(servers[0])
    nodes = rest.get_nodes()
    nodes_ip = []
    for node in nodes:
        nodes_ip.append(node.ip)
    for i in range(len(servers)):
        if servers[i].ip in nodes_ip:
            log.info("Server {0}:{1} part of cluster".format(
                servers[i].ip, servers[i].port))
            rest = RestConnection(servers[i])
            t = Thread(target=ClusterOperationHelper.persistence_verification_per_node,
                       name="verification-thread-{0}".format(servers[i]),
                       args=(rest, bucket, queue, timeout_in_seconds))
            verification_threads.append(t)
    for t in verification_threads:
        t.start()
    for t in verification_threads:
        t.join()
        log.info("thread {0} finished".format(t.name))
    while not queue.empty():
        item = queue.get()
        if item is False:
            return False
    return True
Example 3: failover
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
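This method fails over a random selection of non-master nodes; get_nodes() supplies the node list handed to wait_for_replication before the selected nodes are stopped and failed over.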
def failover(self, howmany):
    # check that all nodes are part of the cluster
    rest = RestConnection(self.servers[0])
    nodes = rest.node_statuses()
    if len(nodes) != len(self.servers):
        self.test.fail(num_nodes_mismatch.format(len(self.servers), len(nodes)))
    if len(nodes) - howmany < 2:
        self.test.fail(num_nodes_mismatch.format(len(nodes), howmany))
    master_node = rest.get_nodes_self()
    # when selecting nodes to fail, make sure we don't pick the master node
    selection = [n for n in nodes if n.id != master_node.id]
    shuffle(selection)
    failed = selection[0:howmany]
    for f in failed:
        self.log.info("will fail over node : {0}".format(f.id))
    if len(nodes) / (1 + howmany) >= 1:
        self.test.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=900),
                             msg="replication did not finish after 15 minutes")
    for f in failed:
        self._stop_server(f)
    self.log.info("10 seconds delay to wait for membase-server to shutdown")
    # wait up to 5 minutes until each node is reported as down
    for f in failed:
        if f.port == 8091:
            self.test.assertTrue(RestHelper(rest).wait_for_node_status(f, "unhealthy", 300),
                                 msg="node status is not unhealthy even after waiting for 5 minutes")
        self.test.assertTrue(rest.fail_over(f.id), msg="failover did not complete")
        self.log.info("failed over node : {0}".format(f.id))
    return failed
Example 4: verify_items_count
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
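This helper sums curr_items and the vBucket counts across every node returned by get_nodes() and checks the total against the cluster-wide curr_items_tot, allowing a small tolerance when more than one replica is configured.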
def verify_items_count(master, bucket):
    # get the bucket's replica count from REST
    rest = RestConnection(master)
    bucket_info = rest.get_bucket(bucket)
    replica_factor = bucket_info.numReplicas
    # print out vb_pending_num, vb_active_num and vb_replica_num as well
    master_stats = rest.get_bucket_stats(bucket)
    vbucket_active_sum = 0
    vbucket_replica_sum = 0
    vbucket_pending_sum = 0
    all_server_stats = []
    stats_received = 0
    nodes = rest.get_nodes()
    for server in nodes:
        # get the stats
        server_stats = rest.get_bucket_stats_for_node(bucket, server)
        if not server_stats:
            log.info("unable to get stats from {0}:{1}".format(server.ip, server.port))
        else:
            stats_received += 1
        all_server_stats.append((server, server_stats))
    if not stats_received:
        raise StatsUnavailableException()
    sum = 0
    for server, single_stats in all_server_stats:
        if not single_stats or "curr_items" not in single_stats:
            continue
        sum += single_stats["curr_items"]
        log.info("curr_items from {0}:{1} : {2}".format(server.ip, server.port, single_stats["curr_items"]))
        if 'vb_pending_num' in single_stats:
            vbucket_pending_sum += single_stats['vb_pending_num']
            log.info("vb_pending_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_pending_num"]))
        if 'vb_active_num' in single_stats:
            vbucket_active_sum += single_stats['vb_active_num']
            log.info("vb_active_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_active_num"]))
        if 'vb_replica_num' in single_stats:
            vbucket_replica_sum += single_stats['vb_replica_num']
            log.info("vb_replica_num from {0}:{1} : {2}".format(server.ip, server.port, single_stats["vb_replica_num"]))
    msg = "summation of vb_active_num : {0} vb_pending_num : {1} vb_replica_num : {2}"
    log.info(msg.format(vbucket_active_sum, vbucket_pending_sum, vbucket_replica_sum))
    msg = 'sum : {0} and sum * replica_factor ({1}) : {2}'
    log.info(msg.format(sum, replica_factor, (sum * (replica_factor + 1))))
    log.info('master_stats : {0}'.format(master_stats["curr_items_tot"]))
    delta = abs(sum * (replica_factor + 1) - master_stats["curr_items_tot"])
    if sum > 0:
        # fraction of the expected total (actives plus replicas) that is missing
        missing_percentage = delta * 1.0 / (sum * (replica_factor + 1))
    else:
        missing_percentage = 100
    log.info("delta : {0} missing_percentage : {1} replica_factor : {2}".format(delta, missing_percentage, replica_factor))
    if replica_factor > 1:
        if delta == 0 or missing_percentage < 0.005:
            return True
        return False
    else:
        return (sum * (replica_factor + 1)) == master_stats["curr_items_tot"]
Example 5: direct_client
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
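A module-level variant of Example 1: it also waits for the vBucket map, then authenticates the memcached client against the bucket with SASL PLAIN before returning it.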
def direct_client(server, bucket, timeout=30):
    log = logger.Logger.get_logger()
    rest = RestConnection(server)
    node = None
    try:
        node = rest.get_nodes_self()
    except ValueError as e:
        log.info("could not connect to server {0}, will try scanning all nodes".format(server))
    if not node:
        nodes = rest.get_nodes()
        for n in nodes:
            if n.ip == server.ip and n.port == server.port:
                node = n
    if isinstance(server, dict):
        log.info("dict:{0}".format(server))
        log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
    else:
        log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vBuckets = RestConnection(server).get_vbuckets(bucket)
    if isinstance(server, dict):
        client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
    else:
        client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
    client.vbucket_count = len(vBuckets)
    bucket_info = rest.get_bucket(bucket)
    # TODO: raise an exception if bucket_info is missing
    client.sasl_auth_plain(bucket_info.name.encode("ascii"), bucket_info.saslPassword.encode("ascii"))
    return client
Example 6: flushctl_set
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
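This helper propagates a flushctl engine parameter to every node returned by get_nodes(), reusing the master's REST credentials for each per-node call.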
def flushctl_set(master, key, val, bucket='default'):
    rest = RestConnection(master)
    servers = rest.get_nodes()
    for server in servers:
        _server = {"ip": server.ip, "port": server.port,
                   "username": master.rest_username,
                   "password": master.rest_password}
        ClusterOperationHelper.flushctl_set_per_node(_server, key, val, bucket)
Example 7: wait_for_vbuckets_ready_state
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
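This helper polls every memcached endpoint in the vBucket map until all vBuckets of the bucket report a ready state or the timeout expires.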
def wait_for_vbuckets_ready_state(node, bucket, timeout_in_seconds=300, log_msg=''):
    log = logger.Logger.get_logger()
    start_time = time.time()
    end_time = start_time + timeout_in_seconds
    ready_vbuckets = {}
    rest = RestConnection(node)
    servers = rest.get_nodes()
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vbucket_count = len(rest.get_vbuckets(bucket))
    vbuckets = rest.get_vbuckets(bucket)
    obj = VBucketAwareMemcached(rest, bucket)
    memcacheds, vbucket_map, vbucket_map_replica = obj.request_map(rest, bucket)
    # build a dictionary with key "ip:port" and value: the list of vbuckets hosted there
    server_dict = defaultdict(list)
    for everyID in range(0, vbucket_count):
        memcached_ip_port = str(vbucket_map[everyID])
        server_dict[memcached_ip_port].append(everyID)
    while time.time() < end_time and len(ready_vbuckets) < vbucket_count:
        for every_ip_port in server_dict:
            # retrieve the memcached ip and port
            ip, port = every_ip_port.split(":")
            client = MemcachedClient(ip, int(port), timeout=30)
            client.vbucket_count = len(vbuckets)
            bucket_info = rest.get_bucket(bucket)
            client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                   bucket_info.saslPassword.encode('ascii'))
            for i in server_dict[every_ip_port]:
                try:
                    (a, b, c) = client.get_vbucket_state(i)
                except mc_bin_client.MemcachedError as e:
                    ex_msg = str(e)
                    if "Not my vbucket" in ex_msg:
                        # trim the long vBucketMap dump out of the error message
                        ex_msg = ex_msg[:ex_msg.find("vBucketMap") + 12] + "..."
                    if e.status == memcacheConstants.ERR_NOT_MY_VBUCKET:
                        # may receive this while waiting for vbuckets; continue and retry
                        continue
                    log.error("%s: %s" % (log_msg, ex_msg))
                    continue
                except exceptions.EOFError:
                    # The client was disconnected for some reason. This can
                    # happen just after the bucket REST API has returned (before
                    # the buckets are created in each of the memcached processes).
                    # See http://review.couchbase.org/#/c/49781/ for details.
                    # Longer term, when we no longer disconnect clients in this
                    # state, this code should probably be removed.
                    log.error("got disconnected from the server, reconnecting")
                    client.reconnect()
                    client.sasl_auth_plain(bucket_info.name.encode('ascii'),
                                           bucket_info.saslPassword.encode('ascii'))
                    continue
                if c.find("\x01") > 0 or c.find("\x02") > 0:
                    ready_vbuckets[i] = True
                elif i in ready_vbuckets:
                    log.warning("vbucket state changed from active to {0}".format(c))
                    del ready_vbuckets[i]
            client.close()
    return len(ready_vbuckets) == vbucket_count
Example 8: test_start_collect_log
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
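This test optionally adds service nodes (sized against the current get_nodes() membership), then starts cluster-wide log collection over REST and verifies the collect, upload, and cancel outcomes.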
def test_start_collect_log(self):
    rest = RestConnection(self.master)
    shell = RemoteMachineShellConnection(self.master)
    # add service nodes to the cluster; the test will add them with or without the data service
    if self.add_services:
        self.add_services = [x.replace(":", ",") or x for x in self.add_services]
        if len(self.servers) > 1:
            nodes = rest.get_nodes()
            cwc_servers = copy.deepcopy(self.servers)
            if len(self.add_services) == 1:
                self._add_service_node(cwc_servers, len(self.add_services),
                                       services=self.add_services)
            elif len(self.add_services) > 1 and \
                    (len(nodes) + len(self.add_services)) <= len(cwc_servers):
                for service in self.add_services:
                    nodes = rest.get_nodes()
                    self._add_service_node(cwc_servers, len(self.add_services),
                                           services=[service])
            elif (len(nodes) + len(self.add_services)) > len(cwc_servers):
                self.fail("Not enough servers to add services nodes")
        else:
            self.log.error("Need more than 2 servers to run this test")
    if "*" not in str(self.collect_nodes) and self.nodes_init > 1:
        self.collect_nodes = self._generate_random_collecting_node(rest)
    status, content = rest.start_cluster_logs_collection(nodes=self.collect_nodes,
                                                         upload=self.upload, uploadHost=self.uploadHost,
                                                         customer=self.customer, ticket=self.ticket)
    if status:
        collected, uploaded, cancel_collect = \
            self._monitor_collecting_log(rest, timeout=1200)
        if collected:
            self._verify_log_file(rest)
        if self.upload and uploaded:
            self._verify_log_uploaded(rest)
        if self.cancel_collect:
            if cancel_collect:
                self.log.info("Log collection was cancelled")
            else:
                self.fail("Failed to cancel log collection")
        shell.disconnect()
    else:
        self.fail("ERROR: {0}".format(content))
Example 9: test_warmup_with_expiration
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
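This test loads documents with an expiry, waits for persistence on all nodes discovered via get_nodes(), then restarts memcached and exercises warmup.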
def test_warmup_with_expiration(self):
    self.num_items = self.input.param("items", 1000)
    expiry = self.input.param("expiry", 120)
    self._load_doc_data_all_buckets('create', expiry=expiry)
    # wait for draining of data before restart and warm up
    rest = RestConnection(self.servers[0])
    self.servers = rest.get_nodes()
    self._wait_for_stats_all_buckets(self.servers)
    self._stats_befor_warmup()
    time.sleep(120)
    self._restart_memcache()
    self._warmup()
Example 10: get_checkpoints_from_cluster
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
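This helper collects "checkpoint" stats from the memcached instance on each node returned by get_nodes() and merges the per-node results into a single dictionary.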
def get_checkpoints_from_cluster(self, master, bucket):
    parser = CheckpointStatParser()
    rest = RestConnection(master)
    servers = rest.get_nodes()
    merged = {}
    for server in servers:
        mc = MemcachedClientHelper.direct_client(server, bucket)
        per_node_checkpoint = mc.stats("checkpoint")
        parsed = parser.parse_output(per_node_checkpoint, server.id)
        merged = parser.merge_results(merged, parsed)
        mc.close()
    return merged
Example 11: test_observe_basic_data_load_delete
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
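This test loads documents, observes each key's persistence status through the memcached OBSERVE command, and validates the data with view queries, optionally interleaving serial or parallel deletes.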
def test_observe_basic_data_load_delete(self):
    tasks = []
    self._load_doc_data_all_buckets('create', 0, self.num_items)
    rest = RestConnection(self.servers[0])
    self.servers = rest.get_nodes()
    query_set = "true"
    # persist all the loaded data items
    self._wait_for_stats_all_buckets(self.servers)
    self.cluster.create_view(
        self.master, self.default_design_doc, self.default_view,
        self.default_bucket_name, self.wait_timeout * 2)
    client = MemcachedClientHelper.direct_client(self.servers[0], self.default_bucket_name)
    observe_with = self.input.param("observe_with", "")
    keys = ["observe%s" % (i) for i in range(0, self.num_items)]
    for key in keys:
        opaque, rep_time, persist_time, persisted = client.observe(key)
        self.log.info("##########key:-%s################" % (key))
        self.log.info("Persisted:- %s" % (persisted))
        self.log.info("Persist_Time:- %s" % (rep_time))
    query = {"stale": "false", "full_set": "true", "connection_timeout": 60000}
    self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, 10000, self.default_bucket_name)
    self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, self.default_bucket_name))
    # check whether observe has to run with delete, or with delete in parallel
    if len(observe_with) > 0:
        if observe_with == "delete":
            self.log.info("Deleting 0- %s number of items" % (self.num_items / 2))
            self._load_doc_data_all_buckets('delete', 0, self.num_items / 2)
            query_set = "true"
        elif observe_with == "delete_parallel":
            self.log.info("Deleting Parallel 0- %s number of items" % (self.num_items / 2))
            tasks = self._async_load_doc_data_all_buckets('delete', 0, self.num_items / 2)
            query_set = "false"
        for key in keys:
            opaque, rep_time, persist_time, persisted = client.observe(key)
            self.log.info("##########key:-%s################" % (key))
            self.log.info("Persisted:- %s" % (persisted))
            self.log.info("Persist_Time:- %s" % (rep_time))
        for task in tasks:
            task.result()
    # verify the persistence of data by querying the view
    stale = self.input.param("stale", "ok")
    if stale == "ok":
        query = {"stale": "ok", "full_set": query_set, "connection_timeout": 60000}
        self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, 10000, self.default_bucket_name)
    query = {"stale": "false", "full_set": query_set, "connection_timeout": 60000}
    self.cluster.query_view(self.master, "dev_Doc1", self.default_view.name, query, 10000, self.default_bucket_name)
    self.log.info("Observe Validation:- view: %s in design doc dev_Doc1 and in bucket %s" % (self.default_view, self.default_bucket_name))
Example 12: set_expiry_pager_sleep_time
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
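This helper sets the exp_pager_stime flush parameter on the memcached instance of every node returned by get_nodes().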
def set_expiry_pager_sleep_time(master, bucket, value=30):
    log = logger.Logger.get_logger()
    rest = RestConnection(master)
    servers = rest.get_nodes()
    for server in servers:
        # this is not bucket specific, so no need to pass in the bucket name
        log.info("connecting to memcached {0}:{1}".format(server.ip, server.memcached))
        mc = MemcachedClientHelper.direct_client(server, bucket)
        log.info("Set exp_pager_stime flush param on server {0}:{1}".format(server.ip, server.port))
        try:
            mc.set_flush_param("exp_pager_stime", str(value))
            log.info("Set exp_pager_stime flush param on server {0}:{1}".format(server.ip, server.port))
        except Exception as ex:
            traceback.print_exc()
            log.error("Unable to set exp_pager_stime flush param on memcached {0}:{1}".format(server.ip, server.memcached))
Example 13: verification_phase
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
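This phase stops the load generators, intersects the test's server list with the cluster membership reported by get_nodes(), waits for replication to settle, and then verifies item counts.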
def verification_phase(test, master):
    # stop the loaders
    SwapRebalanceBase.stop_load(test.loaders)
    test.log.info("DONE DATA ACCESS PHASE")
    test.log.info("VERIFICATION PHASE")
    rest = RestConnection(master)
    servers_in_cluster = []
    nodes = rest.get_nodes()
    for server in test.servers:
        for node in nodes:
            if node.ip == server.ip:
                servers_in_cluster.append(server)
    RebalanceHelper.wait_for_replication(servers_in_cluster, test.cluster_helper)
    SwapRebalanceBase.items_verification(test, master)
Example 14: verify_es_results
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
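This method walks the ordered destination clusters of an XDCR setup and compares the source nodes from get_nodes() against each destination via verify_es_stats().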
def verify_es_results(self, verification_count=10000):
    xd_ref = self.xd_ref
    rest = RestConnection(self.src_nodes[0])
    # check replication at the destination clusters
    dest_key_index = 1
    for key in xd_ref.ord_keys[1:]:
        if dest_key_index == xd_ref.ord_keys_len:
            break
        dest_key = xd_ref.ord_keys[dest_key_index]
        dest_nodes = xd_ref._clusters_dic[dest_key]
        src_nodes = rest.get_nodes()
        self.verify_es_stats(src_nodes, dest_nodes, verification_count)
        dest_key_index += 1
Example 15: wait_for_stats_on_all
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_nodes [as alias]
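This helper waits until the given stat reaches the expected value on every node returned by get_nodes(), returning False as soon as any node times out.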
def wait_for_stats_on_all(master, bucket, stat_key, stat_value, timeout_in_seconds=120,
                          fn=None):
    fn = fn or RebalanceHelper.wait_for_stats
    rest = RestConnection(master)
    servers = rest.get_nodes()
    verified = False
    start_time = time.time()
    for server in servers:
        verified = fn(server, bucket, stat_key, stat_value,
                      timeout_in_seconds=timeout_in_seconds)
        if not verified:
            log.info("bucket {0}: stat_key {1} for server {2} timed out in {3}".format(
                bucket, stat_key, server.ip, time.time() - start_time))
            break
    return verified