本文整理汇总了Python中membase.api.rest_client.RestConnection.node_statuses方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.node_statuses方法的具体用法?Python RestConnection.node_statuses怎么用?Python RestConnection.node_statuses使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.node_statuses方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def test_failover_add_back(self):
    """Fail over nodes, add them back with a recovery type, and rebalance
    while data mutations run in the background.

    Reads the ``recoveryType`` test-input param ("full" by default),
    fails over ``self.nodes_out_list`` (gracefully when ``self.graceful``
    is set), re-adds each failed-over node and starts a rebalance.
    """
    self.run_async_data()
    rest = RestConnection(self.master)
    recoveryType = self.input.param("recoveryType", "full")
    servr_out = self.nodes_out_list
    failover_task = self.cluster.async_failover([self.master],
                                                failover_nodes=servr_out,
                                                graceful=self.graceful)
    failover_task.result()
    nodes_all = rest.node_statuses()
    nodes = []
    if servr_out[0].ip == "127.0.0.1":
        # Single-machine ("cluster_run") setup: nodes differ by port only.
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if str(node.port) == failover_node.port])
    else:
        for failover_node in servr_out:
            nodes.extend([node for node in nodes_all
                          if node.ip == failover_node.ip])
    for node in nodes:
        self.log.info(node)
        rest.add_back_node(node.id)
        rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    self.run_mutation_operations_for_situational_tests()
    # NOTE(review): the rebalance task result is never awaited; the test
    # relies on this fixed sleep instead -- confirm that is intentional.
    self.sleep(120, "Wait for rebalance")
    for t in self.load_thread_list:
        # BUG FIX: check for None *before* touching the thread object; the
        # original called t.is_alive() first, which would raise on a None
        # entry, making the inner None-check unreachable.
        if t is not None and t.is_alive():
            t.signal = False
示例2: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def test_failover_add_back(self):
    """Fail over nodes, add them back (delta/full recovery), and rebalance.

    Runs the suite's "before" operations, performs the (optionally
    graceful) failover of ``self.nodes_out_list``, re-adds every
    failed-over node with the configured ``recoveryType``, rebalances,
    and finishes with the suite's "after" operations.
    """
    rest = RestConnection(self.master)
    recovery_type = self.input.param("recoveryType", "full")
    outgoing = self.nodes_out_list
    rest.node_statuses()  # refresh cluster view (result unused)
    pre_tasks = self.async_check_and_run_operations(buckets=self.buckets, before=True)
    for pre_task in pre_tasks:
        pre_task.result()
    self.cluster.async_failover([self.master], failover_nodes=outgoing,
                                graceful=self.graceful).result()
    cluster_nodes = rest.node_statuses()
    if outgoing[0].ip == "127.0.0.1":
        # cluster_run style deployment: nodes are distinguished by port.
        matched = [node for failed in outgoing for node in cluster_nodes
                   if str(node.port) == failed.port]
    else:
        matched = [node for failed in outgoing for node in cluster_nodes
                   if node.ip == failed.ip]
    for node in matched:
        self.log.info(node)
        rest.add_back_node(node.id)
        rest.set_recovery_type(otpNode=node.id, recoveryType=recovery_type)
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    self._run_aync_tasks()
    rebalance.result()
    self.run_after_operations()
示例3: rebalance_in
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def rebalance_in(self, how_many):
    """Add ``how_many`` servers that are not yet cluster members and rebalance.

    Picks candidates at random from ``self._servers[1:]``, adds them via
    REST, and starts a rebalance over all known nodes.

    Returns:
        The rebalance-monitor result (truthy on success), or False when
        the rebalance did not start.
    """
    master = self._servers[0]
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    node_ips = [node.ip for node in nodes]
    self.log.info("current nodes : {0}".format(node_ips))
    # Choose how_many servers from self._servers which are not part of
    # the current cluster membership.
    to_be_added = []
    selection = self._servers[1:]
    shuffle(selection)
    for server in selection:
        if server.ip not in node_ips:
            to_be_added.append(server)
            if len(to_be_added) == how_many:
                break
    for server in to_be_added:
        # BUG FIX: use the configured credentials instead of the hard-coded
        # 'Administrator'/'password' pair, consistent with the module-level
        # rebalance_in helper, so the test works on any cluster setup.
        rest.add_node(master.rest_username, master.rest_password, server.ip)
    # Check membership and kick off the rebalance.
    nodes = rest.node_statuses()
    otp_nodes = [node.id for node in nodes]
    started = rest.rebalance(otp_nodes, [])
    msg = "rebalance operation started ? {0}"
    self.log.info(msg.format(started))
    if started:
        result = rest.monitorRebalance()
        msg = "successfully rebalanced out selected nodes from the cluster ? {0}"
        self.log.info(msg.format(result))
        return result
    return False
示例4: common_test_body
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def common_test_body(self, replica, load_ratio, timeout=10):
    """Repeatedly grow the cluster back to full size and randomly shrink it.

    Loops for up to ``timeout`` minutes: when servers are missing from the
    cluster a random number of them is rebalanced in; once membership
    reaches at least 3/4 of the configured servers, a random number of
    nodes is rebalanced out.

    Args:
        replica: replica count (logged only).
        load_ratio: load ratio (logged only).
        timeout: total run time in minutes.
    """
    log = logger.Logger.get_logger()
    deadline = time.time() + 60 * timeout
    log.info("replica : {0}".format(replica))
    log.info("load_ratio : {0}".format(load_ratio))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    while time.time() < deadline:
        nodes = rest.node_statuses()
        missing = len(self._servers) - len(nodes)
        if missing > 0:
            add_count = Random().randint(1, missing) if missing > 1 else 1
            self.log.info("going to add {0} nodes".format(add_count))
            self.rebalance_in(how_many=add_count)
        else:
            self.log.info("all nodes already joined the cluster")
            time.sleep(30 * 60)
        # Only rebalance out when at least 3/4 of the servers are members.
        if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
            nodes = rest.node_statuses()
            out_count = Random().randint(1, len(nodes) - 1)
            self.log.info("going to remove {0} nodes".format(out_count))
            self.rebalance_out(how_many=out_count)
示例5: test_failover_add_back
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def test_failover_add_back(self):
    """Fail over nodes and add them back while index/KV operations run.

    Executes the initial index tasks, fails over ``self.nodes_out_list``
    (gracefully when configured), re-adds every failed-over node with the
    ``recoveryType`` test-input param, rebalances, and then waits on all
    the background KV and index tasks before the final index checks.
    """
    rest = RestConnection(self.master)
    recovery_type = self.input.param("recoveryType", "full")
    outgoing = self.nodes_out_list
    rest.node_statuses()  # refresh cluster view (result unused)
    self._run_initial_index_tasks()
    self.cluster.async_failover([self.master], failover_nodes=outgoing,
                                graceful=self.graceful).result()
    kv_tasks = self._run_kvops_tasks()
    pre_index_tasks = self._run_before_index_tasks()
    cluster_nodes = rest.node_statuses()
    on_localhost = outgoing[0].ip == "127.0.0.1"
    matched = []
    for failed in outgoing:
        if on_localhost:
            # cluster_run deployment: same IP, match by port instead.
            matched.extend(node for node in cluster_nodes
                           if str(node.port) == failed.port)
        else:
            matched.extend(node for node in cluster_nodes
                           if node.ip == failed.ip)
    for node in matched:
        self.log.info(node)
        rest.add_back_node(node.id)
        rest.set_recovery_type(otpNode=node.id, recoveryType=recovery_type)
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    mid_index_tasks = self._run_in_between_tasks()
    rebalance.result()
    self._run_tasks([kv_tasks, pre_index_tasks, mid_index_tasks])
    self._run_after_index_tasks()
示例6: rebalance_in
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def rebalance_in(servers, how_many, do_shuffle=True, monitor=True):
    """Add up to ``how_many`` servers not yet in the cluster and rebalance.

    Candidates are taken from ``servers[1:]`` (shuffled unless
    ``do_shuffle`` is False) and added with the master's credentials.

    Returns:
        (success, servers_rebalanced) tuple. When ``monitor`` is False the
        success flag is always True and only means the rebalance started.
    """
    log = logger.Logger.get_logger()
    master = servers[0]
    rest = RestConnection(master)
    member_addrs = ["{0}:{1}".format(n.ip, n.port) for n in rest.node_statuses()]
    log.info("current nodes : {0}".format(member_addrs))
    candidates = servers[1:]
    if do_shuffle:
        shuffle(candidates)
    additions = []
    for candidate in candidates:
        if "{0}:{1}".format(candidate.ip, candidate.port) not in member_addrs:
            additions.append(candidate)
        if len(additions) == int(how_many):
            break
    for new_server in additions:
        rest.add_node(master.rest_username, master.rest_password,
                      new_server.ip, new_server.port)
    started = rest.rebalance([n.id for n in rest.node_statuses()], [])
    log.info("rebalance operation started ? {0}".format(started))
    if monitor is not True:
        return True, additions
    if started:
        result = rest.monitorRebalance()
        log.info("successfully rebalanced in selected nodes from the cluster ? {0}".format(result))
        return result, additions
    return False, additions
示例7: test_rebalance_in
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def test_rebalance_in(self):
    """Incrementally rebalance nodes into the cluster while driving
    set/delete load, verifying kv-store contents before and after each
    rebalance step until every configured server has joined.
    """
    log = logger.Logger().get_logger()
    master = self._servers[0]
    # Test-input knobs (defaults in parentheses).
    num_of_docs = TestInputSingleton.input.param("num_of_docs", 100000)
    # NOTE(review): a default of 100000 for "replica" looks like a
    # copy-paste of num_of_docs' default -- confirm the intended value.
    replica = TestInputSingleton.input.param("replica", 100000)
    add_items_count = TestInputSingleton.input.param("num_of_creates", 30000)
    rebalance_in = TestInputSingleton.input.param("rebalance_in", 1)
    size = TestInputSingleton.input.param("item_size", 256)
    params = {"sizes": [size], "count": num_of_docs, "seed": str(uuid.uuid4())[:7]}
    RebalanceBaseTest.common_setup(self._input, self, replica=1)
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    bucket_data = {}
    generators = {}
    # One client-side kv store per bucket for later verification.
    for bucket in buckets:
        bucket_data[bucket.name] = {"kv_store": ClientKeyValueStore()}
    # Keep adding nodes until every configured server is a member.
    while len(rest.node_statuses()) < len(self._servers):
        for bucket in buckets:
            kv_store = bucket_data[bucket.name]["kv_store"]
            add_items_seed = str(uuid.uuid4())[:7]
            self._add_items(add_items_seed, bucket, add_items_count, kv_store)
            errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
            if errors:
                log.error("verification returned {0} errors".format(len(errors)))
            load_set_ops = {"ops": "set", "bucket": bucket.name}
            load_set_ops.update(params)
            # Delete a fifth of the just-added items (same seed).
            load_delete_ops = {
                "ops": "delete",
                "bucket": bucket.name,
                "sizes": [size],
                "count": add_items_count / 5,
                "seed": add_items_seed,
            }
            # Background set-load thread.
            thread = RebalanceDataGenerator.start_load(
                rest, bucket.name, RebalanceDataGenerator.create_loading_tasks(load_set_ops), kv_store
            )
            generators["set"] = {"thread": thread}
            # restart three times
            generators["set"]["thread"].start()
            # Background delete-load thread.
            thread = RebalanceDataGenerator.start_load(
                rest, bucket.name, RebalanceDataGenerator.create_loading_tasks(load_delete_ops), kv_store
            )
            generators["delete"] = {"thread": thread}
            generators["delete"]["thread"].start()
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        # Add `rebalance_in` nodes and rebalance while the load threads run.
        rebalanced_in, which_servers = RebalanceBaseTest.rebalance_in(self._servers, rebalance_in)
        self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
        for bucket in buckets:
            kv_store = bucket_data[bucket.name]["kv_store"]
            errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
            if errors:
                log.error("verification returned {0} errors".format(len(errors)))
        # Wait for the background load threads before counting items.
        generators["set"]["thread"].join()
        generators["delete"]["thread"].join()
        for bucket in buckets:
            kv_store = bucket_data[bucket.name]["kv_store"]
            bucket_data[bucket.name]["items_inserted_count"] = len(kv_store.valid_items())
        RebalanceBaseTest.replication_verification(master, bucket_data, replica, self)
示例8: common_test_body
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def common_test_body(self, replica, load_ratio, timeout=10):
    """Set up a one-node cluster with a default bucket, drive a continuous
    background write load, and keep rebalancing nodes in and out for up to
    ``timeout`` minutes.

    Args:
        replica: replica count for the default bucket.
        load_ratio: load ratio (logged only).
        timeout: total run time in minutes.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    log.info("replica : {0}".format(replica))
    log.info("load_ratio : {0}".format(load_ratio))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    # Give the bucket two thirds of the node's memcached memory quota.
    bucket_ram = info.mcdMemoryReserved * 2 / 3
    rest.create_bucket(bucket='default',
                       ramQuotaMB=bucket_ram,
                       replicaNumber=replica,
                       proxyPort=11211)
    json_bucket = {'name': 'default', 'port': 11211, 'password': ''}
    BucketOperationHelper.wait_for_memcached(master, json_bucket)
    log.info("inserting some items in the master before adding any nodes")
    distribution = {1024: 0.4, 2 * 1024: 0.5, 512: 0.1}
    # Long-running async writers; effectively unbounded item count, capped
    # by terminate_in_minutes below.
    threads = MemcachedClientHelper.create_threads(servers=[master],
                                                   value_size_distribution=distribution,
                                                   number_of_threads=len(self._servers),
                                                   number_of_items=400000000,
                                                   moxi=False,
                                                   write_only=True,
                                                   async_write=True)
    for thread in threads:
        thread.terminate_in_minutes = 24 * 60
        thread.start()
    while time.time() < (start_time + 60 * timeout):
        # Grow the cluster back toward full size when nodes are missing.
        nodes = rest.node_statuses()
        delta = len(self._servers) - len(nodes)
        if delta > 0:
            if delta > 1:
                how_many_add = Random().randint(1, delta)
            else:
                how_many_add = 1
            self.log.info("going to add {0} nodes".format(how_many_add))
            self.rebalance_in(how_many=how_many_add)
        else:
            # BUG FIX: log message typo "clustr" -> "cluster".
            self.log.info("all nodes already joined the cluster")
            time.sleep(240)
        RestHelper(rest).wait_for_replication(600)
        # Don't rebalance out unless at least 3/4 of the servers are members.
        if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
            nodes = rest.node_statuses()
            how_many_out = Random().randint(1, len(nodes) - 1)
            self.log.info("going to remove {0} nodes".format(how_many_out))
            self.rebalance_out(how_many=how_many_out)
    # Stop the background writers before finishing.
    for t in threads:
        t.aborted = True
        t.join()
示例9: rebalance_in
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def rebalance_in(servers, how_many, do_shuffle=True, monitor=True, do_check=True):
    """Add ``how_many`` not-yet-member servers to the cluster and rebalance.

    Handles single-machine ("cluster_run") deployments where all nodes
    share one IP and differ only by port.

    Returns:
        (success, servers_added) tuple; with ``monitor`` False the success
        flag only reflects that the rebalance was kicked off.

    Raises:
        Exception: when ``do_check`` is set and fewer than ``how_many``
            free servers were found.
    """
    log = logger.Logger.get_logger()
    master = servers[0]
    rest = RestConnection(master)
    current = rest.node_statuses()
    first_ip = current[0].ip
    # All nodes share one IP only when there is more than one node and
    # every node reports the same address (cluster_run deployments).
    same_ip = len(current) > 1 and all(node.ip == first_ip for node in current)
    member_addrs = ["{0}:{1}".format(node.ip, node.port) for node in current]
    log.info("current nodes : {0}".format(member_addrs))
    candidates = servers[1:]
    if do_shuffle:
        shuffle(candidates)
    additions = []
    for candidate in candidates:
        probe_ip = first_ip if same_ip else candidate.ip
        if "{0}:{1}".format(probe_ip, candidate.port) not in member_addrs:
            additions.append(candidate)
            log.info("choosing {0}:{1}".format(candidate.ip, candidate.port))
        if len(additions) == int(how_many):
            break
    if do_check and len(additions) < how_many:
        raise Exception("unable to find {0} nodes to rebalance_in".format(how_many))
    for new_server in additions:
        rest.add_node(master.rest_username, master.rest_password,
                      new_server.ip, new_server.port)
    started = rest.rebalance([node.id for node in rest.node_statuses()], [])
    log.info("rebalance operation started ? {0}".format(started))
    if monitor is not True:
        return True, additions
    if not started:
        return False, additions
    try:
        result = rest.monitorRebalance()
    except RebalanceFailedException as e:
        log.error("rebalance failed: {0}".format(e))
        return False, additions
    log.info("successfully rebalanced in selected nodes from the cluster ? {0}".format(result))
    return result, additions
示例10: _common_test_body
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def _common_test_body(self):
    """For each remaining server: add it, rebalance, fail it over,
    rebalance it out, then add it back and rebalance again -- verifying
    replication after every step.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    rebalanced_servers = [master]
    bucket_data = RebalanceBaseTest.bucket_data_init(rest)
    self.log.info("INTIAL LOAD")
    RebalanceBaseTest.load_all_buckets_task(rest, self.task_manager, bucket_data, self.load_ratio,
                                            keys_count=self.keys_count)
    # Tally what the load threads inserted per bucket.
    for name in bucket_data:
        for thread in bucket_data[name]["threads"]:
            bucket_data[name]["items_inserted_count"] += thread.inserted_keys_count()
    for server in self.servers[1:]:
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        #do this 2 times , start rebalance , failover the node , remove the node and rebalance
        for i in range(0, self.num_rebalance):
            distribution = RebalanceBaseTest.get_distribution(self.load_ratio)
            RebalanceBaseTest.load_data_for_buckets(rest, self.load_ratio, distribution, [master], bucket_data,
                                                    self)
            # Step 1: add the node and rebalance it in.
            self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
            msg = "unable to add node {0} to the cluster {1}"
            self.assertTrue(otpNode, msg.format(server.ip, master.ip))
            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
            self.assertTrue(rest.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(server.ip))
            rebalanced_servers.append(server)
            RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
            # Step 2: fail the node over and rebalance it out.
            rest.fail_over(otpNode.id)
            self.log.info("failed over {0}".format(otpNode.id))
            time.sleep(10)
            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                           ejectedNodes=[otpNode.id])
            msg = "rebalance failed while removing failover nodes {0}".format(otpNode.id)
            self.assertTrue(rest.monitorRebalance(), msg=msg)
            #now verify the numbers again ?
            RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
            #wait 6 minutes
            time.sleep(6 * 60)
            # Step 3: add the node back and rebalance once more.
            self.log.info("adding node {0} and rebalance afterwards".format(server.ip))
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
            msg = "unable to add node {0} to the cluster {1}"
            self.assertTrue(otpNode, msg.format(server.ip, master.ip))
            distribution = RebalanceBaseTest.get_distribution(self.load_ratio)
            RebalanceBaseTest.load_data_for_buckets(rest, self.load_ratio, distribution, rebalanced_servers, bucket_data, self)
            rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
            self.assertTrue(rest.monitorRebalance(),
                            msg="rebalance operation failed after adding node {0}".format(server.ip))
            rebalanced_servers.append(server)
            RebalanceBaseTest.replication_verification(master, bucket_data, self.replica, self, True)
示例11: test_start_stop_rebalance_with_mutations
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def test_start_stop_rebalance_with_mutations(self):
    """
    Start-stop rebalance in/out with adding/removing additional nodes after
    stopping rebalance, with data mutations in background.
    This test begins by loading a given number of items into the cluster. It then
    add servs_in nodes and remove servs_out nodes and start rebalance. Then rebalance
    is stopped when its progress reached 20%. After we add extra_nodes_in and remove
    extra_nodes_out. Restart rebalance with new cluster configuration. Later rebalance
    will be stop/restart on progress 40/60/80%. Before each iteration, we start data mutations
    and end the mutations before data validations. After each iteration we wait for
    the disk queues to drain, and then verify that there has been no data loss,
    sum(curr_items) match the curr_items_total. Once cluster was rebalanced the test is finished.
    The order of add/remove nodes looks like:
    self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
    """
    rest = RestConnection(self.master)
    self._wait_for_stats_all_buckets(self.servs_init)
    self.log.info("Current nodes : {0}".format([node.id for node in rest.node_statuses()]))
    self.log.info("Adding nodes {0} to cluster".format(self.servs_in))
    self.log.info("Removing nodes {0} from cluster".format(self.servs_out))
    add_in_once = self.extra_servs_in
    result_nodes = set(self.servs_init + self.servs_in) - set(self.servs_out)
    # the last iteration will be with i=5, for this case rebalance should be completed,
    # that also is verified and tracked
    for i in range(1, 6):
        if self.withMutationOps:
            tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
        if i == 1:
            rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init], self.servs_in,
                                                     self.servs_out)
        else:
            rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init] + self.servs_in,
                                                     add_in_once,
                                                     self.servs_out + self.extra_servs_out)
            add_in_once = []
            result_nodes = set(self.servs_init + self.servs_in + self.extra_servs_in) - set(
                self.servs_out + self.extra_servs_out)
        self.sleep(20)
        expected_progress = 20 * i
        reached = RestHelper(rest).rebalance_reached(expected_progress)
        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
        if not RestHelper(rest).is_cluster_rebalanced():
            self.log.info("Stop the rebalance")
            stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
            self.assertTrue(stopped, msg="Unable to stop rebalance")
            if self.withMutationOps:
                # BUG FIX: the loop variable used to shadow the task list
                # ("for tasks in tasks"), leaving `tasks` bound to a single
                # task object after the loop; iterate with a distinct name.
                for task in tasks:
                    task.result(self.wait_timeout * 20)
            self.sleep(5)
        rebalance.result()
        if RestHelper(rest).is_cluster_rebalanced():
            self.verify_cluster_stats(result_nodes)
            self.log.info(
                "Rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress)))
            break
        else:
            self.log.info("Rebalance is still required. Verifying the data in the buckets")
            self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
            self.verify_cluster_stats(result_nodes, check_bucket_stats=False, verify_total_items=False)
    self.verify_unacked_bytes_all_buckets()
示例12: backup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def backup(self):
    """Periodically back up the 'default' bucket on every cluster node,
    sleeping ``self.interval`` between rounds, until any message arrives
    on ``self.queue`` (used as the stop signal).
    """
    while True:
        try:
            # A message on the queue is the stop signal for this worker.
            x = self.queue.get_nowait()
            self.log.info("get_nowait : {0}".format(x))
            break
        # Queue is empty: things are normal, just do another backup round
        # after waiting for self.interval (the empty-queue exception drives
        # the loop -- intentional EAFP control flow here).
        except Exception:
            master = self.servers[0]
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            # NOTE: `map` shadows the builtin; maps otp nodes -> servers.
            map = self.node_server_map(nodes, self.servers)
            self.log.info("cluster has {0} nodes".format(len(nodes)))
            for node in nodes:
                try:
                    # Re-seed PyCrypto's RNG after fork (required when this
                    # runs in a child process).
                    from Crypto.Random import atfork
                    atfork()
                    BackupHelper(map[node]).backup('default', "/tmp")
                    # NOTE(review): the backup is invoked twice in a row --
                    # looks like an accidental duplicate; confirm intent.
                    BackupHelper(map[node]).backup('default', "/tmp")
                except Exception as ex:
                    print ex
                self.log.info("backed up the data into ")
            time.sleep(self.interval)
示例13: rebalance_swap
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def rebalance_swap(servers, how_many, monitor=True):
    """Swap-rebalance: eject ``how_many`` current members and add the same
    number of servers that are not yet in the cluster.

    Returns (False, []) on any precondition or addition failure.

    NOTE(review): this snippet appears truncated -- after the additions
    succeed there is no rebalance/eject call visible; confirm against the
    original module.
    """
    if how_many < 1:
        log.error("failed to swap rebalance %s servers - invalid count" % how_many)
        return False, []
    rest = RestConnection(servers[0])
    cur_nodes = rest.node_statuses()
    # Python 2 map/filter return lists here; len() and slicing below rely on that.
    cur_ips = map(lambda node: node.ip, cur_nodes)
    cur_ids = map(lambda node: node.id, cur_nodes)
    free_servers = filter(lambda server: server.ip not in cur_ips, servers)
    if len(cur_ids) <= how_many or len(free_servers) < how_many:
        log.error("failed to swap rebalance %s servers - not enough servers" % how_many)
        return False, []
    # Eject the last how_many members; add the first how_many free servers.
    ejections = cur_ids[-how_many:]
    additions = free_servers[:how_many]
    log.info("swap rebalance: cur: %s, eject: %s, add: %s" % (cur_ids, ejections, additions))
    try:
        # map used for its side effect (eager in Python 2): add each server.
        map(
            lambda server: rest.add_node(
                servers[0].rest_username, servers[0].rest_password, server.ip, server.port
            ),
            additions,
        )
    except (ServerAlreadyJoinedException, ServerSelfJoinException, AddNodeException), e:
        log.error("failed to swap rebalance - addition failed %s: %s" % (additions, e))
        return False, []
示例14: get_vBuckets_info
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def get_vBuckets_info(master):
    """
    Return state and count items for all vbuckets for each node.
    Format: dict: {u'node_ip:port': {'vb_79': ['replica', '0'], 'vb_78': ['active', '0']..}, ...}

    On a per-node failure the node still gets an (empty) entry and
    collection continues with the next node.
    """
    rest = RestConnection(master)
    # memcached port of the node this RestConnection points at; used for
    # the direct MemcachedClient connections below.
    port = rest.get_nodes_self().memcached
    nodes = rest.node_statuses()
    _nodes_stats = {}
    for node in nodes:
        stat = {}
        buckets = []
        try:
            buckets = rest.get_buckets()
            mc = MemcachedClient(node.ip, port)
            stat_hash = mc.stats("hash")
        except Exception:
            if not buckets:
                log.error("There are not any buckets in {0}:{1} node".format(node.ip, node.port))
            else:
                log.error("Impossible to get vBucket's information for {0}:{1} node".format(node.ip, node.port))
            # BUG FIX: the original evaluated `_nodes_stats[key]` without
            # assigning, which raised KeyError on this error path; record
            # an empty entry for the failed node instead and continue.
            _nodes_stats[node.ip + ":" + str(node.port)] = {}
            continue
        mc.close()
        # "vb_NN:state"/"vb_NN:counted" keys -> one ['state', 'count'] pair
        # per vbucket name.
        vb_names = [key[:key.index(":")] for key in stat_hash.keys()]
        for name in vb_names:
            stat[name] = [stat_hash[name + ":state"], stat_hash[name + ":counted"]]
        _nodes_stats[node.ip + ":" + str(port)] = stat
    log.info(_nodes_stats)
    return _nodes_stats
示例15: replication_verification
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import node_statuses [as 别名]
def replication_verification(master, bucket, replica, inserted_count, test):
    """Wait for replication to settle and assert the bucket's curr_items
    matches the number of keys inserted.

    Verification only runs when the cluster has enough nodes to host
    every replica (len(nodes) / (1 + replica) >= 1).
    """
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    if len(nodes) / (1 + replica) >= 1:
        final_replication_state = RestHelper(rest).wait_for_replication(900)
        test.log.info("replication state after waiting for up to 15 minutes : {0}".format(
            final_replication_state))
        # in windows, we need to set timeout_in_seconds to 15+ minutes
        test.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master=master,
                                                                      bucket=bucket,
                                                                      timeout_in_seconds=1200),
                        msg="replication was completed but sum(curr_items) dont match the curr_items_total")
        # Poll curr_items for up to two minutes until it matches.
        deadline = time.time() + 120
        stats = rest.get_bucket_stats()
        while time.time() < deadline and stats["curr_items"] != inserted_count:
            test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            time.sleep(5)
            stats = rest.get_bucket_stats()
        RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
        test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
        stats = rest.get_bucket_stats()
        failure_msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
        test.assertEquals(stats["curr_items"], inserted_count,
                          msg=failure_msg.format(stats["curr_items"], inserted_count))