本文整理汇总了Python中membase.api.rest_client.RestConnection.get_nodes_self方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_nodes_self方法的具体用法?Python RestConnection.get_nodes_self怎么用?Python RestConnection.get_nodes_self使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection的用法示例。
在下文中一共展示了RestConnection.get_nodes_self方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp_bucket
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def setUp_bucket(self, unittest):
    """Prepare the master node for bucket tests: initialize the cluster,
    wipe any pre-existing buckets, and create the built-in 'cbadminbucket'
    RBAC user with the admin role.

    :param unittest: the TestCase instance driving this helper; used for
        assertions and handed to the bucket-cleanup helper.
    """
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    unittest.assertTrue(self.input, msg="input parameters missing...")
    self.test = unittest
    self.master = self.input.servers[0]

    # First pass: bring the node up with the full reserved memcached quota
    # so the cleanup helpers can talk to a live cluster.
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)

    # Second pass: re-initialize using the node's configured memory quota.
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)

    # Add the built-in user, then assign it the admin role.
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    time.sleep(10)
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    time.sleep(10)
示例2: common_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def common_setup(self, replica):
    """Assemble a cluster from every server in the test input, create a
    "default" bucket with the requested replica count, and rebalance.

    :param replica: replica count for the "default" bucket.
    """
    self._input = TestInputSingleton.input
    self._servers = self._input.servers
    first = self._servers[0]
    self.log = logger.Logger().get_logger()
    self.log.info(self._input)
    rest = RestConnection(first)
    for server in self._servers:
        RestHelper(RestConnection(server)).is_ns_server_running()
    ClusterOperationHelper.cleanup_cluster(self._servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
    ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers,
                                                   self._input.membase_settings, self)
    nodes = rest.node_statuses()
    otpNodeIds = [node.id for node in nodes]
    info = rest.get_nodes_self()
    # Size the bucket at 3/4 of the node's reserved memcached memory.
    bucket_ram = info.mcdMemoryReserved * 3 / 4
    rest.create_bucket(bucket="default",
                       ramQuotaMB=int(bucket_ram),
                       replicaNumber=replica,
                       proxyPort=rest.get_nodes_self().moxi)
    msg = "wait_for_memcached fails"
    # BUG FIX: the original line ended with a stray trailing comma, which
    # wrapped the result in a one-element tuple and made the assertion below
    # unconditionally pass even when wait_for_memcached returned False.
    ready = BucketOperationHelper.wait_for_memcached(first, "default")
    self.assertTrue(ready, msg)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(first.ip))
    self.log.info('started rebalance operation on master node {0}'.format(first.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # without a bucket this seems to fail
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.awareness = VBucketAwareMemcached(rest, "default")
示例3: test_max_buckets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def test_max_buckets(self):
    """Create as many 100MB sasl buckets as the node's reserved memcached
    memory allows, then verify that exactly that many buckets exist."""
    log = logger.Logger.get_logger()
    serverInfo = self.servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    rest = RestConnection(serverInfo)
    proxyPort = rest.get_nodes_self().moxi
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
    bucket_ram = 100
    bucket_count = info.mcdMemoryReserved / bucket_ram
    for _ in range(bucket_count):
        bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
        rest.create_bucket(bucket=bucket_name,
                           ramQuotaMB=bucket_ram,
                           authType='sasl',
                           proxyPort=proxyPort)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
    buckets = []
    try:
        buckets = rest.get_buckets()
    except Exception:
        # The bucket listing can be flaky right after mass creation; retry once.
        log.info('15 seconds sleep before calling get_buckets again...')
        time.sleep(15)
        buckets = rest.get_buckets()
    if len(buckets) != bucket_count:
        msg = 'tried to create {0} buckets, only created {1}'.format(bucket_count, len(buckets))
        log.error(msg)
        self.fail(msg=msg)
示例4: common_setup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def common_setup(self):
self.cluster_helper = Cluster()
self.log = logger.Logger.get_logger()
self.cluster_run = False
self.input = TestInputSingleton.input
self.servers = self.input.servers
serverInfo = self.servers[0]
rest = RestConnection(serverInfo)
if len(set([server.ip for server in self.servers])) == 1:
ip = rest.get_nodes_self().ip
for server in self.servers:
server.ip = ip
self.cluster_run = True
self.case_number = self.input.param("case_number", 0)
self.replica = self.input.param("replica", 1)
self.keys_count = self.input.param("keys-count", 1000)
self.load_ratio = self.input.param("load-ratio", 1)
self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
self.num_buckets = self.input.param("num-buckets", 1)
self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
self.num_initial_servers = self.input.param("num-initial-servers", 3)
self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
self.do_access = self.input.param("do-access", True)
self.load_started = False
self.loaders = []
try:
# Clear the state from Previous invalid run
if rest._rebalance_progress_status() == "running":
self.log.warning("rebalancing is still running, previous test should be verified")
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
self.log.info(
"============== SwapRebalanceBase setup was started for test #{0} {1}==============".format(
self.case_number, self._testMethodName
)
)
SwapRebalanceBase.reset(self)
# Make sure the test is setup correctly
min_servers = int(self.num_initial_servers) + int(self.num_swap)
msg = "minimum {0} nodes required for running swap rebalance"
self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))
self.log.info("picking server : {0} as the master".format(serverInfo))
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
info = rest.get_nodes_self()
rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
if self.num_buckets > 10:
BaseTestCase.change_max_buckets(self, self.num_buckets)
self.log.info(
"============== SwapRebalanceBase setup was finished for test #{0} {1} ==============".format(
self.case_number, self._testMethodName
)
)
SwapRebalanceBase._log_start(self)
except Exception, e:
self.cluster_helper.shutdown()
self.fail(e)
示例5: set_get_test
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def set_get_test(self, value_size, number_of_items):
    """Create four buckets with replica counts 0-3, store items of
    *value_size* bytes in each, then read every stored key back, retrying
    up to 10 times, and fail if any value is missing or mismatched.

    :param value_size: size in bytes of the fixed value stored per key.
    :param number_of_items: number of set attempts per bucket.
    """
    fixed_value = MemcachedClientHelper.create_value("S", value_size)
    specs = [
        ("default", 0),
        ("set-get-bucket-replica-1", 1),
        ("set-get-bucket-replica-2", 2),
        ("set-get-bucket-replica-3", 3),
    ]
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)
    mcport = rest.get_nodes_self().memcached
    for name, replica in specs:
        rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)
    bucket_data = {}
    buckets = RestConnection(serverInfo).get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        self.test.assertTrue(ready, "wait_for_memcached failed")
        client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
        inserted = []
        rejected = []
        # Keep storing fresh random keys until we have enough successes
        # (or enough rejections) recorded.
        while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
            try:
                key = str(uuid.uuid4())
                client.set(key, 0, 0, fixed_value)
                inserted.append(key)
            except mc_bin_client.MemcachedError:
                pass  # transient store failure; just keep trying

        retry = 0
        remaining_items = list(inserted)
        msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
        while retry < 10 and remaining_items:
            verified_keys = []
            for key in remaining_items:
                try:
                    flag, keyx, value = client.get(key=key)
                    if not value == fixed_value:
                        self.test.fail("value mismatch for key {0}".format(key))
                    verified_keys.append(key)
                except mc_bin_client.MemcachedError as error:
                    self.log.error(msg.format(error.status, key))
            retry += 1
            for verified in verified_keys:
                remaining_items.remove(verified)

        # Log at most 100 of the keys that never verified, then fail.
        print_count = 0
        for key in remaining_items:
            if print_count > 100:
                break
            print_count += 1
            self.log.error("unable to verify key : {0}".format(key))
        if remaining_items:
            self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
示例6: load_with_failover_master
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def load_with_failover_master(self):
    """Load both XDCR clusters, optionally fail over and rebalance out the
    source and/or destination master (depending on self._failover), repoint
    bucket master ids at the new masters, then mutate data and verify."""
    self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
    self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
    self.sleep(self._timeout)
    if self._failover is not None:
        if "source" in self._failover and len(self.src_nodes) > 1:
            self.log.info(" Failing over Source Master Node {0}".format(self.src_master.ip))
            prev_master_id = RestConnection(self.src_master).get_nodes_self().id
            self.cluster.failover(self.src_nodes, [self.src_master])
            self.log.info(" Rebalance out Source Master Node {0}".format(self.src_master.ip))
            self.cluster.rebalance(self.src_nodes, [], [self.src_master])
            self.src_nodes.remove(self.src_master)
            self.src_master = self.src_nodes[0]
            master_id = RestConnection(self.src_master).get_nodes_self().id
            # Repoint buckets that referenced the old master at the new one.
            for bucket in self.buckets:
                if bucket.master_id == prev_master_id:
                    bucket.master_id = master_id
        elif "source" in self._failover and len(self.src_nodes) <= 1:
            self.log.info(
                "Number of nodes {0} is less than minimum '2' needed for failover on a cluster.".format(
                    len(self.src_nodes)
                )
            )
        if "destination" in self._failover and len(self.dest_nodes) > 1:
            self.log.info(" Failing over Destination Master Node {0}".format(self.dest_master.ip))
            prev_master_id = RestConnection(self.dest_master).get_nodes_self().id
            self.cluster.failover(self.dest_nodes, [self.dest_master])
            self.log.info(" Rebalance out Destination Master Node {0}".format(self.dest_master.ip))
            self.cluster.rebalance(self.dest_nodes, [], [self.dest_master])
            self.dest_nodes.remove(self.dest_master)
            self.dest_master = self.dest_nodes[0]
            master_id = RestConnection(self.dest_master).get_nodes_self().id
            # Repoint buckets that referenced the old master at the new one.
            for bucket in self.buckets:
                if bucket.master_id == prev_master_id:
                    bucket.master_id = master_id
        elif "destination" in self._failover and len(self.dest_nodes) <= 1:
            self.log.info(
                "Number of nodes {0} is less than minimum '2' needed for failover on a cluster.".format(
                    len(self.dest_nodes)
                )
            )
    self.sleep(self._timeout / 2)
    self._async_update_delete_data()
    self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
    self.sleep(self._timeout * 5)
    self.verify_results(verify_src=True)
示例7: failover
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def failover(self, howmany):
    """Fail over *howmany* randomly chosen non-master nodes and return them.

    Verifies that every configured server is part of the cluster and that
    at least two nodes would remain, waits for each stopped node to be
    reported unhealthy, then issues the REST fail_over calls.

    :param howmany: number of nodes to fail over.
    :return: list of the node-status objects that were failed over.
    """
    # check that all nodes are part of the cluster
    rest = RestConnection(self.servers[0])
    nodes = rest.node_statuses()
    if len(nodes) != len(self.servers):
        self.test.fail(num_nodes_mismatch.format(len(self.servers), len(nodes)))
    if len(nodes) - howmany < 2:
        self.test.fail(num_nodes_mismatch.format(len(nodes), howmany))
    master_node = rest.get_nodes_self()
    # When picking victims, never select the master node.
    candidates = [n for n in nodes if n.id != master_node.id]
    shuffle(candidates)
    failed = candidates[0:howmany]
    for victim in failed:
        self.log.info("will fail over node : {0}".format(victim.id))
    if len(nodes) / (1 + howmany) >= 1:
        self.test.assertTrue(RestHelper(rest).wait_for_replication(900),
                             msg="replication did not finish after 15 minutes")
    for victim in failed:
        self._stop_server(victim)
    self.log.info("10 seconds delay to wait for membase-server to shutdown")
    # wait up to 5 minutes until each stopped node is reported down
    for victim in failed:
        if victim.port == 8091:
            self.test.assertTrue(RestHelper(rest).wait_for_node_status(victim, "unhealthy", 300),
                                 msg="node status is not unhealthy even after waiting for 5 minutes")
        self.test.assertTrue(rest.fail_over(victim.id), msg="failover did not complete")
        self.log.info("failed over node : {0}".format(victim.id))
    return failed
示例8: direct_mc_bin_client
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def direct_mc_bin_client(self, server, bucket, timeout=30):
    """Create a raw MemcachedClient (mc_bin_client) connection to *bucket*
    on *server*, bypassing the SDK client.

    :param server: server object (or dict with an "ip" key) to connect to.
    :param bucket: bucket name the client will be used against.
    :param timeout: socket timeout in seconds for the memcached connection.
    :return: a connected MemcachedClient with vbucket_count populated.
    """
    # USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
    rest = RestConnection(server)
    node = None
    try:
        node = rest.get_nodes_self()
    except ValueError:
        # Direct lookup can fail on this node; fall back to scanning
        # the full node list below.
        self.log.info("could not connect to server {0}, will try scanning all nodes".format(server))
    if not node:
        nodes = rest.get_nodes()
        for n in nodes:
            if n.ip == server.ip and n.port == server.port:
                node = n
    if isinstance(server, dict):
        self.log.info("dict:{0}".format(server))
        self.log.info("creating direct client {0}:{1} {2}".format(server["ip"], node.memcached, bucket))
    else:
        self.log.info("creating direct client {0}:{1} {2}".format(server.ip, node.memcached, bucket))
    RestHelper(rest).vbucket_map_ready(bucket, 60)
    vBuckets = RestConnection(server).get_vbuckets(bucket)
    if isinstance(server, dict):
        client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
    else:
        client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
    # FIX: use identity comparison for None (was `vBuckets != None`).
    if vBuckets is not None:
        client.vbucket_count = len(vBuckets)
    else:
        client.vbucket_count = 0
    # Touch the bucket endpoint so a missing bucket fails loudly here rather
    # than on the first memcached op (result intentionally unused; the
    # original bound it to an unused local).
    rest.get_bucket(bucket)
    return client
示例9: test_default_moxi
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def test_default_moxi(self):
    """On every server: create the 'default' bucket via its moxi proxy port,
    load and verify some keys, delete the bucket, recreate it, and confirm
    the previously inserted keys are gone from the fresh bucket."""
    name = 'default'
    for serverInfo in self.servers:
        rest = RestConnection(serverInfo)
        replicaNumber = 1
        proxyPort = rest.get_nodes_self().moxi
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.assertTrue(ready, "wait_for_memcached failed")
        inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
        self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
        verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
        self.assertTrue(verified, msg='verified all the keys stored')
        # drop the bucket...
        rest.delete_bucket(name)
        msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)
        # ...and recreate it
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           replicaNumber=replicaNumber,
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        BucketOperationHelper.wait_for_memcached(serverInfo, name)
        self.log.info('recreated the default bucket...')
        # none of the old keys may survive the delete/recreate cycle
        self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                        msg='at least one key found in the bucket')
示例10: verify_referenced_by_ip
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def verify_referenced_by_ip(self, servers):
    """Assert that every node in *servers* reports a hostname starting with
    its configured IP, i.e. the node is referenced by IP rather than name."""
    for server in servers:
        node_hostname = RestConnection(server).get_nodes_self().hostname
        self.assertTrue(node_hostname.startswith(server.ip),
                        "Server %s: Expected hostname %s, actual is %s" % (
                            server, server.ip, node_hostname))
示例11: setUp
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def setUp(self):
    """Per-test fixture: initialize every node, wipe existing buckets,
    create the default bucket, and rebalance all servers in."""
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.num_of_docs = self.input.param("num_of_docs", 1000)
    rest = RestConnection(self.master)
    for server in self.servers:
        rest.init_cluster(server.rest_username, server.rest_password)
    info = rest.get_nodes_self()
    for server in self.servers:
        rest.init_cluster_memoryQuota(
            server.rest_username,
            server.rest_password,
            memoryQuota=info.mcdMemoryReserved,
        )
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    self._create_default_bucket()
    # Rebalance all nodes into the cluster.
    ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    ClusterOperationHelper.end_rebalance(self.master)
    self._log_start()
示例12: initialize
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def initialize(self, params):
    """Initialize a freshly installed node: set credentials and memory
    quota, and optionally point the data path at a custom directory.
    Retries for up to 10 minutes while ns_server comes up.

    :param params: dict containing at least a "server" entry.
    :raises Exception: if the node cannot be initialized within 10 minutes.
    """
    start_time = time.time()
    cluster_initialized = False
    server = params["server"]
    remote_client = RemoteMachineShellConnection(params["server"])
    deadline = start_time + (10 * 60)
    while time.time() < deadline:
        rest = RestConnection(server)
        try:
            rest.init_cluster(username=server.rest_username, password=server.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if server.data_path:
                time.sleep(3)
                # Make sure data_path is empty and writable by the couchbase
                # user before repointing the node at it.
                remote_client.execute_command('rm -rf {0}/*'.format(server.data_path))
                remote_client.execute_command("chown -R couchbase:couchbase {0}".format(server.data_path))
                rest.set_data_path(data_path=server.data_path)
            time.sleep(3)
            cluster_initialized = True
            break
        except ServerUnavailableException:
            log.error("error happened while initializing the cluster @ {0}".format(server.ip))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
    if not cluster_initialized:
        raise Exception("unable to initialize membase node")
示例13: convert_to_hostname
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def convert_to_hostname(self, servers_with_hostnames):
    """For each server currently referenced by IP, switch the cluster node
    reference to its fully-qualified hostname. Versions 1.8.1 / 2.0.0 /
    2.0.1 use the server-settings mechanism; later versions use rename_node.

    :return: list of fully-qualified hostnames, one per server.
    """
    try:
        hostname = []
        for server in servers_with_hostnames:
            shell = RemoteMachineShellConnection(server)
            info = shell.extract_remote_info()
            domain = ''.join(info.domain[0])
            hostname.append(info.hostname[0] + "." + domain)
            master_rest = RestConnection(server)
            current_ref = master_rest.get_nodes_self().hostname
            self.log.info("get_node_self function returned : {0}".format(current_ref))
            if server.ip in current_ref:
                self.log.info("Node {0} is referred via IP. Need to be referred with hostname. Changing the name of the node!!".format(server.ip))
                version = RestConnection(server).get_nodes_self().version
                if version.startswith("1.8.1") or version.startswith("2.0.0") or version.startswith("2.0.1"):
                    RemoteUtilHelper.use_hostname_for_server_settings(server)
                    RestConnection(server).init_cluster()
                else:
                    RestConnection(server).init_cluster()
                    master_rest.rename_node(username='Administrator', password='password', port='', hostname=hostname[-1])
            else:
                self.log.info("Node {0} already referred via hostname. No need to convert the name".format(server.ip))
    finally:
        # NOTE(review): only the last shell opened is disconnected here, as
        # in the original — earlier shells in the loop are left open.
        shell.disconnect()
    return hostname
示例14: test_default_case_sensitive_dedicated
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def test_default_case_sensitive_dedicated(self):
    """Bucket names are case sensitive: after creating 'Default', an attempt
    to create 'default' must raise BucketCreationException rather than
    create a second bucket differing only in case."""
    name = 'Default'
    for serverInfo in self.servers:
        rest = RestConnection(serverInfo)
        proxyPort = rest.get_nodes_self().moxi
        rest.create_bucket(bucket=name,
                           ramQuotaMB=200,
                           authType='sasl',
                           saslPassword='test_non_default',
                           proxyPort=proxyPort)
        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
        name = 'default'
        try:
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               proxyPort=11221,
                               authType='sasl',
                               saslPassword='test_non_default')
            msg = "create_bucket created two buckets in different case : {0},{1}".format('default', 'Default')
            self.fail(msg)
        except BucketCreationException as ex:
            # expected: 'default' collides with the existing 'Default' bucket
            self.log.info('BucketCreationException was thrown as expected')
            self.log.info(ex.message)
示例15: get_vBuckets_info
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_nodes_self [as 别名]
def get_vBuckets_info(master):
    """Return state and item count for every vbucket on every node.

    Format: {"ip:memcached_port": {"vb_79": ["replica", "0"],
                                   "vb_78": ["active", "0"], ...}, ...}

    :param master: server whose REST interface is used to discover the
        cluster's nodes and buckets.
    """
    rest = RestConnection(master)
    port = rest.get_nodes_self().memcached
    nodes = rest.node_statuses()
    _nodes_stats = {}
    for node in nodes:
        stat = {}
        buckets = []
        try:
            buckets = rest.get_buckets()
            mc = MemcachedClient(node.ip, port)
            stat_hash = mc.stats("hash")
        except Exception:
            if not buckets:
                log.error("There are not any buckets in {0}:{1} node".format(node.ip, node.port))
            else:
                log.error("Impossible to get vBucket's information for {0}:{1} node".format(node.ip, node.port))
            # BUG FIX: the original performed a bare dict lookup here
            # (`_nodes_stats[key]` with no assignment), which raised KeyError
            # inside the except handler. Record an empty stats entry for the
            # unreachable node instead so iteration can continue.
            _nodes_stats[node.ip + ":" + str(node.port)] = {}
            continue
        mc.close()
        # Keys look like "vb_<n>:<field>"; collect the distinct vbucket names.
        vb_names = [key[:key.index(":")] for key in stat_hash.keys()]
        for name in vb_names:
            stat[name] = [stat_hash[name + ":state"], stat_hash[name + ":counted"]]
        _nodes_stats[node.ip + ":" + str(port)] = stat
    log.info(_nodes_stats)
    return _nodes_stats