本文整理汇总了Python中membase.api.rest_client.RestConnection.diag_eval方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.diag_eval方法的具体用法?Python RestConnection.diag_eval怎么用?Python RestConnection.diag_eval使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.diag_eval方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _start_server
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _start_server(self, node):
    """Start the cluster process for *node* via diag/eval on the master.

    Scans every configured server; any server whose ns_server is already
    running is left alone, otherwise the module-level start_cluster
    erlang snippet is posted to the master's diag/eval endpoint.
    """
    orchestrator = RestConnection(self.servers[0])
    for candidate in self.servers:
        candidate_rest = RestConnection(candidate)
        self.log.info("see if server {0}:{1} is stopped".format(candidate.ip, candidate.port))
        running = RestHelper(candidate_rest).is_ns_server_running(timeout_in_seconds=5)
        if not running:
            self.log.info("running {0}".format(start_cluster.format(node.id)))
            orchestrator.diag_eval(start_cluster.format(node.id))
示例2: find_orchestrator
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def find_orchestrator(master):
    """Return (status, node_name) for the erlang node currently running the
    ns_orchestrator singleton, resolved through diag/eval on *master*.

    The otp node name comes back quoted (an erlang atom); the surrounding
    single quotes are stripped before returning.
    """
    status, raw = RestConnection(master).diag_eval(
        "node(global:whereis_name(ns_orchestrator))")
    return status, raw.replace("'", '')
示例3: _kill_nodes
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _kill_nodes(self, nodes, servers, bucket_name):
    """Take down nodes for failure testing.

    When the "reboot" test parameter is False, kill -9 the memcached
    process on each of *nodes* through diag/eval; otherwise reboot every
    machine in *servers*, wait for it to come back, and flush iptables.
    """
    self.reboot = self.input.param("reboot", True)
    if not self.reboot:
        for node in nodes:
            _node = {
                "ip": node.ip,
                "port": node.port,
                "username": self.servers[0].rest_username,
                "password": self.servers[0].rest_password,
            }
            node_rest = RestConnection(_node)
            _mc = MemcachedClientHelper.direct_client(_node, bucket_name)
            self.log.info("restarted the node %s:%s" % (node.ip, node.port))
            # memcached reports its own pid in its stats block
            pid = _mc.stats()["pid"]
            # run the kill on the node itself via erlang's os:cmd
            command = 'os:cmd("kill -9 {0} ")'.format(pid)
            self.log.info(command)
            killed = node_rest.diag_eval(command)
            self.log.info("killed ?? {0} ".format(killed))
            _mc.close()
    else:
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            command = "reboot"
            output, error = shell.execute_command(command)
            shell.log_command_output(output, error)
            shell.disconnect()
            # give the machine time to go down and come back up
            time.sleep(self.wait_timeout * 8)
            # reconnect and clear any firewall rules left over from the test
            shell = RemoteMachineShellConnection(server)
            command = "/sbin/iptables -F"
            output, error = shell.execute_command(command)
            shell.log_command_output(output, error)
            shell.disconnect()
示例4: checkTLS1_1_blocking
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def checkTLS1_1_blocking(self):
    """Verify that setting ssl_minimum_protocol to TLS 1.2 blocks older TLS.

    Applies the setting via diag/eval, runs the testssl check on all
    servers, then restarts every server and checks again to confirm the
    setting survives a restart.
    """
    self.get_the_testssl_script(self.TEST_SSL_FILENAME)
    command = "ns_config:set(ssl_minimum_protocol, 'tlsv1.2')"
    self.log.info("posting: %s" % command)
    rest = RestConnection(self.master)
    # the reply is not inspected; the testssl scan below verifies the effect
    rest.diag_eval(command)
    # do the initial check
    self.check_all_servers(rest)
    # restart every server so the new minimum protocol is picked up
    try:
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_couchbase()
            time.sleep(10)  # Avoid using sleep like this on further calls
            shell.start_couchbase()
            shell.disconnect()
    except Exception:
        # best effort: log the restart failure but still re-check below
        self.log.error(traceback.format_exc())
    # and check again once the cluster has had time to come back up
    time.sleep(30)
    self.check_all_servers(rest)
示例5: _get_vbuckets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _get_vbuckets(self, server):
    """Return the configured vbucket count for *server*.

    Reads couchbase_num_vbuckets_default from ns_config via diag/eval.
    Falls back to the product default of 1024 when the key is unset or
    the reply cannot be parsed as an integer.
    """
    rest = RestConnection(server)
    command = "ns_config:search(couchbase_num_vbuckets_default)"
    status, content = rest.diag_eval(command)
    try:
        # the reply is wrapped in erlang terms (e.g. "{value,1024}");
        # strip every non-digit before converting
        vbuckets = int(re.sub(r'[^\d]', '', content))
    except (TypeError, ValueError):
        # key not set / non-numeric reply -> product default
        vbuckets = 1024
    return vbuckets
示例6: _stop_server
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _stop_server(self, node):
    """Stop the server identified by *node*.

    A node on port 8091 is treated as a real installation and stopped
    over ssh (membase or couchbase service, whichever is installed);
    any other port is assumed to be a cluster_run-style node and is
    stopped through diag/eval on the master.
    """
    master_rest = RestConnection(self.servers[0])
    for server in self.servers:
        rest = RestConnection(server)
        self.log.info("see if server {0}:{1} is running".format(server.ip, server.port))
        # skip servers whose ns_server is already down
        if not RestHelper(rest).is_ns_server_running(timeout_in_seconds=5):
            continue
        node_id = rest.get_nodes_self().id
        if node_id == node.id:
            # if its 8091 then do ssh otherwise use ns_servr
            if node.port == 8091:
                shell = RemoteMachineShellConnection(server)
                if shell.is_membase_installed():
                    shell.stop_membase()
                    self.log.info("Membase stopped")
                else:
                    shell.stop_couchbase()
                    self.log.info("Couchbase stopped")
                shell.disconnect()
                break
            else:
                # NOTE(review): only the ssh branch breaks out of the loop;
                # the diag/eval branch keeps scanning remaining servers -
                # confirm this asymmetry is intended.
                self.log.info("running {0}".format(stop_cluster.format(node.id)))
                master_rest.diag_eval(stop_cluster.format(node.id))
示例7: _kill_nodes
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _kill_nodes(self, nodes):
    """kill -9 memcached on a subset of *nodes* via diag/eval.

    When the "is_partial" test parameter is truthy (the default) and the
    cluster has more than one server, only every other node is killed;
    otherwise every node in *nodes* is killed.
    """
    is_partial = self.input.param("is_partial", "True")
    step = 2 if len(self.servers) > 1 else 1
    victims = nodes[::step] if is_partial else nodes
    for victim in victims:
        creds = {"ip": victim.ip, "port": victim.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password}
        client = MemcachedClientHelper.direct_client(creds, self.bucket_name)
        self.log.info("restarted the node %s:%s" % (victim.ip, victim.port))
        memcached_pid = client.stats()["pid"]
        command = "os:cmd(\"kill -9 {0} \")".format(memcached_pid)
        self.log.info(command)
        result = RestConnection(creds).diag_eval(command)
        self.log.info("killed ?? {0} ".format(result))
        client.close()
示例8: _kill_nodes
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _kill_nodes(self, nodes):
    """Kill memcached on a subset of *nodes*: terminate_process on
    Windows, kill -9 via diag/eval everywhere else.

    With "is_partial" (default) and more than one server, only every
    other node is targeted.
    """
    is_partial = self.input.param("is_partial", "True")
    _nodes = []
    if len(self.servers) > 1 :
        skip = 2
    else:
        skip = 1
    if is_partial:
        _nodes = nodes[0:len(nodes):skip]
    else:
        _nodes = nodes
    for node in _nodes:
        _node = {"ip": node.ip, "port": node.port, "username": self.servers[0].rest_username,
            "password": self.servers[0].rest_password}
        _mc = MemcachedClientHelper.direct_client(_node, self.bucket_name)
        self.log.info("restarted the node %s:%s" % (node.ip, node.port))
        # grab the pid before closing, for the unix kill path below
        pid = _mc.stats()["pid"]
        _mc.close()
        node_rest = RestConnection(_node)
        # find the matching TestInput server so we can open a shell to it
        for _server in self.servers:
            if _server.ip == node.ip:
                self.log.info("Returned Server index %s" % _server)
                shell = RemoteMachineShellConnection(_server)
                break
        # NOTE(review): if no server matches node.ip, `shell` is unbound here
        # and extract_remote_info raises NameError - confirm nodes always map
        # to configured servers.
        info = shell.extract_remote_info()
        os_type = info.type.lower()
        if os_type == 'windows':
            shell.terminate_process(info, 'memcached.exe')
            self.log.info("killed ?? node %s " % node.ip)
            # command = "taskkill /F /T /IM memcached.exe*"
        else:
            command = "os:cmd(\"kill -9 {0} \")".format(pid)
            self.log.info(command)
            killed = node_rest.diag_eval(command)
            self.log.info("killed ?? {0} ".format(killed))
示例9: _common_test_body_failed_swap_rebalance
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _common_test_body_failed_swap_rebalance(self):
    """Swap-rebalance scenario that deliberately fails mid-rebalance.

    Builds a cluster, loads data, starts a swap rebalance, waits for it
    to reach self.percentage_progress, then kill -9's memcached on the
    master via diag/eval and expects the rebalance to fail.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # orchestrator is being swapped out - talk to a freshly added node
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        self.log.info("Latest logs from UI:")
        for i in rest.get_logs(): self.log.error(i)
        self.fail("rebalance failed even before killing memcached")
    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
        pid = o[0]
        shell.disconnect()
    else:
        # memcached stats can hit EOF mid-rebalance; retry once
        for i in xrange(2):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except EOFError as e:
                self.log.error("{0}.Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 1)
    if pid is None:
        self.fail("impossible to get a PID")
    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)
    # we can't get stats for new node when rebalance falls
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect that rebalance will be failed
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        # retry rebalance if it failed
        self.log.warn("Rebalance failed but it's expected")
# ......... remainder of this method omitted in this excerpt .........
示例10: set_vbuckets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def set_vbuckets(master, vbuckets):
    """Set couchbase_num_vbuckets_default to *vbuckets* on every node.

    Uses rpc:eval_everywhere through diag/eval on *master* and returns
    the (status, content) pair from the call.
    """
    cmd = "rpc:eval_everywhere(ns_config, set, [couchbase_num_vbuckets_default, {0}]).".format(vbuckets)
    return RestConnection(master).diag_eval(cmd)
示例11: _common_test_body_failed_swap_rebalance
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _common_test_body_failed_swap_rebalance(self):
    """Swap-rebalance scenario that breaks the rebalance at 20/40/60%.

    At each progress point it kills the ns_port_sup children
    (memcached/moxi) via diag/eval, restarts the rebalance, and finally
    verifies the loaded items.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    RebalanceHelper.rebalance_in(intial_severs, len(intial_severs)-1)
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    self.log.info("DATA LOAD PHASE")
    loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers+self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # orchestrator is being swapped out - talk to a freshly added node
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
                   ejectedNodes=optNodesIds)
    # Rebalance is failed at 20%, 40% and 60% completion
    for i in [1, 2, 3]:
        expected_progress = 20*i
        self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
        # NOTE(review): `reached` is not checked - memcached is killed
        # regardless of the progress actually reached; confirm intended.
        reached = RestHelper(rest).rebalance_reached(expected_progress)
        # kill every child of ns_port_sup (memcached/moxi) to break the rebalance
        command = "[erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)]."
        memcached_restarted = rest.diag_eval(command)
        self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
        time.sleep(20)
        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
                       ejectedNodes=optNodesIds)
    # Stop loaders
    SwapRebalanceBase.stop_load(loaders)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
    self.log.info("DONE DATA ACCESS PHASE")
    #for bucket in rest.get_buckets():
    #    SwapRebalanceBase.verify_data(new_swap_servers[0], bucket_data[bucket.name].get('inserted_keys'),\
    #        bucket.name, self)
    #    RebalanceHelper.wait_for_persistence(master, bucket.name)
    self.log.info("VERIFICATION PHASE")
    SwapRebalanceBase.items_verification(master, self)
示例12: _do_warmup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def _do_warmup(self, howmany, timeout_in_seconds=1800):
    """Insert *howmany* items, kill memcached via diag/eval, then wait up
    to *timeout_in_seconds* for warmup to complete and verify that no
    items were lost across the restart.
    """
    # max_time is in micro seconds
    self._insert_data(howmany)
    if int(howmany) < 50:
        self.log.info("sleep 10 seconds for small number items insert correctly into bucket")
        time.sleep(10)
    # snapshot item count and uptime so we can detect the restart later
    curr_items = int(self.onenodemc.stats()["curr_items"])
    uptime = int(self.onenodemc.stats()["uptime"])
    RebalanceHelper.wait_for_persistence(self.master, "default")
    self.log.info("sleeping for 10 seconds")
    time.sleep(10)
    rest = RestConnection(self.master)
    # prefer the testrunner kill API; fall back to killing ns_port_sup children
    command = "try ns_server_testrunner_api:kill_memcached(20000) catch _:_ -> [erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)] end."
    memcached_restarted, content = rest.diag_eval(command)
    self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
    #wait until memcached starts
    start = time.time()
    memcached_restarted = False
    while time.time() - start < 60:
        try:
            self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default")
            value = int(self.onenodemc.stats()["uptime"])
            # an uptime lower than the pre-kill snapshot proves a restart
            if value < uptime:
                self.log.info("memcached restarted...")
                memcached_restarted = True
                break
            self.onenodemc.close()
            # The uptime stat have a 1 sec resolution so there is no point of
            # retrying more often
            time.sleep(1)
        except Exception:
            # connection refused while memcached is still down; keep polling
            time.sleep(1)
    self.assertTrue(memcached_restarted, "memcached restarted and uptime is now reset")
    # Warmup till curr_items match
    self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default")
    stats = self.onenodemc.stats()
    present_count = int(stats["curr_items"])
    ep_warmup_thread = stats["ep_warmup_thread"]
    self.log.info("ep curr_items : {0}, inserted_items {1} directly after kill_memcached ".format(present_count, curr_items))
    self.log.info("ep_warmup_thread directly after kill_memcached: {0}".format(ep_warmup_thread))
    start = time.time()
    while ep_warmup_thread != "complete":
        if (time.time() - start) <= timeout_in_seconds:
            stats = self.onenodemc.stats()
            present_count = int(stats["curr_items"])
            ep_warmup_thread = stats["ep_warmup_thread"]
            self.log.warn("curr_items {0}, ep_warmup_thread {1}".format(present_count, ep_warmup_thread))
            time.sleep(1)
        else:
            self.fail("Timed out waiting for warmup")
    stats = self.onenodemc.stats()
    present_count = int(stats["curr_items"])
    if present_count < curr_items:
        self.log.error("Warmup failed. Got {0} and expected {1} items".format(present_count, curr_items))
        self.fail("Warmup failed. Incomplete number of messages after killing memcached")
    if "ep_warmup_time" not in stats:
        self.log.error("'ep_warmup_time' was not found in stats:{0}".format(stats))
    warmup_time = int(stats["ep_warmup_time"])
    self.log.info("ep_warmup_time is {0}".format(warmup_time))
示例13: GetrTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
class GetrTests(BaseTestCase):
    """Tests for GETR (get-from-replica) behaviour under mutations,
    expiry, ejection, warmup and rebalance."""
    # values for the "rebalance" test parameter
    NO_REBALANCE = 0
    DURING_REBALANCE = 1
    AFTER_REBALANCE = 2
def setUp(self):
    """Run base-class setup, then attach a memcapable helper and a REST
    handle to the master; log the optional "descr" test parameter."""
    super(GetrTests, self).setUp()
    self.memcapableTestBase = MemcapableTestBase()
    self.rest = RestConnection(self.master)
    description = self.input.param("descr", "")
    if description:
        self.log.info("Test:{0}".format(description))
def tearDown(self):
    """No extra cleanup; defer entirely to BaseTestCase.tearDown."""
    super(GetrTests, self).tearDown()
def test_getr(self):
    """Load items, apply the parameterized mutations (delete/mutate/eject/
    expiry/warmup/rebalance), then read from replicas and check the count.

    Combinations that cannot return replica data (expired before the
    read, deleted or never loaded without a re-mutate) are negative
    tests and must return zero items.
    """
    item_count = self.input.param("item_count", 10000)
    replica_count = self.input.param("replica_count", 1)
    expiration = self.input.param("expiration", 0)
    delay = float(self.input.param("delay", 0))
    eject = self.input.param("eject", 0)
    delete = self.input.param("delete", 0)
    mutate = self.input.param("mutate", 0)
    warmup = self.input.param("warmup", 0)
    skipload = self.input.param("skipload", 0)
    rebalance = self.input.param("rebalance", 0)
    # combinations where replica reads are expected to find nothing
    negative_test = False
    if delay > expiration:
        negative_test = True
    if delete and not mutate:
        negative_test = True
    if skipload and not mutate:
        negative_test = True
    prefix = str(uuid.uuid4())[:7]
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
    BucketOperationHelper.create_bucket(self.master, name=self.default_bucket_name, replica=replica_count, port=11210, test_case=self, bucket_ram=-1, password="")
    if rebalance == GetrTests.DURING_REBALANCE or rebalance == GetrTests.AFTER_REBALANCE:
        # leave 1 node unclustered for rebalance in
        ClusterOperationHelper.begin_rebalance_out(self.master, self.servers[-1:])
        ClusterOperationHelper.end_rebalance(self.master)
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers[:-1])
        ClusterOperationHelper.end_rebalance(self.master)
    else:
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
        ClusterOperationHelper.end_rebalance(self.master)
    vprefix = ""
    if not skipload:
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
        if not expiration:
            RebalanceHelper.wait_for_stats_int_value(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), "<=", 600, True)
    if delete:
        self._delete_items(item_count=item_count, prefix=prefix)
    if mutate:
        # reload with a value prefix so mutated values are distinguishable
        vprefix = "mutated"
        self._load_items(item_count=item_count, expiration=expiration, prefix=prefix, vprefix=vprefix)
    self.assertTrue(RebalanceHelper.wait_for_replication(self.rest.get_nodes(), timeout=180),
                    msg="replication did not complete")
    if eject:
        self._eject_items(item_count=item_count, prefix=prefix)
    if delay:
        self.sleep(delay)
    if rebalance == GetrTests.DURING_REBALANCE:
        ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    if rebalance == GetrTests.AFTER_REBALANCE:
        ClusterOperationHelper.end_rebalance(self.master)
    if warmup:
        self.log.info("restarting memcached")
        # restart memcached on all nodes via diag/eval; fall back to a
        # ns_port_sup port restart when the testrunner api is unavailable
        command = "rpc:multicall(erlang, apply, [fun () -> try ns_server_testrunner_api:restart_memcached(20000) catch _:_ -> ns_port_sup:restart_port_by_name(memcached) end end, []], 20000)."
        memcached_restarted, content = self.rest.diag_eval(command)
        #wait until memcached starts
        self.assertTrue(memcached_restarted, "unable to restart memcached process through diag/eval")
        RebalanceHelper.wait_for_stats(self.master, self.default_bucket_name, "curr_items_tot", item_count * (replica_count + 1), 600)
    count = self._getr_items(item_count=item_count, replica_count=replica_count, prefix=prefix, vprefix=vprefix)
    if negative_test:
        self.assertTrue(count == 0, "found {0} items, expected none".format(count))
    else:
        self.assertTrue(count == replica_count * item_count, "expected {0} items, got {1} items".format(replica_count * item_count, count))
    if rebalance == GetrTests.DURING_REBALANCE:
        ClusterOperationHelper.end_rebalance(self.master)
def _load_items(self, item_count, expiration, prefix, vprefix=""):
flags = 0
client = MemcachedClientHelper.proxy_client(self.master, self.default_bucket_name)
time_start = time.time()
for i in range(item_count):
timeout_end = time.time() + 10
#.........这里部分代码省略.........
示例14: RebalanceProgressTests
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
#.........这里部分代码省略.........
self._check_stats(servers_init, previous_stats, new_stats, "outgoing")
previous_stats = copy.deepcopy(new_stats)
time.sleep(1)
rebalance.result()
def test_progress_rebalance_swap(self):
    """Run a swap rebalance and validate the detailed-progress counters
    (ingoing/outgoing, per node) on every polling tick until it finishes."""
    if self.nodes_in != self.nodes_out:
        self.fail("nodes_in != nodes_out. Not a swap rebalance")
    servers_in = self.servers[self.nodes_init : self.nodes_init + self.nodes_in]
    servers_init = self.servers[:self.nodes_init]
    servers_unchanged = self.servers[:(self.nodes_init - self.nodes_out)]
    servers_out = self.servers[(self.nodes_init - self.nodes_out) : self.nodes_init]
    rebalance = self.cluster.async_rebalance(servers_init, servers_in, servers_out)
    self.sleep(5, "wait for rebalance start")
    previous_stats = self._get_detailed_progress()
    while rebalance.state != "FINISHED":
        new_stats = self._get_detailed_progress()
        if new_stats == {}:
            # progress endpoint returns {} once the rebalance is over
            self.log.info("Got empty progress")
            break
        #vbuckets left should go decreasing
        #docsTotal and docsTransferred should be 0 in added nodes
        #no vbuckets moving for unchanged nodes
        #docsTotal should not change
        #docsTransferred should go increasing
        self._check_stats(servers_in, previous_stats, new_stats, "outgoing",
                          docs_total=0, docs_transf=0)
        self._check_stats(servers_in, previous_stats, new_stats, "ingoing")
        self._check_stats(servers_unchanged, previous_stats, new_stats, "ingoing",
                          active_vb=0, replica_vb=0)
        self._check_stats(servers_unchanged, previous_stats, new_stats, "outgoing",
                          active_vb=0, replica_vb=0)
        self._check_stats(servers_out, previous_stats, new_stats, "outgoing")
        #sum of sending and receiving vbuckets should coincide
        self._check_vb_sums(servers_in, servers_out, new_stats)
        previous_stats = copy.deepcopy(new_stats)
        time.sleep(1)
    rebalance.result()
def _check_vb_sums(self, servers_ingoing, servers_outgoing, new_stats):
    """Assert that the total active vbuckets still to be received equals
    the total still to be sent, per the detailed-progress stats."""
    incoming_total = sum(new_stats[srv.ip]["ingoing"]['activeVBucketsLeft']
                         for srv in servers_ingoing)
    outgoing_total = sum(new_stats[srv.ip]["outgoing"]['activeVBucketsLeft']
                         for srv in servers_outgoing)
    self.assertTrue(incoming_total == outgoing_total,
                    "Active vbuckets left should be equal in servers_in and init. %s" % new_stats)
def _check_stats(self, servers, previous_stats, new_stats, type,
                 docs_total=None, docs_transf=None,
                 active_vb=None, replica_vb=None):
    """Validate detailed rebalance-progress counters for *servers*.

    type selects the "ingoing" or "outgoing" stat block per node.
    While the same bucket is moving, the vbuckets-left counters must not
    grow and docsTransferred must not shrink; when the rebalance moves
    on to a new bucket the vbucket counters may legitimately reset
    upwards. The optional keyword arguments pin a counter to an exact
    expected value.
    """
    self.assertTrue(new_stats["buckets_count"] == len(self.buckets),
                    "Expected buckets %s. Actual stat %s" % (
                        len(self.buckets), new_stats))
    for server in servers:
        current_stat = new_stats[server.ip][type]
        previous_stat = previous_stats[server.ip][type]
        if new_stats["bucket"] != previous_stats["bucket"]:
            # a new bucket started moving: counters may only reset upwards
            self.assertTrue(current_stat['activeVBucketsLeft'] >= previous_stat['activeVBucketsLeft'],
                            "activeVBucketsLeft for node %s decreased! Previous stat %s. Actual: %s" % (
                                server.ip, current_stat, previous_stat))
            self.assertTrue(current_stat['replicaVBucketsLeft'] >= previous_stat['replicaVBucketsLeft'],
                            "replicaVBucketsLeft for node %s decreased! Previous stat %s. Actual: %s" % (
                                server.ip, current_stat, previous_stat))
        else:
            self.assertTrue(current_stat['activeVBucketsLeft'] <= previous_stat['activeVBucketsLeft'],
                            "activeVBucketsLeft for node %s increased! Previous stat %s. Actual: %s" % (
                                server.ip, current_stat, previous_stat))
            self.assertTrue(current_stat['replicaVBucketsLeft'] <= previous_stat['replicaVBucketsLeft'],
                            "replicaVBucketsLeft for node %s increased! Previous stat %s. Actual: %s" % (
                                server.ip, current_stat, previous_stat))
        try:
            if current_stat['docsTotal'] != previous_stat['docsTotal']:
                self.log.warn("docsTotal for node %s changed! Previous stat %s. Actual: %s" % (
                    server.ip, current_stat, previous_stat))
        except Exception as ex:  # py2.6+/py3 syntax, matching the `as e` style used elsewhere in this file
            if previous_stat['docsTotal'] != 0 and current_stat['docsTotal'] == 0:
                # dump the rebalance observer state into the logs for diagnosis
                command = "sys:get_status({global, ns_rebalance_observer})."
                self.log.info("posting: %s" % command)
                self.rest.diag_eval(command)
            raise ex
        self.assertTrue(current_stat['docsTransferred'] >= previous_stat['docsTransferred'],
                        "docsTransferred for node %s decreased! Previous stat %s. Actual: %s" % (
                            server.ip, current_stat, previous_stat))
        if docs_total is not None:
            self.assertTrue(current_stat['docsTotal'] == docs_total,
                            "DocTotal for %s is %s, but should be %s. Stat %s" % (
                                server.ip, current_stat['docsTotal'], docs_total, current_stat))
        if docs_transf is not None:
            # fixed: report docsTransferred (the checked value), not docsTotal
            self.assertTrue(current_stat['docsTransferred'] == docs_transf,
                            "docsTransferred for %s is %s, but should be %s. Stat %s" % (
                                server.ip, current_stat['docsTransferred'], docs_transf, current_stat))
        if active_vb is not None:
            # fixed: message labelled the wrong counter (copy-paste from above)
            self.assertTrue(current_stat['activeVBucketsLeft'] == active_vb,
                            "activeVBucketsLeft for %s is %s, but should be %s. Stat %s" % (
                                server.ip, current_stat['activeVBucketsLeft'], active_vb, current_stat))
        if replica_vb is not None:
            # fixed: message reported activeVBucketsLeft/active_vb instead of
            # the replica counter actually being asserted
            self.assertTrue(current_stat['replicaVBucketsLeft'] == replica_vb,
                            "replicaVBucketsLeft for %s is %s, but should be %s. Stat %s" % (
                                server.ip, current_stat['replicaVBucketsLeft'], replica_vb, current_stat))
    self.log.info("Checked stat: %s" % new_stats)
示例15: do_warmup
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import diag_eval [as 别名]
def do_warmup(self):
    """Insert docs, kill memcached on every node via diag/eval, then wait
    for each node's memcached to restart and begin reporting warmup stats."""
    howmany = self.num_of_docs
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self._insert_data(howmany)
    # wait until everything is persisted before killing memcached
    RebalanceHelper.wait_for_stats_on_all(self.master, "default", "ep_queue_size", 0)
    RebalanceHelper.wait_for_stats_on_all(self.master, "default", "ep_flusher_todo", 0)
    time.sleep(5)
    rest = RestConnection(self.master)
    map = {}
    # collect curr_items from all nodes
    for server in self.servers:
        mc_conn = MemcachedClientHelper.direct_client(server, "default")
        map["{0}:{1}".format(server.ip, server.port)] = {}
        map["{0}:{1}".format(server.ip, server.port)]["curr_items_tot"] = mc_conn.stats("")["curr_items_tot"]
        # uptime snapshot lets us detect the restart below
        map["{0}:{1}".format(server.ip, server.port)]["previous_uptime"] = mc_conn.stats("")["uptime"]
        self.log.info(
            "memcached {0}:{1} has {2} items".format(server.ip, server.port, mc_conn.stats("")["curr_items_tot"])
        )
        mc_conn.close()
    # Killing Memcached
    nodes = rest.node_statuses()
    for node in nodes:
        _node = {
            "ip": node.ip,
            "port": node.port,
            "username": self.servers[0].rest_username,
            "password": self.servers[0].rest_password,
        }
        _mc = MemcachedClientHelper.direct_client(_node, "default")
        pid = _mc.stats()["pid"]
        node_rest = RestConnection(_node)
        # kill -9 on the node itself, run through erlang's os:cmd
        command = 'os:cmd("kill -9 {0} ")'.format(pid)
        self.log.info(command)
        killed = node_rest.diag_eval(command)
        self.log.info("killed ?? {0} ".format(killed))
        _mc.close()
    start = time.time()
    memcached_restarted = False
    for server in self.servers:
        mc = None
        while time.time() - start < 60:
            try:
                mc = MemcachedClientHelper.direct_client(server, "default")
                stats = mc.stats()
                new_uptime = int(stats["uptime"])
                # an uptime below the pre-kill snapshot proves a restart
                if new_uptime < map["{0}:{1}".format(server.ip, server.port)]["previous_uptime"]:
                    self.log.info("memcached restarted...")
                    memcached_restarted = True
                    break
            except Exception:
                # memcached still down; close any half-open client and retry
                self.log.error("unable to connect to {0}:{1}".format(server.ip, server.port))
                if mc:
                    mc.close()
                time.sleep(1)
        if not memcached_restarted:
            self.fail("memcached did not start {0}:{1}".format(server.ip, server.port))
    for server in self.servers:
        mc = MemcachedClientHelper.direct_client(server, "default")
        expected_curr_items_tot = map["{0}:{1}".format(server.ip, server.port)]["curr_items_tot"]
        now_items = 0
        start = time.time()
        # the master is given longer to warm up than the other nodes
        if server == self.servers[0]:
            wait_time = 600
        else:
            wait_time = 60
        # Try to get the stats for 10 minutes, else hit out.
        while time.time() - start < wait_time:
            # Get the wamrup time for each server
            try:
                stats = mc.stats()
                if stats is not None:
                    warmup_time = int(stats["ep_warmup_time"])
                    self.log.info("ep_warmup_time is %s " % warmup_time)
                    self.log.info(
                        "Collected the stats {0} for server {1}:{2}".format(
                            stats["ep_warmup_time"], server.ip, server.port
                        )
                    )
                    break
                else:
                    self.log.info(" Did not get the stats from the server yet, trying again.....")
                    time.sleep(2)
            except Exception as e:
                self.log.error(
                    "Could not get warmup_time stats from server {0}:{1}, exception {2}".format(
                        server.ip, server.port, e
                    )
                )
# ......... remainder of this method omitted in this excerpt .........