This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.print_UI_logs. If you are unsure what RestConnection.print_UI_logs does or how to call it, the curated examples below should help; for more context you can also look at the containing class, membase.api.rest_client.RestConnection.
Six code examples of RestConnection.print_UI_logs are shown below, sorted by popularity by default.
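All six examples share the same diagnostic pattern: when a test hits an unexpected cluster state and is about to fail, it builds a RestConnection from the target server and calls print_UI_logs() so the cluster-side UI log entries are captured in the test output before the assertion fires. A minimal sketch of that pattern is shown below; the helper name dump_ui_logs_and_fail and its arguments are illustrative only, not part of the library:

from membase.api.rest_client import RestConnection

def dump_ui_logs_and_fail(testcase, master, reason):
    # Build a REST client against the cluster node under test.
    rest = RestConnection(master)
    # Dump the recent cluster UI log entries into the test log so the
    # server-side view of the failure is captured before the test aborts.
    rest.print_UI_logs()
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail(reason)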
Example 1: wait_for_failover_or_assert
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
    time_start = time.time()
    time_max_end = time_start + timeout
    failover_count = 0
    while time.time() < time_max_end:
        failover_count = AutoFailoverBaseTest.get_failover_count(master)
        if failover_count == autofailover_count:
            testcase.log.info("{0} nodes failed over as expected".format(failover_count))
            testcase.log.info("expected failover in {0} seconds, actual time {1} seconds".format(
                timeout - AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, time.time() - time_start))
            return
        time.sleep(2)
    rest = RestConnection(master)
    rest.print_UI_logs()
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail("{0} nodes failed over, expected {1} in {2} seconds".format(
        failover_count, autofailover_count, time.time() - time_start))
Example 2: wait_for_warmup_or_assert
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
def wait_for_warmup_or_assert(master, warmup_count, timeout, testcase):
    time_start = time.time()
    time_max_end = time_start + timeout
    bucket_name = testcase.rest.get_buckets()[0].name
    while time.time() < time_max_end:
        num_nodes_with_warmup = 0
        for node in testcase.rest.get_bucket(bucket_name).nodes:
            if node.status == 'warmup':
                num_nodes_with_warmup += 1
        if num_nodes_with_warmup == warmup_count:
            testcase.log.info("{0} nodes warmup as expected".format(num_nodes_with_warmup))
            testcase.log.info("expected warmup in {0} seconds, actual time {1} seconds".format(
                timeout - AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                time.time() - time_start))
            return
        time.sleep(2)
    rest = RestConnection(master)
    rest.print_UI_logs()
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail("{0} nodes warmup, expected {1} in {2} seconds".format(
        num_nodes_with_warmup, warmup_count, time.time() - time_start))
Example 3: FailoverTests
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
#......... part of the code omitted here .........
        graceful_count = 0
        graceful_failover = True
        failed_over = True
        for node in chosen:
            unreachable = False
            if failover_reason == 'stop_server':
                unreachable = True
                self.stop_server(node)
                self.log.info("10 seconds delay to wait for membase-server to shutdown")
                # wait for 5 minutes until node is down
                self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", self.wait_timeout * 10),
                                msg="node status is not unhealthy even after waiting for 5 minutes")
            elif failover_reason == "firewall":
                unreachable = True
                self.filter_list.append(node.ip)
                server = [srv for srv in self.servers if node.ip == srv.ip][0]
                RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
                status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", self.wait_timeout * 10)
                if status:
                    self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
                else:
                    # verify iptables on the node if something wrong
                    for server in self.servers:
                        if server.ip == node.ip:
                            shell = RemoteMachineShellConnection(server)
                            info = shell.extract_remote_info()
                            if info.type.lower() == "windows":
                                o, r = shell.execute_command("netsh advfirewall show allprofiles")
                                shell.log_command_output(o, r)
                            else:
                                o, r = shell.execute_command("/sbin/iptables --list")
                                shell.log_command_output(o, r)
                            shell.disconnect()
                    self.rest.print_UI_logs()
                    api = self.rest.baseUrl + 'nodeStatuses'
                    status, content, header = self.rest._http_request(api)
                    json_parsed = json.loads(content)
                    self.log.info("nodeStatuses: {0}".format(json_parsed))
                    self.fail("node status is not unhealthy even after waiting for 5 minutes")
            # verify the failover type
            if self.check_verify_failover_type:
                graceful_count, graceful_failover = self.verify_failover_type(node, graceful_count, self.num_replicas, unreachable)
            # define precondition check for failover
            success_failed_over = self.rest.fail_over(node.id, graceful=(self.graceful and graceful_failover))
            if self.graceful and graceful_failover:
                if self.stopGracefulFailover or self.killNodes or self.stopNodes or self.firewallOnNodes:
                    self.victim_node_operations(node)
                    # Start Graceful Again
                    self.log.info(" Start Graceful Failover Again !")
                    success_failed_over = self.rest.fail_over(node.id, graceful=(self.graceful and graceful_failover))
                msg = "graceful failover failed for nodes {0}".format(node.id)
                self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
            else:
                msg = "rebalance failed while removing failover nodes {0}".format(node.id)
                self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg=msg)
            failed_over = failed_over and success_failed_over

        # Check for negative cases
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                self.rest.print_UI_logs()
            self.assertFalse(failed_over, "Graceful Failover was started for unhealthy node!!!")
            return
        elif self.gracefulFailoverFail and not failed_over:
            """ Check if the fail_over fails as expected """
Example 4: FailoverTests
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
#......... part of the code omitted here .........
self.log.info("failover_reason : {0}".format(failover_reason))
self.log.info("num_failed_nodes : {0}".format(self.num_failed_nodes))
self.log.info('picking server : {0} as the master'.format(self.referenceNode))
def run_failover_operations(self, chosen, failover_reason):
""" Method to run fail over operations used in the test scenario based on failover reason """
# Perform Operations relalted to failover
for node in chosen:
if failover_reason == 'stop_server':
self.stop_server(node)
self.log.info("10 seconds delay to wait for membase-server to shutdown")
# wait for 5 minutes until node is down
self.assertTrue(RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300),
msg="node status is not unhealthy even after waiting for 5 minutes")
elif failover_reason == "firewall":
server = [srv for srv in self.servers if node.ip == srv.ip][0]
RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
status = RestHelper(self.rest).wait_for_node_status(node, "unhealthy", 300)
if status:
self.log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
else:
# verify iptables on the node if something wrong
for server in self.servers:
if server.ip == node.ip:
shell = RemoteMachineShellConnection(server)
info = shell.extract_remote_info()
if info.type.lower() == "windows":
o, r = shell.execute_command("netsh advfirewall show allprofiles")
shell.log_command_output(o, r)
else:
o, r = shell.execute_command("/sbin/iptables --list")
shell.log_command_output(o, r)
shell.disconnect()
self.rest.print_UI_logs()
api = self.rest.baseUrl + 'nodeStatuses'
status, content, header = self.rest._http_request(api)
json_parsed = json.loads(content)
self.log.info("nodeStatuses: {0}".format(json_parsed))
self.fail("node status is not unhealthy even after waiting for 5 minutes")
# define precondition check for failover
failed_over = self.rest.fail_over(node.id, graceful=self.graceful)
# Check for negative cases
if self.graceful and (failover_reason in ['stop_server', 'firewall']):
if failed_over:
# MB-10479
self.rest.print_UI_logs()
self.assertFalse(failed_over, "Graceful Falover was started for unhealthy node!!! ")
return
elif self.gracefulFailoverFail and failed_over:
""" Check if the fail_over fails as expected """
self.assertTrue(not failed_over,""" Graceful failover should fail due to not enough replicas """)
return
# Check if failover happened as expected or re-try one more time
if not failed_over:
self.log.info("unable to failover the node the first time. try again in 60 seconds..")
# try again in 75 seconds
self.sleep(75)
failed_over = self.rest.fail_over(node.id, graceful=self.graceful)
if self.graceful and (failover_reason not in ['stop_server', 'firewall']):
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed for Graceful Failover, stuck or did not completed")
def run_operation_tasks(self):
Example 5: _common_test_body_failed_swap_rebalance
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
def _common_test_body_failed_swap_rebalance(self):
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        # When swapping all the nodes
        if self.num_swap is len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content

    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.swap_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached and RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        rest.print_UI_logs()
        self.fail("rebalance failed even before killing memcached")

    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator and not self.cluster_run:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
        pid = o[0]
        shell.disconnect()
    else:
        times = 2
        if self.cluster_run:
            times = 20
        for i in xrange(times):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except EOFError as e:
                self.log.error("{0}. Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 2)
    if pid is None:
        self.fail("impossible to get a PID")

    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)

    # we can't get stats for the new node when the rebalance fails
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect the rebalance to fail
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        #......... part of the code omitted here .........
Example 6: common_test_body
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import print_UI_logs [as alias]
def common_test_body(self, keys_count, failover_reason):
    log = logger.Logger.get_logger()
    log.info("keys_count : {0}".format(keys_count))
    log.info("replicas : {0}".format(self.num_replicas))
    log.info("failover_reason : {0}".format(failover_reason))
    log.info('picking server : {0} as the master'.format(self.master))
    rest = RestConnection(self.master)
    versions = rest.get_nodes_versions()
    for version in versions:
        if "3" > version and self.graceful:
            log.error("Graceful failover can't be applied to nodes with version less than 3.*")
            log.error("Please check configuration parameters: SKIPPING TEST.")
            return
    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=10000, pause_secs=5, timeout_secs=180)
    self._wait_for_stats_all_buckets(self.servers)
    _servers_ = copy.deepcopy(self.servers)
    nodes = rest.node_statuses()
    RebalanceHelper.wait_for_replication(self.servers, self.cluster)
    chosen = RebalanceHelper.pick_nodes(self.master, howmany=self.num_replicas)
    for node in chosen:
        # let's do op
        if failover_reason == 'stop_server':
            self.stop_server(node)
            log.info("10 seconds delay to wait for membase-server to shutdown")
            # wait for 5 minutes until node is down
            self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                            msg="node status is not unhealthy even after waiting for 5 minutes")
        elif failover_reason == "firewall":
            server = [srv for srv in self.servers if node.ip == srv.ip][0]
            RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
            status = RestHelper(rest).wait_for_node_status(node, "unhealthy", 300)
            if status:
                log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
            else:
                # verify iptables on the node if something wrong
                for server in self.servers:
                    if server.ip == node.ip:
                        shell = RemoteMachineShellConnection(server)
                        info = shell.extract_remote_info()
                        if info.type.lower() == "windows":
                            o, r = shell.execute_command("netsh advfirewall show allprofiles")
                        else:
                            o, r = shell.execute_command("/sbin/iptables --list")
                        shell.log_command_output(o, r)
                        shell.disconnect()
                rest.print_UI_logs()
                api = rest.baseUrl + 'nodeStatuses'
                status, content, header = rest._http_request(api)
                json_parsed = json.loads(content)
                self.log.info("nodeStatuses: {0}".format(json_parsed))
                self.fail("node status is not unhealthy even after waiting for 5 minutes")

        failed_over = rest.fail_over(node.id, graceful=self.graceful)
        if self.graceful and (failover_reason in ['stop_server', 'firewall']):
            if failed_over:
                # MB-10479
                rest.print_UI_logs()
            self.assertFalse(failed_over, "Graceful Failover was started for unhealthy node!!!")
            # perform general failover
            failed_over = rest.fail_over(node.id)
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in 60 seconds..")
            # try again in 75 seconds
            self.sleep(75)
            failed_over = rest.fail_over(node.id, graceful=self.graceful)
        if self.graceful and (failover_reason not in ['stop_server', 'firewall']):
            reached = RestHelper(rest).rebalance_reached()
            self.assertTrue(reached, "rebalance failed for Graceful Failover, stuck or did not complete")
        self.assertTrue(failed_over, "unable to failover node after {0}".format(failover_reason))
        log.info("failed over node : {0}".format(node.id))
        self._failed_nodes.append(node)

    if self.add_back_flag:
        for node in self._failed_nodes:
            rest.add_back_node(node.id)
            self.sleep(5)
        self.sleep(10, "after failover before invoking rebalance...")
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[])
        msg = "rebalance failed while removing failover nodes {0}".format(chosen)
        self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
    else:
        # Need a delay > min because MB-7168
        self.sleep(60, "after failover before invoking rebalance...")
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[node.id for node in chosen])
        if self.during_ops:
            self.sleep(5, "Wait for some progress in rebalance")
            if self.during_ops == "change_password":
                old_pass = self.master.rest_password
                self.change_password(new_password=self.input.param("new_password", "new_pass"))
                rest = RestConnection(self.master)
            elif self.during_ops == "change_port":
                self.change_port(new_port=self.input.param("new_port", "9090"))
                #......... part of the code omitted here .........