This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.get_logs. If you have been wondering what RestConnection.get_logs does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also read further about the class that defines the method, membase.api.rest_client.RestConnection.
The following shows 5 code examples of RestConnection.get_logs, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
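For orientation, the snippet below is a minimal sketch of the pattern shared by all five examples: build a RestConnection for one cluster node and iterate over the entries returned by get_logs. The helper name dump_recent_ui_logs and the server argument are hypothetical; the call forms (get_logs() with no argument and get_logs(5)) and the 'text' field on each entry are taken from the examples themselves.

from membase.api.rest_client import RestConnection

def dump_recent_ui_logs(server, last_n=5):
    # `server` is assumed to be the testrunner-style node object that
    # RestConnection already accepts in the examples below (hypothetical here).
    rest = RestConnection(server)          # REST client bound to a single node
    for entry in rest.get_logs(last_n):    # each UI log entry exposes a 'text' field (see Example 5)
        print(entry['text'])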
Example 1: wait_for_failover_or_assert
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or alternatively: from membase.api.rest_client.RestConnection import get_logs [as alias]
def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
    time_start = time.time()
    time_max_end = time_start + timeout
    failover_count = 0
    while time.time() < time_max_end:
        failover_count = AutoFailoverBaseTest.get_failover_count(master)
        if failover_count == autofailover_count:
            testcase.log.info("{0} nodes failed over as expected".format(failover_count))
            testcase.log.info("expected failover in {0} seconds, actual time {1} seconds".format(
                timeout - AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME, time.time() - time_start))
            return
        time.sleep(2)
    rest = RestConnection(master)
    testcase.log.info("Latest logs from UI:")
    for i in rest.get_logs():
        testcase.log.error(i)
    testcase.log.warn("pools/default from {0} : {1}".format(master.ip, rest.cluster_status()))
    testcase.fail("{0} nodes failed over, expected {1} in {2} seconds".format(
        failover_count, autofailover_count, time.time() - time_start))
Example 2: rebalance_in_with_cluster_password_change
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or alternatively: from membase.api.rest_client.RestConnection import get_logs [as alias]
def rebalance_in_with_cluster_password_change(self):
    new_password = self.input.param("new_password", "new_pass")
    servs_result = self.servers[:self.nodes_init + self.nodes_in]
    rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                             self.servers[self.nodes_init:self.nodes_init + self.nodes_in],
                                             [])
    old_pass = self.master.rest_password
    self.sleep(10, "Wait for rebalance have some progress")
    self.change_password(new_password=new_password)
    try:
        rebalance.result()
        self.log.exception("rebalance should be failed when password is changing")
    except Exception as ex:
        self.sleep(10, "wait for rebalance failed")
        rest = RestConnection(self.master)
        self.log.info("Latest logs from UI:")
        for i in rest.get_logs():
            self.log.error(i)
        self.assertFalse(RestHelper(rest).is_cluster_rebalanced())
    finally:
        self.change_password(new_password=old_pass)
Example 3: _common_test_body_failed_swap_rebalance
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or alternatively: from membase.api.rest_client.RestConnection import get_logs [as alias]
def _common_test_body_failed_swap_rebalance(self):
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    intial_severs = self.servers[:num_initial_servers]

    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)

    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")

    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")

    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        # When swapping all the nodes
        if self.num_swap == len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content

    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))

    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))

    if self.swap_orchestrator:
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]

    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)

    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached == 100 and not RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        self.log.info("Latest logs from UI:")
        for i in rest.get_logs():
            self.log.error(i)
        self.fail("rebalance failed even before killing memcached")

    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        o, _ = shell.execute_command("ps -eo comm,pid | awk '$1 == \"memcached\" { print $2 }'")
        pid = o[0]
        shell.disconnect()
    else:
        for i in xrange(2):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except EOFError as e:
                self.log.error("{0}.Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 1)
    if pid is None:
        self.fail("impossible to get a PID")

    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)

    # we can't get stats for the new node when rebalance fails
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect that the rebalance will fail
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        # retry rebalance if it failed
        self.log.warn("Rebalance failed but it's expected")
        #......... remaining code omitted .........
Example 4: common_test_body
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or alternatively: from membase.api.rest_client.RestConnection import get_logs [as alias]
def common_test_body(self, keys_count, failover_reason):
    log = logger.Logger.get_logger()
    log.info("keys_count : {0}".format(keys_count))
    log.info("replicas : {0}".format(self.num_replicas))
    log.info("failover_reason : {0}".format(failover_reason))
    log.info('picking server : {0} as the master'.format(self.master))

    self._load_all_buckets(self.master, self.gen_create, "create", 0,
                           batch_size=10000, pause_secs=5, timeout_secs=180)
    self._wait_for_stats_all_buckets(self.servers)

    _servers_ = self.servers
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    RebalanceHelper.wait_for_replication(self.servers, self.cluster)
    chosen = RebalanceHelper.pick_nodes(self.master, howmany=self.num_replicas)
    for node in chosen:
        # let's do op
        if failover_reason == 'stop_server':
            self.stop_server(node)
            log.info("10 seconds delay to wait for membase-server to shutdown")
            # wait for 5 minutes until node is down
            self.assertTrue(RestHelper(rest).wait_for_node_status(node, "unhealthy", 300),
                            msg="node status is not unhealthy even after waiting for 5 minutes")
        elif failover_reason == "firewall":
            server = [srv for srv in self.servers if node.ip == srv.ip][0]
            RemoteUtilHelper.enable_firewall(server, bidirectional=self.bidirectional)
            status = RestHelper(rest).wait_for_node_status(node, "unhealthy", 300)
            if status:
                log.info("node {0}:{1} is 'unhealthy' as expected".format(node.ip, node.port))
            else:
                # verify iptables on the node if something wrong
                for server in self.servers:
                    if server.ip == node.ip:
                        shell = RemoteMachineShellConnection(server)
                        info = shell.extract_remote_info()
                        if info.type.lower() == "windows":
                            o, r = shell.execute_command("netsh advfirewall show allprofiles")
                        else:
                            o, r = shell.execute_command("/sbin/iptables --list")
                        shell.log_command_output(o, r)
                        shell.disconnect()
                for i in rest.get_logs():
                    self.log.error(i)
                api = rest.baseUrl + 'nodeStatuses'
                status, content, header = rest._http_request(api)
                json_parsed = json.loads(content)
                self.log.info("nodeStatuses: {0}".format(json_parsed))
                self.fail("node status is not unhealthy even after waiting for 5 minutes")

        failed_over = rest.fail_over(node.id)
        if not failed_over:
            self.log.info("unable to failover the node the first time. try again in 60 seconds..")
            # try again in 75 seconds
            time.sleep(75)
            failed_over = rest.fail_over(node.id)
        self.assertTrue(failed_over, "unable to failover node after {0}".format(failover_reason))
        log.info("failed over node : {0}".format(node.id))
        self._failed_nodes.append(node)

    if self.add_back_flag:
        for node in self._failed_nodes:
            rest.add_back_node(node.id)
            time.sleep(5)
        log.info("10 seconds sleep after failover before invoking rebalance...")
        time.sleep(10)
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[])
        msg = "rebalance failed while removing failover nodes {0}".format(chosen)
        self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
    else:
        # Need a delay > min because MB-7168
        log.info("60 seconds sleep after failover before invoking rebalance...")
        time.sleep(60)
        rest.rebalance(otpNodes=[node.id for node in nodes],
                       ejectedNodes=[node.id for node in chosen])
        if self.during_ops:
            self.sleep(5, "Wait for some progress in rebalance")
            if self.during_ops == "change_password":
                old_pass = self.master.rest_password
                self.change_password(new_password=self.input.param("new_password", "new_pass"))
                rest = RestConnection(self.master)
            elif self.during_ops == "change_port":
                self.change_port(new_port=self.input.param("new_port", "9090"))
                rest = RestConnection(self.master)
        try:
            msg = "rebalance failed while removing failover nodes {0}".format(chosen)
            self.assertTrue(rest.monitorRebalance(stop_if_loop=True), msg=msg)
            for failed in chosen:
                for server in _servers_:
                    if server.ip == failed.ip:
                        _servers_.remove(server)
                        self._cleanup_nodes.append(server)

            log.info("Begin VERIFICATION ...")
            RebalanceHelper.wait_for_replication(_servers_, self.cluster)
            self.verify_cluster_stats(_servers_, self.master)
        finally:
            if self.during_ops:
                if self.during_ops == "change_password":
                    #......... remaining code omitted .........
Example 5: AutoReprovisionTests
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or alternatively: from membase.api.rest_client.RestConnection import get_logs [as alias]
#......... part of the code omitted .........
                                                        self)
    if operation != 'restart':
        RemoteUtilHelper.common_basic_setup([self.servers[i]])
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 0,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)
    helper = RestHelper(RestConnection(self.master))
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    self.sleep(40)
    if operation == 'memcached_failure' or operation == 'failover':
        self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
    else:
        if 'kv' in self.servers[i].services and self.replicas > 0:
            self.assertFalse(helper.is_cluster_rebalanced(), "cluster is balanced")
            self.rest.rebalance(otpNodes=[node.id for node in self.rest.node_statuses()], ejectedNodes=[])
            self.assertTrue(self.rest.monitorRebalance())
        else:
            self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
    buckets = self.rest.get_buckets()
    if self.replicas == 0 and (operation == 'restart' or operation == 'reboot'):
        data_lost = True
    for bucket in buckets:
        if not data_lost:
            self.verify_loaded_data(self.master, bucket.name, self.loaded_items[bucket.name])
def test_ui_logs(self):
    timeout = self.timeout / 2
    server_fail1 = self.servers[1]
    server_fail2 = self.servers[2]
    status = self.rest.update_autoreprovision_settings(True, 2)
    if not status:
        self.fail('failed to change autoreprovision_settings!')
    self.sleep(5)
    logs = self.rest.get_logs(5)
    self.assertTrue(u'Enabled auto-reprovision config with max_nodes set to 2' in [l['text'] for l in logs])

    self.log.info("stopping the first server")
    self._stop_couchbase(server_fail1)
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 1,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)

    self.log.info("resetting the autoreprovision count")
    if not self.rest.reset_autoreprovision():
        self.fail('failed to reset autoreprovision count!')
    logs = self.rest.get_logs(5)
    self.assertTrue(u'auto-reprovision count reset from 0' in [l['text'] for l in logs])

    self.log.info("stopping the second server")
    self._stop_couchbase(server_fail2)
    AutoReprovisionBaseTest.wait_for_failover_or_assert(self.master, 2,
                                                        timeout + AutoReprovisionBaseTest.MAX_FAIL_DETECT_TIME,
                                                        self)

    settings = self.rest.get_autoreprovision_settings()
    self.assertEquals(settings.enabled, True)
    self.assertEquals(settings.max_nodes, 2)
    self.assertEquals(settings.count, 0)

    self._start_couchbase(server_fail2)
    self._start_couchbase(server_fail1)
    self.sleep(30)
    settings = self.rest.get_autoreprovision_settings()
    self.assertEquals(settings.enabled, True)
    self.assertEquals(settings.max_nodes, 2)
    self.assertEquals(settings.count, 2)
    logs = self.rest.get_logs(5)
    self.assertTrue(u'auto-reprovision is disabled as maximum number of nodes (2) '