本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.execute_cbcollect_info方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.execute_cbcollect_info方法的具体用法?Python RemoteMachineShellConnection.execute_cbcollect_info怎么用?Python RemoteMachineShellConnection.execute_cbcollect_info使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection
的用法示例。
在下文中一共展示了RemoteMachineShellConnection.execute_cbcollect_info方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbcollect_info [as 别名]
def run(self):
remote_client = RemoteMachineShellConnection(self.server)
now = datetime.now()
day = now.day
month = now.month
year = now.year
file_name = "%s-%s%s%s-diag.zip" % (self.server.ip, month, day, year)
print "Collecting logs from %s\n" % (self.server.ip)
output, error = remote_client.execute_cbcollect_info(file_name)
print "\n".join(output)
print "\n".join(error)
user_path = "/home/"
if self.server.ssh_username == "root":
user_path = "/"
if not remote_client.file_exists("%s%s" % (user_path, self.server.ssh_username), file_name):
raise Exception("%s doesn't exists on server" % (file_name))
if remote_client.get_file("%s%s" % (user_path, self.server.ssh_username), file_name, "%s/%s" % (self.path, file_name)):
print "Downloading zipped logs from %s" % (self.server.ip)
else:
raise Exception("Fail to download zipped logs from %s" % (self.server.ip))
remote_client.disconnect()
示例2: unidirectional
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbcollect_info [as 别名]
#.........这里部分代码省略.........
def replication_with_firewall_enabled(self):
    """Verify XDCR recovers after the destination master is briefly
    cut off by a firewall.

    Loads and mutates data at the source, firewalls the destination
    master for 30 seconds, then checks replication completed.
    """
    # Shorten the restart interval so XDCR retries quickly once the
    # firewall is lifted.
    self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
    self.setup_xdcr_and_load()
    self.perform_update_delete()
    # Simulate a temporary network partition of the destination master.
    NodeHelper.enable_firewall(self.dest_master)
    self.sleep(30)
    NodeHelper.disable_firewall(self.dest_master)
    self.verify_results()
"""Testing Unidirectional append ( Loading only at source) Verifying whether XDCR replication is successful on
subsequent destination clusters. """
def test_append(self):
    """Repeatedly append to every source document and verify the
    appended values replicate to the destination."""
    self.setup_xdcr_and_load()
    self.verify_results()
    iterations = self._input.param("loop_count", 20)
    for iteration in xrange(iterations):
        self.log.info("Append iteration # %s" % iteration)
        # Build a fresh generator each round: it is consumed by the load.
        append_gen = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(append_gen, ops=OPS.APPEND, batch_size=1)
        self.sleep(self._wait_timeout)
    self.verify_results()
'''
This method runs cbcollectinfo tool after setting up uniXDCR and check
whether the log generated by cbcollectinfo contains xdcr log file or not.
'''
def collectinfotest_for_xdcr(self):
    """Run cbcollect_info after uni-directional XDCR is set up and
    verify the produced zip contains the xdcr log files."""
    self.load_with_ops()
    # These self attributes are consumed later by
    # CollectinfoTests.verify_results, which runs against this instance.
    self.node_down = self._input.param("node_down", False)
    self.log_filename = self._input.param("file_name", "collectInfo")
    self.shell = RemoteMachineShellConnection(self.src_master)
    self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
    from clitest import collectinfotest
    # HACK added self.buckets data member.
    self.buckets = self.src_cluster.get_buckets()
    # Reuse the CLI test's verification logic with this test's state.
    collectinfotest.CollectinfoTests.verify_results(
        self, self.log_filename
    )
""" Verify the fix for MB-9548"""
def verify_replications_deleted_after_bucket_deletion(self):
    """Regression check for MB-9548: replication streams must
    disappear once all source buckets are deleted."""
    self.setup_xdcr_and_load()
    self.verify_results()
    src_rest = RestConnection(self.src_master)
    streams = src_rest.get_replications()
    self.assertTrue(streams, "Number of replications should not be 0")
    # Deleting the buckets should tear the replications down.
    self.src_cluster.delete_all_buckets()
    self.sleep(60)
    streams = src_rest.get_replications()
    self.log.info("Replications : %s" % streams)
    self.assertFalse(streams, "Rest returns replication list even after source bucket is deleted ")
""" Verify fix for MB-9862"""
def test_verify_memcache_connections(self):
    """Regression check for MB-9862: memcached connection count must
    stay within the configured limit under load.

    NOTE(review): this method is truncated in this excerpt; the load
    start and the connection verification continue past the visible
    lines.
    """
    allowed_memcached_conn = self._input.param("allowed_connections", 100)
    max_ops_per_second = self._input.param("max_ops_per_second", 2500)
    min_item_size = self._input.param("min_item_size", 128)
    num_docs = self._input.param("num_docs", 30000)
    # start load, max_ops_per_second is the combined limit for all buckets
    mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
    mcsodaLoad.cfg["max-ops"] = 0
    mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
    mcsodaLoad.cfg["exit-after-creates"] = 1
    mcsodaLoad.cfg["min-value-size"] = min_item_size
示例3: bidirectional
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbcollect_info [as 别名]
#.........这里部分代码省略.........
self.verify_results()
def replication_while_rebooting_a_non_master_src_dest_node(self):
    """Reboot one non-master node on each cluster during bi-XDCR and
    verify no data is lost once both nodes are back."""
    # Ephemeral buckets do not survive a reboot, so this scenario
    # does not apply to them.
    if self._input.param("bucket_type", "membase") == "ephemeral":
        self.log.info("Test case does not apply to ephemeral")
        return
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    restart_timeout = self._wait_timeout * 4
    rebooted_dest = self.dest_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(rebooted_dest, self, wait_time=restart_timeout, wait_if_warmup=True)
    rebooted_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(rebooted_src, self, wait_time=restart_timeout, wait_if_warmup=True)
    self.sleep(120)
    # ns_server must be responsive on both rebooted nodes before verify.
    for rebooted in (rebooted_dest, rebooted_src):
        ClusterOperationHelper.wait_for_ns_servers_or_assert([rebooted], self, wait_if_warmup=True)
    self.verify_results()
def test_disk_full(self):
    """Verify an 'Approaching full disk warning' is raised on both
    masters, via cbcollect_info output (non-Windows) or the /diag
    endpoint (Windows).

    Fix: the Windows branch previously always queried self.src_master,
    so the warning was never actually checked on the destination
    master; it now queries the node being inspected.
    """
    self.setup_xdcr_and_load()
    self.verify_results()
    self.sleep(self._wait_timeout)
    zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
    try:
        for node in [self.src_master, self.dest_master]:
            self.shell = RemoteMachineShellConnection(node)
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                # Unzip the collected logs and grep them for the warning.
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception("unable to unzip the files. Check unzip command output for help")
                cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                # Query the node currently under inspection (was
                # hard-coded to src_master, skipping the dest check).
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                    node.ip,
                    node.rest_username,
                    node.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertNotEquals(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
            self.log.info("Full disk warning generated as expected in %s" % node.ip)
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")
    except Exception as e:
        self.log.info(e)
def test_rollback(self):
    """Begin a rollback scenario: stop persistence on every node of
    both clusters so subsequent mutations stay in memory only.

    NOTE(review): this method is truncated in this excerpt; the steps
    that actually trigger and verify the rollback continue past the
    visible lines.
    """
    bucket = self.src_cluster.get_buckets()[0]
    src_nodes = self.src_cluster.get_nodes()
    dest_nodes = self.dest_cluster.get_nodes()
    nodes = src_nodes + dest_nodes
    # Stop Persistence on Node A & Node B
    for node in nodes:
        mem_client = MemcachedClientHelper.direct_client(node, bucket)
        mem_client.stop_persistence()
示例4: unidirectional
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbcollect_info [as 别名]
#.........这里部分代码省略.........
def replication_with_firewall_enabled(self):
    """Verify XDCR recovers after the destination master is briefly
    cut off by a firewall.

    Loads and mutates data at the source, firewalls the destination
    master for 30 seconds, then checks replication completed.
    """
    # Shorten the restart interval so XDCR retries quickly once the
    # firewall is lifted.
    self.src_cluster.set_xdcr_param("xdcrFailureRestartInterval", 1)
    self.setup_xdcr_and_load()
    self.perform_update_delete()
    # Simulate a temporary network partition of the destination master.
    NodeHelper.enable_firewall(self.dest_master)
    self.sleep(30)
    NodeHelper.disable_firewall(self.dest_master)
    self.verify_results()
"""Testing Unidirectional append ( Loading only at source) Verifying whether XDCR replication is successful on
subsequent destination clusters. """
def test_append(self):
    """Repeatedly append to every source document and verify the
    appended values replicate to the destination."""
    self.setup_xdcr_and_load()
    self.verify_results()
    iterations = self._input.param("loop_count", 20)
    for iteration in xrange(iterations):
        self.log.info("Append iteration # %s" % iteration)
        # Build a fresh generator each round: it is consumed by the load.
        append_gen = BlobGenerator('loadOne', 'loadOne', self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(append_gen, ops=OPS.APPEND, batch_size=1)
        self.sleep(self._wait_timeout)
    self.verify_results()
'''
This method runs cbcollectinfo tool after setting up uniXDCR and check
whether the log generated by cbcollectinfo contains xdcr log file or not.
'''
def collectinfotest_for_xdcr(self):
    """Run cbcollect_info after uni-directional XDCR is set up and
    verify the produced zip contains the xdcr log files."""
    self.load_with_ops()
    # These self attributes are consumed later by
    # CollectinfoTests.verify_results, which runs against this instance.
    self.node_down = self._input.param("node_down", False)
    self.log_filename = self._input.param("file_name", "collectInfo")
    self.shell = RemoteMachineShellConnection(self.src_master)
    self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
    from clitest import collectinfotest
    # HACK added self.buckets data member.
    self.buckets = self.src_cluster.get_buckets()
    # Reuse the CLI test's verification logic with this test's state.
    collectinfotest.CollectinfoTests.verify_results(
        self, self.log_filename
    )
""" Verify the fix for MB-9548"""
def test_verify_replications_stream_delete(self):
    """Regression check for MB-9548: deleting all source buckets must
    remove every replication stream."""
    self.setup_xdcr_and_load()
    self.verify_results()
    src_rest = RestConnection(self.src_master)
    streams = src_rest.get_replications()
    self.assertTrue(streams, "Number of replication streams should not be 0")
    # Deleting the buckets should tear the streams down.
    self.src_cluster.delete_all_buckets()
    streams = src_rest.get_replications()
    self.assertFalse(streams, "No replication streams should exists after deleting the buckets")
""" Verify fix for MB-9862"""
def test_verify_memcache_connections(self):
    """Regression check for MB-9862: memcached connection count must
    stay within the configured limit under load.

    NOTE(review): this method is truncated in this excerpt; the load
    start and the connection verification continue past the visible
    lines.
    """
    allowed_memcached_conn = self._input.param("allowed_connections", 100)
    max_ops_per_second = self._input.param("max_ops_per_second", 2500)
    min_item_size = self._input.param("min_item_size", 128)
    num_docs = self._input.param("num_docs", 30000)
    # start load, max_ops_per_second is the combined limit for all buckets
    mcsodaLoad = LoadWithMcsoda(self.src_master, num_docs, prefix='')
    mcsodaLoad.cfg["max-ops"] = 0
    mcsodaLoad.cfg["max-ops-per-sec"] = max_ops_per_second
    mcsodaLoad.cfg["exit-after-creates"] = 1
    mcsodaLoad.cfg["min-value-size"] = min_item_size
    mcsodaLoad.cfg["json"] = 0
示例5: bidirectional
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbcollect_info [as 别名]
#.........这里部分代码省略.........
finally:
# For timeout error, all tasks to be cancelled
# Before proceeding to next test
for task in tasks:
task.cancel()
"""Replication with disabled/enabled ddoc compaction on both clusters.
This test begins by loading a given number of items on both clusters.
Then we disabled or enabled compaction on both clusters( set via params).
Then we mutate and delete data on clusters 3 times. After deletion we recreate
deleted items. When data was changed 3 times we perform
a full verification: wait for the disk queues to drain
and then verify that there has been no data loss on both clusters."""
def replication_with_disabled_ddoc_compaction(self):
    """Bi-directional XDCR with ddoc compaction optionally disabled.

    Loads both clusters, disables compaction on C1/C2 as configured,
    mutates and deletes data three times, recreates the deleted items,
    then runs a full verification to ensure neither cluster lost data.
    """
    self.setup_xdcr()
    self.src_cluster.load_all_buckets(self._num_items)
    self.dest_cluster.load_all_buckets(self._num_items)
    if "C1" in self._disable_compaction:
        self.src_cluster.disable_compaction()
    if "C2" in self._disable_compaction:
        self.dest_cluster.disable_compaction()
    # perform doc's ops 3 times to increase rev number
    for _ in range(3):
        self.async_perform_update_delete()
    # wait till deletes have been sent before recreating the items
    self.sleep(60)
    # restore (re-create) deleted items on each cluster marked for
    # deletion; C1 is processed before C2, as in the original flow
    for label, cluster in (('C1', self.src_cluster), ('C2', self.dest_cluster)):
        if label not in self._del_clusters:
            continue
        kv_gen = cluster.get_kv_gen()
        delete_gen = copy.deepcopy(kv_gen[OPS.DELETE])
        if self._expires:
            # if expiration set, recreate those keys before trying to update
            update_gen = copy.deepcopy(kv_gen[OPS.UPDATE])
            cluster.load_all_buckets_from_generator(kv_gen=update_gen)
        cluster.load_all_buckets_from_generator(kv_gen=delete_gen)
    # wait till we recreate deleted keys before we can delete/update
    self.sleep(60)
    self.verify_results()
def replication_while_rebooting_a_non_master_src_dest_node(self):
    """Reboot one non-master node on each cluster mid-replication and
    verify data integrity once both nodes are back online."""
    self.setup_xdcr_and_load()
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    restart_timeout = self._wait_timeout * 4
    rebooted_dest = self.dest_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(rebooted_dest, self, wait_time=restart_timeout, wait_if_warmup=True)
    rebooted_src = self.src_cluster.reboot_one_node(self)
    NodeHelper.wait_node_restarted(rebooted_src, self, wait_time=restart_timeout, wait_if_warmup=True)
    self.sleep(120)
    # ns_server must be responsive on both rebooted nodes before verify.
    for rebooted in (rebooted_dest, rebooted_src):
        ClusterOperationHelper.wait_for_ns_servers_or_assert([rebooted], self, wait_if_warmup=True)
    self.verify_results()
def test_disk_full(self):
    """Verify an 'Approaching full disk warning' is raised on both
    masters, via cbcollect_info output (non-Windows) or the /diag
    endpoint (Windows).

    Fix: the Windows branch previously always queried self.src_master,
    so the warning was never actually checked on the destination
    master; it now queries the node being inspected.
    """
    self.setup_xdcr_and_load()
    self.verify_results()
    self.sleep(self._wait_timeout)
    zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo"))
    try:
        for node in [self.src_master, self.dest_master]:
            self.shell = RemoteMachineShellConnection(node)
            self.shell.execute_cbcollect_info(zip_file)
            if self.shell.extract_remote_info().type.lower() != "windows":
                # Unzip the collected logs and grep them for the warning.
                command = "unzip %s" % (zip_file)
                output, error = self.shell.execute_command(command)
                self.shell.log_command_output(output, error)
                if len(error) > 0:
                    raise Exception("unable to unzip the files. Check unzip command output for help")
                cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/'
                output, _ = self.shell.execute_command(cmd)
            else:
                # Query the node currently under inspection (was
                # hard-coded to src_master, skipping the dest check).
                cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format(
                    node.ip,
                    node.rest_username,
                    node.rest_password)
                output, _ = self.shell.execute_command(cmd)
            self.assertNotEquals(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip)
            self.log.info("Full disk warning generated as expected in %s" % node.ip)
            self.shell.delete_files(zip_file)
            self.shell.delete_files("cbcollect_info*")
    except Exception as e:
        self.log.info(e)