本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.is_couchbase_installed方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.is_couchbase_installed方法的具体用法?Python RemoteMachineShellConnection.is_couchbase_installed怎么用?Python RemoteMachineShellConnection.is_couchbase_installed使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection
的用法示例。
在下文中一共展示了RemoteMachineShellConnection.is_couchbase_installed方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: change_erlang_threads_values
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def change_erlang_threads_values(servers, sync_threads=True, num_threads="16:16"):
    """Change the type of erlang scheduler threads and their count.

    sync_threads=True  -> sync threads (+S) with default count "16:16"
    sync_threads=False -> async threads (+A), e.g. "+A 16"
    Default: +S 16:16

    The flag is patched in place (via sed) in the couchbase-server /
    membase-server startup script on every server in `servers`.
    """
    log = logger.Logger.get_logger()
    for server in servers:
        sh = RemoteMachineShellConnection(server)
        # The install path differs depending on which product is present.
        product = "couchbase" if sh.is_couchbase_installed() else "membase"
        # FIX: replaced the fragile `cond and "S" or "A"` idiom with a
        # conditional expression (equivalent here, but the and/or trick
        # breaks whenever the "true" value is falsy).
        sync_type = "S" if sync_threads else "A"
        command = "sed -i 's/+[A,S] .*/+%s %s \\\/g' /opt/%s/bin/%s-server" % (
            sync_type,
            num_threads,
            product,
            product,
        )
        o, r = sh.execute_command(command)
        sh.log_command_output(o, r)
        msg = "modified erlang +%s to %s for server %s"
        log.info(msg % (sync_type, num_threads, server.ip))
示例2: stop_cluster
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def stop_cluster(servers):
    """Stop the server process on every node in `servers`, using the
    stop command that matches the installed product."""
    for node in servers:
        conn = RemoteMachineShellConnection(node)
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
        else:
            conn.stop_membase()
示例3: _stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def _stop_server(self, node):
    """Stop the couchbase/membase service on the configured server whose
    ip and port match `node`, then return immediately."""
    wanted = (node.ip, str(node.port))
    for candidate in self.servers:
        if (candidate.ip, candidate.port) != wanted:
            continue
        conn = RemoteMachineShellConnection(candidate)
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
            self.log.info("Couchbase stopped")
        else:
            conn.stop_membase()
            self.log.info("Membase stopped")
        conn.disconnect()
        return
示例4: run
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def run(self):
remote = RemoteMachineShellConnection(self.server)
server_type = 'membase'
if remote.is_couchbase_installed():
server_type = 'couchbase'
stamp = time.strftime("%d_%m_%Y_%H_%M")
try:
info = remote.extract_remote_info()
if info.type.lower() != 'windows':
core_files = []
print "looking for crashes on {0} ... ".format(info.ip)
print "erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
print "core* files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
print "core* files under /tmp/"
core_files.extend(remote.file_starts_with("/tmp/", "core"))
if core_files:
print "found crashes on {0}: {1}".format(info.ip, core_files)
else:
print "crashes not found on {0}".format(info.ip)
i = 0
for core_file in core_files:
if core_file.find('erl_crash.dump') != -1:
#let's just copy that file back
erl_crash_file_name = "erlang-{0}-{1}.log".format(self.server.ip, i)
remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
if remote.get_file(remote_path, file_name, os.path.join(self.path, erl_crash_file_name)):
print 'downloaded core file : {0}'.format(core_file)
i += 1
else:
command = "/opt/{0}/bin/tools/cbanalyze-core".format(server_type)
core_file_name = "core-{0}-{1}.log".format(self.server.ip, i)
core_log_output = "/tmp/{0}".format(core_file_name)
output, _ = remote.execute_command('{0} {1} -f {2}'.format(command, core_file, core_log_output))
print output
remote_path, file_name = os.path.dirname(core_log_output), os.path.basename(core_log_output)
if remote.get_file(remote_path, file_name, os.path.join(self.path, core_file_name)):
print 'downloaded core file : {0}'.format(core_log_output)
i += 1
if i > 0:
command = "mkdir -p /tmp/backup_crash/{0};mv -f /tmp/core* /tmp/backup_crash/{0}; mv -f /opt/{0}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}".\
format(stamp, server_type)
print "put all crashes on {0} in backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp)
remote.execute_command(command)
output, error = remote.execute_command("ls -la /tmp/backup_crash/{0}".format(stamp))
for o in output:
print o
remote.disconnect()
if remote:
remote.disconnect()
except Exception as ex:
print ex
示例5: stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def stop_server(self, node):
    """ Method to stop a server which is subject to failover """
    for candidate in self.servers:
        if candidate.ip != node.ip:
            continue
        conn = RemoteMachineShellConnection(candidate)
        # Use the stop command for whichever product is installed.
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
            self.log.info("Couchbase stopped")
        else:
            conn.stop_membase()
            self.log.info("Membase stopped")
        conn.disconnect()
        break
示例6: stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def stop_server(self, node):
    """Stop the server process on the first configured server whose ip
    matches `node`."""
    log = logger.Logger.get_logger()
    for candidate in self.servers:
        if candidate.ip != node.ip:
            continue
        conn = RemoteMachineShellConnection(candidate)
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
            log.info("Couchbase stopped")
        else:
            conn.stop_membase()
            log.info("Membase stopped")
        conn.disconnect()
        break
示例7: change_erlang_async
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def change_erlang_async(servers, value=16):
    """Change the number of async internal erlang threads
    Identified with +A param
    Default: 16
    """
    log = logger.Logger.get_logger()
    for node in servers:
        conn = RemoteMachineShellConnection(node)
        # The startup-script path depends on the installed product.
        product = "couchbase" if conn.is_couchbase_installed() else "membase"
        script = "/opt/%s/bin/%s-server" % (product, product)
        command = "sed -i 's/+A .*/+A %s \\\/g' %s" % (value, script)
        out, err = conn.execute_command(command)
        conn.log_command_output(out, err)
        log.info("modified erlang +A to %s for server %s" % (value, node.ip))
示例8: set_erlang_schedulers
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def set_erlang_schedulers(servers, value="16:16"):
    """
    Set the number of erlang scheduler threads (+S) in the server
    startup script and restart the cluster.
    Also erases the async option (+A).
    """
    ClusterOperationHelper.stop_cluster(servers)
    log = logger.Logger.get_logger()
    for server in servers:
        sh = RemoteMachineShellConnection(server)
        product = "membase"
        if sh.is_couchbase_installed():
            product = "couchbase"
        # NOTE(review): this pattern only matches a literal "S 128:128"
        # default in the script -- confirm that is the shipped value.
        command = "sed -i 's/S\+ 128:128/S %s/' /opt/%s/bin/%s-server" % (value, product, product)
        o, r = sh.execute_command(command)
        sh.log_command_output(o, r)
        # FIX: the log message claimed "+A" but this edits the +S
        # schedulers setting.
        log.info("modified erlang +S to %s for server %s" % (value, server.ip))
    ClusterOperationHelper.start_cluster(servers)
示例9: change_erlang_gc
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def change_erlang_gc(servers, value=None):
    """Change the frequency of the erlang_gc process by injecting
    `export ERL_FULLSWEEP_AFTER=<value>` into the server start script.
    0 is the most aggressive setting; value=None (the default) is a no-op.
    """
    log = logger.Logger.get_logger()
    if value is None:
        return
    for node in servers:
        conn = RemoteMachineShellConnection(node)
        # The startup-script path depends on the installed product.
        product = "couchbase" if conn.is_couchbase_installed() else "membase"
        command = "sed -i '/exec erl/i export ERL_FULLSWEEP_AFTER=%s' /opt/%s/bin/%s-server" %\
            (value, product, product)
        out, err = conn.execute_command(command)
        conn.log_command_output(out, err)
        log.info("modified erlang gc to full_sweep_after %s on %s " % (value, node.ip))
示例10: run
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
def run(self):
remote = RemoteMachineShellConnection(self.server)
server_type = 'membase'
if remote.is_couchbase_installed():
server_type = 'couchbase'
stamp = time.strftime("%d_%m_%Y_%H_%M")
try:
info = remote.extract_remote_info()
if info.type.lower() != 'windows':
core_files = []
print "looking for Erlang/Memcached crashes on {0} ... ".format(info.ip)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
core_files.extend(remote.file_starts_with("/tmp/", "core"))
core_files.extend(remote.file_ends_with("/opt/{0}/var/lib/{0}/crash".format(server_type), ".dmp"))
if core_files:
print "found dumps on {0}: {1}".format(info.ip, core_files)
command = "mkdir -p /tmp/backup_crash/{0};" \
"mv -f /tmp/core* /tmp/backup_crash/{0};" \
"mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
"mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
format(stamp, server_type)
print "Moved all dumps on {0} to backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp)
remote.execute_command(command)
output, error = remote.execute_command("ls -la /tmp/backup_crash/{0}".format(stamp))
for o in output:
print o
for core_file in core_files:
remote_path, file_name = os.path.dirname(core_file), os.path.basename(core_file)
if remote.delete_file(remote_path, file_name):
print 'deleted core file : {0}'.format(core_file)
remote.disconnect()
else:
print "dump files not found on {0}".format(info.ip)
if remote:
remote.disconnect()
except Exception as ex:
print ex
示例11: RemoteMachineShellConnection
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
import sys
import os
sys.path.append('.')
sys.path.append('lib')
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputParser
if __name__ == "__main__":
input = TestInputParser.get_test_input(sys.argv)
remote = RemoteMachineShellConnection(input.servers[0])
server_type = 'membase'
if remote.is_couchbase_installed():
server_type = 'couchbase'
for serverInfo in input.servers:
try:
remote = RemoteMachineShellConnection(serverInfo)
info = remote.extract_remote_info()
if info.type.lower() != 'windows':
core_files = []
print "looking for erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
print "looking for core* files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
print "looking for core* files under /tmp/"
core_files.extend(remote.file_starts_with("/tmp/", "core"))
i = 0
for core_file in core_files:
if core_file.find('erl_crash.dump') != -1:
示例12: BackupRestoreTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
class BackupRestoreTests(unittest.TestCase):
input = None
servers = None
log = None
membase = None
shell = None
remote_tmp_folder = None
master = None
def setUp(self):
    """Connect to the master node and compute a unique per-run backup
    folder under the node's configured data path (helpful on ec2, where
    custom data paths have more room)."""
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.master = self.servers[0]
    self.shell = RemoteMachineShellConnection(self.master)
    node_info = RestConnection(self.master).get_nodes_self()
    data_path = node_info.storage[0].get_data_path()
    self.remote_tmp_folder = "{2}/{0}-{1}".format("backup", uuid.uuid4(), data_path)
    self.perm_command = "mkdir -p {0}".format(self.remote_tmp_folder)
    # Older membase installs need different service handling elsewhere.
    self.is_membase = not self.shell.is_couchbase_installed()
def common_setUp(self):
    """Wipe the cluster, delete all buckets, and restart every node's
    server process, waiting for ns_server to come back up."""
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for node in self.servers:
        conn = RemoteMachineShellConnection(node)
        # Stop/start both flavours; the one not installed is a no-op.
        conn.stop_membase()
        conn.stop_couchbase()
        conn.start_membase()
        conn.start_couchbase()
        RestHelper(RestConnection(node)).is_ns_server_running(timeout_in_seconds=120)
        conn.disconnect()
def tearDown(self):
    """Delete the per-run backup folder from every node."""
    for node in self.servers:
        self.log.info("delete remote folder @ {0}".format(self.remote_tmp_folder))
        conn = RemoteMachineShellConnection(node)
        conn.remove_directory(self.remote_tmp_folder)
        conn.disconnect()
def add_node_and_rebalance(self, master, servers):
    """Add all `servers` to the cluster rooted at `master`, rebalance,
    and assert that the rebalance succeeded and the cluster is healthy."""
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers, self.input.membase_settings, self)
    rest = RestConnection(master)
    otpNodeIds = [node.id for node in rest.node_statuses()]
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(master.ip))
    self.log.info('started rebalance operation on master node {0}'.format(master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.log.info('rebalance operaton succeeded for nodes: {0}'.format(otpNodeIds))
    # make sure it is rebalanced and node statuses are healthy
    helper = RestHelper(rest)
    # BUG FIX: the original passed the bound methods themselves to
    # assertTrue (always truthy), so these checks could never fail.
    # NOTE(review): assumes is_cluster_healthy / is_cluster_rebalanced
    # are methods on RestHelper, not properties -- confirm before merge.
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
def add_nodes_and_rebalance(self):
    """Convenience wrapper: rebalance every configured server into the
    cluster rooted at the first server."""
    self.add_node_and_rebalance(master=self.servers[0], servers=self.servers)
def _test_backup_add_restore_bucket_body(self,
bucket,
delay_after_data_load,
startup_flag,
single_node):
server = self.master
rest = RestConnection(server)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
if bucket == "default":
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
else:
proxyPort = info.moxi + 500
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
authType="sasl", saslPassword="password")
ready = BucketOperationHelper.wait_for_memcached(server, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
if not single_node:
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=1,
value_size_distribution=distribution,
moxi=True,
write_only=True,
number_of_threads=2)
#.........这里部分代码省略.........
示例13: BackupRestoreTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import is_couchbase_installed [as 别名]
class BackupRestoreTests(unittest.TestCase):
input = None
servers = None
log = None
membase = None
shell = None
remote_tmp_folder = None
master = None
def setUp(self):
    """Connect to the master node and compute a unique per-run backup
    folder under the node's configured data path (helpful on ec2, where
    custom data paths have more room)."""
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.master = self.servers[0]
    self.shell = RemoteMachineShellConnection(self.master)
    node_info = RestConnection(self.master).get_nodes_self()
    data_path = node_info.storage[0].get_data_path()
    self.remote_tmp_folder = "{2}/{0}-{1}".format("backup", uuid.uuid4(), data_path)
    self.perm_command = "mkdir -p {0}".format(self.remote_tmp_folder)
    # Older membase installs need different service handling elsewhere.
    self.is_membase = not self.shell.is_couchbase_installed()
def common_setUp(self):
    """Wipe the cluster, delete all buckets, and restart every node's
    server process, waiting for ns_server to come back up."""
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for node in self.servers:
        conn = RemoteMachineShellConnection(node)
        # Stop/start both flavours; the one not installed is a no-op.
        conn.stop_membase()
        conn.stop_couchbase()
        conn.start_membase()
        conn.start_couchbase()
        RestHelper(RestConnection(node)).is_ns_server_running(timeout_in_seconds=120)
        conn.disconnect()
def tearDown(self):
    """Delete the per-run backup folder from every node."""
    for node in self.servers:
        self.log.info("delete remote folder @ {0}".format(self.remote_tmp_folder))
        conn = RemoteMachineShellConnection(node)
        conn.remove_directory(self.remote_tmp_folder)
        conn.disconnect()
def add_node_and_rebalance(self, master, servers):
    """Add all `servers` to the cluster rooted at `master`, rebalance,
    and assert that the rebalance succeeded and the cluster is healthy."""
    ClusterOperationHelper.add_all_nodes_or_assert(master, servers, self.input.membase_settings, self)
    rest = RestConnection(master)
    otpNodeIds = [node.id for node in rest.node_statuses()]
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(master.ip))
    self.log.info('started rebalance operation on master node {0}'.format(master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.log.info('rebalance operaton succeeded for nodes: {0}'.format(otpNodeIds))
    # make sure it is rebalanced and node statuses are healthy
    helper = RestHelper(rest)
    # BUG FIX: the original passed the bound methods themselves to
    # assertTrue (always truthy), so these checks could never fail.
    # NOTE(review): assumes is_cluster_healthy / is_cluster_rebalanced
    # are methods on RestHelper, not properties -- confirm before merge.
    self.assertTrue(helper.is_cluster_healthy(), "cluster status is not healthy")
    self.assertTrue(helper.is_cluster_rebalanced(), "cluster is not balanced")
def add_nodes_and_rebalance(self):
    """Convenience wrapper: rebalance every configured server into the
    cluster rooted at the first server."""
    self.add_node_and_rebalance(master=self.servers[0], servers=self.servers)
def _test_backup_add_restore_bucket_body(self,
bucket,
delay_after_data_load,
startup_flag,
single_node):
server = self.master
rest = RestConnection(server)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
if bucket == "default":
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
else:
proxyPort = info.moxi + 500
rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
authType="sasl", saslPassword="password")
ready = BucketOperationHelper.wait_for_memcached(server, bucket)
self.assertTrue(ready, "wait_for_memcached failed")
if not single_node:
self.add_nodes_and_rebalance()
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=1,
value_size_distribution=distribution,
moxi=True,
write_only=True,
number_of_threads=2)
#.........这里部分代码省略.........