本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.stop_membase方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.stop_membase方法的具体用法?Python RemoteMachineShellConnection.stop_membase怎么用?Python RemoteMachineShellConnection.stop_membase使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection的用法示例。
在下文中一共展示了RemoteMachineShellConnection.stop_membase方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_reset
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def test_reset(self):
    """Bounce the membase service on every cluster node, pausing after each."""
    for server_info in self.servers:
        shell = RemoteMachineShellConnection(server_info)
        shell.stop_membase()
        shell.start_membase()
        shell.disconnect()
        # give the restarted service time to come back before the next node
        time.sleep(10)
示例2: stop_cluster
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def stop_cluster(servers):
    """Stop the couchbase/membase service on every server in *servers*.

    Detects which product is installed on each host and issues the matching
    stop command.
    """
    for server in servers:
        shell = RemoteMachineShellConnection(server)
        if shell.is_couchbase_installed():
            shell.stop_couchbase()
        else:
            shell.stop_membase()
        # fix: close the remote shell session; the original leaked one
        # SSH connection per server (every sibling example disconnects)
        shell.disconnect()
示例3: stop_membase
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def stop_membase(self,node):
    """Stop membase on the first cluster server whose IP matches *node*."""
    log = logger.Logger.get_logger()
    for candidate in self._servers:
        if candidate.ip != node.ip:
            continue
        conn = RemoteMachineShellConnection(candidate)
        conn.stop_membase()
        conn.disconnect()
        log.info("stopped membase server on {0}".format(candidate))
        break
示例4: common_setUp
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def common_setUp(self):
    """Reset the cluster: clean it up, drop all buckets, and bounce each node."""
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for node in self.servers:
        conn = RemoteMachineShellConnection(node)
        # stop whichever product flavor is present, then start both back up
        conn.stop_membase()
        conn.stop_couchbase()
        conn.start_membase()
        conn.start_couchbase()
        # wait until ns_server answers again before touching the next node
        RestHelper(RestConnection(node)).is_ns_server_running(timeout_in_seconds=120)
        conn.disconnect()
示例5: _stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _stop_server(self, node):
    """Stop the service on the server matching *node*'s ip and port."""
    for candidate in self.servers:
        if candidate.ip != node.ip or candidate.port != str(node.port):
            continue
        conn = RemoteMachineShellConnection(candidate)
        # Couchbase and Membase installs expose different stop commands
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
            self.log.info("Couchbase stopped")
        else:
            conn.stop_membase()
            self.log.info("Membase stopped")
        conn.disconnect()
        return
示例6: stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def stop_server(self, node):
    """Stop the server (couchbase or membase) that is subject to failover."""
    for candidate in self.servers:
        if candidate.ip != node.ip:
            continue
        conn = RemoteMachineShellConnection(candidate)
        # stop whichever product is actually installed on this host
        if conn.is_couchbase_installed():
            conn.stop_couchbase()
            self.log.info("Couchbase stopped")
        else:
            conn.stop_membase()
            self.log.info("Membase stopped")
        conn.disconnect()
        break
示例7: stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def stop_server(self, node):
    """Stop couchbase/membase on the matching server; first IP match wins."""
    log = logger.Logger.get_logger()
    target = None
    for server in self.servers:
        if server.ip == node.ip:
            target = server
            break
    if target is not None:
        shell = RemoteMachineShellConnection(target)
        if shell.is_couchbase_installed():
            shell.stop_couchbase()
            log.info("Couchbase stopped")
        else:
            shell.stop_membase()
            log.info("Membase stopped")
        shell.disconnect()
示例8: stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def stop_server(self, node):
    """Stop the service on the node whose ns_server-reported IP matches *node*."""
    log = logger.Logger.get_logger()
    for server in self._servers:
        rest = RestConnection(server)
        # skip servers whose ns_server is not reachable right now
        if not RestHelper(rest).is_ns_server_running(timeout_in_seconds=5):
            continue
        if rest.get_nodes_self().ip != node.ip:
            continue
        shell = RemoteMachineShellConnection(server)
        if shell.is_membase_installed():
            shell.stop_membase()
            log.info("Membase stopped")
        else:
            shell.stop_couchbase()
            log.info("Couchbase stopped")
        shell.disconnect()
        break
示例9: rebalance_out
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def rebalance_out(self, how_many):
    """Pick up to *how_many* random non-master nodes, rebalance them out,
    then restart membase on the ejected nodes (best effort).

    Returns the rebalance result when a rebalance was started, True otherwise.
    """
    msg = "choosing three nodes and rebalance them out from the cluster"
    self.log.info(msg)
    rest = RestConnection(self._servers[0])
    nodes = rest.node_statuses()
    nodeIps = [node.ip for node in nodes]
    self.log.info("current nodes : {0}".format(nodeIps))
    toBeEjected = []
    toBeEjectedServers = []
    # shuffle everything except the master so the selection is random
    selection = self._servers[1:]
    shuffle(selection)
    for server in selection:
        for node in nodes:
            if server.ip == node.ip:
                toBeEjected.append(node.id)
                toBeEjectedServers.append(server)
                break
        if len(toBeEjected) == how_many:
            break
    if len(toBeEjected) > 0:
        self.log.info("selected {0} for rebalance out from the cluster".format(toBeEjected))
        otpNodes = [node.id for node in nodes]
        started = rest.rebalance(otpNodes, toBeEjected)
        msg = "rebalance operation started ? {0}"
        self.log.info(msg.format(started))
        if started:
            result = rest.monitorRebalance()
            msg = "successfully rebalanced out selected nodes from the cluster ? {0}"
            self.log.info(msg.format(result))
            #let's restart membase on those nodes
            for server in toBeEjectedServers:
                shell = RemoteMachineShellConnection(server)
                # fix: the original used bare `except:` clauses, which also
                # swallow KeyboardInterrupt/SystemExit; keep the deliberate
                # best-effort restart but only ignore ordinary exceptions
                try:
                    shell.stop_membase()
                except Exception:
                    pass
                try:
                    shell.start_membase()
                except Exception:
                    pass
                shell.disconnect()
                RestHelper(RestConnection(server)).is_ns_server_running()
            return result
    return True
示例10: _stop_server
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _stop_server(self, node):
    """Stop *node*: via SSH when it listens on 8091, otherwise via diag/eval."""
    master_rest = RestConnection(self.servers[0])
    for server in self.servers:
        rest = RestConnection(server)
        self.log.info("see if server {0}:{1} is running".format(server.ip, server.port))
        if not RestHelper(rest).is_ns_server_running(timeout_in_seconds=5):
            continue
        if rest.get_nodes_self().id != node.id:
            continue
        if node.port == 8091:
            # default port: stop the OS-level service over SSH
            shell = RemoteMachineShellConnection(server)
            if shell.is_membase_installed():
                shell.stop_membase()
                self.log.info("Membase stopped")
            else:
                shell.stop_couchbase()
                self.log.info("Couchbase stopped")
            shell.disconnect()
            break
        else:
            # non-default port: ask ns_server to stop the node via diag/eval
            # (`stop_cluster` is a module-level erlang expression template)
            self.log.info("running {0}".format(stop_cluster.format(node.id)))
            master_rest.diag_eval(stop_cluster.format(node.id))
示例11: _stop_membase_servers
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _stop_membase_servers(self, servers):
    """Stop the membase service on every server in *servers*."""
    for target in servers:
        connection = RemoteMachineShellConnection(target)
        connection.stop_membase()
        connection.disconnect()
示例12: _install_and_upgrade
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         create_buckets=False,
                         insert_data=False,
                         start_upgraded_first=True,
                         load_ratio=-1,
                         roll_upgrade=False,
                         upgrade_path=[]):
    """Install *initial_version* on all nodes, cluster them, then upgrade
    along *upgrade_path* (rolling or all-at-once).

    NOTE(review): `upgrade_path=[]` is a mutable default argument; it is
    only read from here (copied into node_upgrade_path), but callers should
    still prefer passing their own list.
    """
    # work on a copy so the caller's upgrade_path list is never mutated
    node_upgrade_path = []
    node_upgrade_path.extend(upgrade_path)
    #then start them in whatever order you want
    inserted_keys = []
    log = logger.Logger.get_logger()
    if roll_upgrade:
        log.info("performing a rolling upgrade")
    # NOTE(review): `input` shadows the builtin; kept for byte-compatibility
    input = TestInputSingleton.input
    rest_settings = input.membase_settings
    servers = input.servers
    save_upgrade_config = False
    is_amazon = False
    if input.test_params.get('amazon',False):
        is_amazon = True
    # install older build on all nodes
    for server in servers:
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        # wipe any existing install of either product before downgrading
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        remote.execute_command('/etc/init.d/membase-server stop')
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        remote.disconnect()
    bucket_data = {}
    master = servers[0]
    if create_buckets:
        #let's create buckets
        #wait for the bucket
        #bucket port should also be configurable , pass it as the
        #parameter to this test ? later
        self._create_default_bucket(master)
        inserted_keys = self._load_data(master, load_ratio)
        _create_load_multiple_bucket(self, master, bucket_data, howmany=2)
    # cluster all the nodes together
    ClusterOperationHelper.add_all_nodes_or_assert(master,
                                                   servers,
                                                   rest_settings, self)
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(master.ip))
    log.info('started rebalance operation on master node {0}'.format(master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    # 1.7.0/1.7.1 need the config saved explicitly before upgrading
    if initial_version == "1.7.0" or initial_version == "1.7.1":
        self._save_config(rest_settings, master)
    input_version = input.test_params['version']
    node_upgrade_path.append(input_version)
    #if we dont want to do roll_upgrade ?
    log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
    log.info("List of servers {0}".format(servers))
    if not roll_upgrade:
        for version in node_upgrade_path:
            # NOTE(review): `is not` compares identity, not equality; works
            # only by accident of string interning — should be `!=`
            if version is not initial_version:
                log.info("Upgrading to version {0}".format(version))
                self._stop_membase_servers(servers)
                # 1.8.x upgrades must preserve the existing config
                if re.search('1.8', version):
                    save_upgrade_config = True
                appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
                self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
                for server in servers:
                    remote = RemoteMachineShellConnection(server)
                    remote.download_build(appropriate_build)
                    remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
                    RestHelper(RestConnection(server)).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                    #verify admin_creds still set
                    # NOTE(review): assertTrue's 2nd arg is a message, not a
                    # comparison — this does not assert equality
                    pools_info = RestConnection(server).get_pools_info()
                    self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
                if start_upgraded_first:
                    #......... remainder of the example was omitted by the source site .........
示例13: _test_backup_add_restore_bucket_body
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _test_backup_add_restore_bucket_body(self,
                                         bucket,
                                         delay_after_data_load,
                                         startup_flag,
                                         single_node):
    """Load data into *bucket*, back it up, recreate the bucket, restore,
    and verify every inserted key survived the round trip.

    When *startup_flag* is false the servers are stopped for the backup and
    restarted for the restore. When *single_node* is false extra nodes are
    rebalanced in and replication is awaited before backing up.
    """
    server = self.master
    rest = RestConnection(server)
    info = rest.get_nodes_self()
    # bucket quota: two thirds of the node's memory quota
    size = int(info.memoryQuota * 2.0 / 3.0)
    if bucket == "default":
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    else:
        # non-default buckets get their own proxy port and SASL auth
        proxyPort = info.moxi + 500
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                           authType="sasl", saslPassword="password")
    ready = BucketOperationHelper.wait_for_memcached(server, bucket)
    self.assertTrue(ready, "wait_for_memcached failed")
    if not single_node:
        self.add_nodes_and_rebalance()
    # value-size distribution: size-in-bytes -> fraction of keys
    distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
    inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                         name=bucket,
                                                                                         ram_load_ratio=1,
                                                                                         value_size_distribution=distribution,
                                                                                         moxi=True,
                                                                                         write_only=True,
                                                                                         number_of_threads=2)
    if not single_node:
        rest = RestConnection(self.master)
        self.assertTrue(RestHelper(rest).wait_for_replication(180), msg="replication did not complete")
    # NOTE(review): this only logs the delay; no actual sleep is performed
    self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
    # wait until all pending writes are flushed to disk before backup
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    node = RestConnection(self.master).get_nodes_self()
    if not startup_flag:
        # cold backup: stop the servers first
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_membase()
            shell.stop_couchbase()
            shell.disconnect()
    output, error = self.shell.execute_command(self.perm_command)
    self.shell.log_command_output(output, error)
    #now let's back up
    BackupHelper(self.master, self).backup(bucket, node, self.remote_tmp_folder)
    if not startup_flag:
        # bring the servers back before restoring
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            shell.start_membase()
            shell.start_couchbase()
            RestHelper(RestConnection(server)).is_ns_server_running()
            shell.disconnect()
    # drop and recreate the bucket so the restore starts from empty
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    if bucket == "default":
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    else:
        proxyPort = info.moxi + 500
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=proxyPort,
                           authType="sasl", saslPassword="password")
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    if bucket == "default":
        BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi)
    else:
        BackupHelper(self.master, self).restore(backup_location=self.remote_tmp_folder, moxi_port=info.moxi, username=bucket, password='password')
    keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
    self.assertTrue(keys_exist, msg="unable to verify keys after restore")
示例14: _install_and_upgrade
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Windows-only: install *initial_version*, then upgrade to the version
    named in the test params, verifying cluster state survives.

    Logs an error and does nothing on non-Windows servers.
    """
    log = logger.Logger.get_logger()
    # NOTE(review): `input` shadows the builtin; kept for byte-compatibility
    input = TestInputSingleton.input
    version = input.test_params['version']
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    is_amazon = False
    if input.test_params.get('amazon',False):
        is_amazon = True
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    # wipe any existing install of either product
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    builds, changes = BuildQuery().get_all_builds()
    #release_builds = BuildQuery().get_all_release_builds(initial_version)
    #if initial_version == "1.7.2":
    #    initial_version = "1.7.2r-20"
    older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                          os_architecture=info.architecture_type,
                                                          build_version=initial_version,
                                                          product='membase-server-enterprise', is_amazon=is_amazon)
    if info.type.lower() == 'windows':
        # product abbreviation used in windows binary names:
        # "cb" for 1.8+ (couchbase), "mb" (membase) otherwise
        if older_build.product_version.startswith("1.8"):
            abbr_product = "cb"
        else:
            abbr_product = "mb"
        remote.download_binary_in_win(older_build.url, abbr_product, initial_version)
        remote.membase_install_win(older_build, initial_version)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        if version.startswith("1.8"):
            abbr_product = "cb"
        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
        remote.download_binary_in_win(appropriate_build.url, abbr_product, version)
        remote.stop_membase()
        log.info("###### START UPGRADE. #########")
        remote.membase_upgrade_win(info.architecture_type, info.windows_name, version, initial_version)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        pools_info = rest.get_pools_info()
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set
        # NOTE(review): assertTrue's 2nd arg is a message, not a comparison —
        # this does not assert equality with product_version
        self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
        if initialize_cluster:
            #TODO: how can i verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               server,
                                                               bucket.name, self)
    else:
        log.error("This is not windows server!")
示例15: _install_and_upgrade
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import stop_membase [as 别名]
def _install_and_upgrade(self, initial_version='1.6.5.3',
                         initialize_cluster=False,
                         create_buckets=False,
                         insert_data=False):
    """Install *initial_version* on the first server, then upgrade it to the
    version named in the test params, verifying buckets/keys survive.
    """
    # NOTE(review): `input` shadows the builtin; kept for byte-compatibility
    input = TestInputSingleton.input
    rest_settings = input.membase_settings
    servers = input.servers
    server = servers[0]
    # 1.7 -> 1.8 upgrades must carry the existing config forward
    save_upgrade_config = False
    if initial_version.startswith("1.7") and input.test_params['version'].startswith("1.8"):
        save_upgrade_config = True
    is_amazon = False
    if input.test_params.get('amazon', False):
        is_amazon = True
    # product was renamed membase -> couchbase at 1.8
    if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
        product = 'membase-server-enterprise'
    else:
        product = 'couchbase-server-enterprise'
    remote = RemoteMachineShellConnection(server)
    rest = RestConnection(server)
    info = remote.extract_remote_info()
    # wipe any existing install of either product
    remote.membase_uninstall()
    remote.couchbase_uninstall()
    builds, changes = BuildQuery().get_all_builds()
    # check to see if we are installing from latestbuilds or releases
    # note: for newer releases (1.8.0) even release versions can have the
    # form 1.8.0r-55
    if re.search('r', initial_version):
        builds, changes = BuildQuery().get_all_builds()
        older_build = BuildQuery().find_membase_build(builds, deliverable_type=info.deliverable_type,
                                                      os_architecture=info.architecture_type,
                                                      build_version=initial_version,
                                                      product=product, is_amazon=is_amazon)
    else:
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product=product, is_amazon=is_amazon)
    remote.stop_membase()
    remote.stop_couchbase()
    remote.download_build(older_build)
    #now let's install ?
    remote.membase_install(older_build)
    RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
    rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
    bucket_data = {}
    if initialize_cluster:
        rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
        if create_buckets:
            _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
    version = input.test_params['version']
    appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
    self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
    remote.download_build(appropriate_build)
    remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
    remote.disconnect()
    RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
    pools_info = rest.get_pools_info()
    rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
    time.sleep(TIMEOUT_SECS)
    #verify admin_creds still set
    # NOTE(review): assertTrue's 2nd arg is a message, not a comparison —
    # this does not assert equality with product_version
    self.assertTrue(pools_info['implementationVersion'], appropriate_build.product_version)
    if initialize_cluster:
        #TODO: how can i verify that the cluster init config is preserved
        if create_buckets:
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                            msg="bucket 'default' does not exist..")
        if insert_data:
            buckets = rest.get_buckets()
            for bucket in buckets:
                BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                           server,
                                                           bucket.name, self)