本文整理汇总了Python中membase.helper.cluster_helper.ClusterOperationHelper.cleanup_cluster方法的典型用法代码示例。如果您正苦于以下问题:Python ClusterOperationHelper.cleanup_cluster方法的具体用法?Python ClusterOperationHelper.cleanup_cluster怎么用?Python ClusterOperationHelper.cleanup_cluster使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.helper.cluster_helper.ClusterOperationHelper
的用法示例。
在下文中一共展示了ClusterOperationHelper.cleanup_cluster方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp_bucket
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def setUp_bucket(self, unittest):
    """Reset the first server to a clean, initialized single-node cluster.

    ``unittest`` is the calling TestCase instance, used for assertions and
    passed on to the bucket-cleanup helper. Side effects: initializes the
    cluster twice (before and after cleanup), removes all buckets, and
    creates the built-in 'cbadminbucket' admin user.
    """
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    unittest.assertTrue(self.input, msg="input parameters missing...")
    self.test = unittest
    self.master = self.input.servers[0]
    rest = RestConnection(self.master)
    # First init so the cleanup/delete calls below can talk to the node.
    rest.init_cluster(username=self.master.rest_username, password=self.master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
    ClusterOperationHelper.cleanup_cluster([self.master])
    BucketOperationHelper.delete_all_buckets_or_assert([self.master], self.test)
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    # Re-initialize after the cleanup wiped cluster state.
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=info.memoryQuota)
    # Add built-in user
    testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
    RbacBase().create_user_source(testuser, 'builtin', self.master)
    # Fixed delays give ns_server time to propagate the RBAC changes —
    # presumably no readiness API was available; confirm before shortening.
    time.sleep(10)
    # Assign user to role
    role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
    RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
    time.sleep(10)
示例2: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
    """CLI base-test teardown: disconnect the shell, remove extra server
    groups, and clean up the destination cluster when an XDCR (two-cluster)
    config is present, then delegate to the superclass teardown.
    """
    # NOTE(review): both branches below disconnect the shell — with and
    # without skip_cleanup — only the guarding counters differ; confirm
    # this asymmetry is intentional.
    if not self.input.param("skip_cleanup", True):
        if self.times_teardown_called > 1 :
            self.shell.disconnect()
    if self.input.param("skip_cleanup", True):
        if self.case_number > 1 or self.times_teardown_called > 1:
            self.shell.disconnect()
    self.times_teardown_called += 1
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    # Delete every server group except the default "Group 1".
    zones = rest.get_zone_names()
    for zone in zones:
        if zone != "Group 1":
            rest.delete_zone(zone)
    self.clusters_dic = self.input.clusters
    if self.clusters_dic:
        if len(self.clusters_dic) > 1:
            # Second entry in the ini clusters dict is the XDCR destination.
            self.dest_nodes = self.clusters_dic[1]
            self.dest_master = self.dest_nodes[0]
            if self.dest_nodes and len(self.dest_nodes) > 1:
                self.log.info("======== clean up destination cluster =======")
                rest = RestConnection(self.dest_nodes[0])
                rest.remove_all_remote_clusters()
                rest.remove_all_replications()
                BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
        elif len(self.clusters_dic) == 1:
            self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
    else:
        self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
    super(CliBaseTest, self).tearDown()
示例3: setUp
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def setUp(self):
    """Initialize every server, wipe the cluster, create the default bucket,
    and rebalance all nodes in before the test starts.
    """
    self.log = logger.Logger.get_logger()
    self.master = TestInputSingleton.input.servers[0]
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.num_of_docs = self.input.param("num_of_docs", 1000)
    rest = RestConnection(self.master)
    # NOTE(review): ``rest`` is bound to the master only, yet these loops
    # iterate all servers — each iteration re-inits the master with the
    # given server's credentials; confirm that is the intended behavior.
    for server in self.servers:
        rest.init_cluster(server.rest_username, server.rest_password)
    info = rest.get_nodes_self()
    for server in self.servers:
        rest.init_cluster_memoryQuota(
            server.rest_username, server.rest_password, memoryQuota=info.mcdMemoryReserved
        )
    ClusterOperationHelper.cleanup_cluster(self.servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    self._create_default_bucket()
    # Rebalance the nodes
    ClusterOperationHelper.begin_rebalance_in(self.master, self.servers)
    ClusterOperationHelper.end_rebalance(self.master)
    self._log_start()
示例4: cleanup
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def cleanup(self):
    """Stop any in-flight rebalance, drop every bucket, eject each node,
    and wait until all ns_servers report ready again."""
    master_rest = RestConnection(self.master)
    master_rest.stop_rebalance()
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    # Eject the nodes one at a time rather than as a single batch.
    for node in self.servers:
        ClusterOperationHelper.cleanup_cluster([node])
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
示例5: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
try:
test_failed = len(self._resultForDoCleanups.errors)
if self.driver and test_failed:
BaseHelper(self).create_screenshot()
if self.driver:
self.driver.close()
if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
print "test fails, teardown will be skipped!!!"
return
rest = RestConnection(self.servers[0])
try:
reb_status = rest._rebalance_progress_status()
except ValueError as e:
if e.message == 'No JSON object could be decoded':
print "cluster not initialized!!!"
return
if reb_status == 'running':
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
for server in self.servers:
ClusterOperationHelper.cleanup_cluster([server])
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
except Exception as e:
raise e
finally:
if self.driver:
self.shell.disconnect()
示例6: common_setup
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def common_setup(self, replica):
    """Clean the cluster, add all nodes, create the default bucket with the
    given ``replica`` count, rebalance everything in, and build a
    vbucket-aware memcached client in ``self.awareness``.
    """
    self._input = TestInputSingleton.input
    self._servers = self._input.servers
    first = self._servers[0]
    self.log = logger.Logger().get_logger()
    self.log.info(self._input)
    rest = RestConnection(first)
    for server in self._servers:
        RestHelper(RestConnection(server)).is_ns_server_running()
    ClusterOperationHelper.cleanup_cluster(self._servers)
    BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
    ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
    nodes = rest.node_statuses()
    otpNodeIds = [node.id for node in nodes]
    info = rest.get_nodes_self()
    # Give the bucket 3/4 of the node's reserved memcached memory.
    bucket_ram = info.mcdMemoryReserved * 3 / 4
    rest.create_bucket(bucket="default",
                       ramQuotaMB=int(bucket_ram),
                       replicaNumber=replica,
                       proxyPort=rest.get_nodes_self().moxi)
    msg = "wait_for_memcached fails"
    # BUG FIX: the original line ended with a stray comma, which made
    # ``ready`` a one-element tuple — always truthy — so this assert could
    # never fire even when wait_for_memcached returned False.
    ready = BucketOperationHelper.wait_for_memcached(first, "default")
    self.assertTrue(ready, msg)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(first.ip))
    self.log.info('started rebalance operation on master node {0}'.format(first.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # without a bucket this seems to fail
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
    self.awareness = VBucketAwareMemcached(rest, "default")
示例7: setUp
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def setUp(self):
    """Clean all servers, add them to the master, and rebalance the whole
    cluster in before the test runs.
    """
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.assertTrue(self.input, msg="input parameters missing...")
    self.servers = self.input.servers
    self.master = self.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    info = rest.get_nodes_self()
    # Size the quota from reserved memcached memory scaled by the ratio
    # the bucket helper computes for this server set.
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
    ClusterOperationHelper.cleanup_cluster(servers=self.servers)
    credentials = self.input.membase_settings
    ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    otpNodeIds = [node.id for node in nodes]
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(self.master.ip))
    self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
    # BUG FIX: the original ignored monitorRebalance()'s result, letting a
    # failed rebalance go unnoticed until the test itself failed obscurely;
    # assert it here, matching the sibling common_setup helper.
    self.assertTrue(rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
示例8: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
    """Failover-test teardown: restore firewall rules and restart the server
    on quarantined nodes, then wipe buckets and eject all nodes.
    """
    try:
        self._cluster_helper.shutdown()
        log = logger.Logger.get_logger()
        log.info("============== tearDown was started for test #{0} {1} =============="\
            .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        log.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)
        # Undo the firewall isolation the test applied to these nodes and
        # bring couchbase-server back up on each.
        for server in self._cleanup_nodes:
            shell = RemoteMachineShellConnection(server)
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/etc/init.d/couchbase-server start")
            shell.log_command_output(o, r)
            shell.disconnect()
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.cleanup_cluster(self._servers)
        # NOTE(review): ``ClusterHelper`` here vs ``ClusterOperationHelper``
        # above — presumably an import alias; confirm both names resolve.
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        log.info("============== tearDown was finished for test #{0} {1} =============="\
            .format(self.case_number, self._testMethodName))
    finally:
        pass
示例9: common_setup
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def common_setup(input, testcase):
    """Reset every server in ``input`` for ``testcase``: run the basic remote
    setup, delete all buckets, eject each node, and wait for ns_server."""
    all_servers = input.servers
    RemoteUtilHelper.common_basic_setup(all_servers)
    BucketOperationHelper.delete_all_buckets_or_assert(all_servers, testcase)
    # Eject the nodes individually.
    for node in all_servers:
        ClusterOperationHelper.cleanup_cluster([node])
    ClusterHelper.wait_for_ns_servers_or_assert(all_servers, testcase)
示例10: common_tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def common_tearDown(servers, testcase):
    """Module-level teardown for auto-failover tests: restart the servers,
    wait for ns_server, best-effort flush the buckets, then delete buckets
    and eject every node.
    """
    log = logger.Logger.get_logger()
    log.info(
        "============== common_tearDown was started for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
    )
    try:
        # BUG FIX: the original read ``self._servers[0]`` but this is a plain
        # function with no ``self``; the resulting NameError was silently
        # swallowed by the except below, so buckets were never flushed.
        rest = RestConnection(servers[0])
        buckets = rest.get_buckets()
        for bucket in buckets:
            MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
    except Exception:
        # Flushing is best-effort; cleanup below still deletes the buckets.
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    log.info(
        "============== common_tearDown was finished for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
示例11: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
    """XDCR rebalance-test teardown: stop the stats threads, print per-side
    stats, then delete buckets and eject nodes on both clusters.
    """
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="\
            .format(self.case_number, self._testMethodName))
        # Signal the stats threads to stop, then join whichever ones exist.
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'): self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'): self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'): self._stats_thread3.join()
        # NOTE(review): ``in "bidirection"`` is a substring test, not an
        # equality check — it is also true for e.g. "bi"; confirm intended.
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'): self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'): self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'): self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="\
            .format(self.case_number, self._testMethodName))
        # Clean both the source and the destination cluster, node by node.
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                # NOTE(review): ``self`` is passed as the second positional
                # argument here, unlike other call sites that pass a flag or
                # master= keyword — confirm against cleanup_cluster's signature.
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="\
            .format(self.case_number, self._testMethodName))
    finally:
        self.cluster.shutdown(force=True)
        self._log_finish(self)
示例12: test_insert_x_docs_during_rebalance
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def test_insert_x_docs_during_rebalance(self):
    """Start from one node, insert docs, then add the remaining nodes one by
    one, inserting a fresh batch during every rebalance, and verify the index
    returns every inserted key."""
    doc_count = self.helper.input.param("num-docs", 100000)
    description = "description : have a single node, insert {0} docs, "\
        "query it, add another node, start rebalancing, insert {0} "\
        "docs, finish rebalancing, keep on adding nodes..."
    self.log.info(description.format(doc_count))
    ddoc_name = "dev_test_insert_{0}_docs_during_rebalance".format(
        doc_count)
    key_prefix = str(uuid.uuid4())[:7]
    # Make sure we are fully de-clustered
    ClusterOperationHelper.cleanup_cluster(self.helper.servers)
    self.helper.create_index_fun(ddoc_name)
    all_keys = self.helper.insert_docs(doc_count, key_prefix)
    # Grow the cluster one server at a time, inserting while rebalancing.
    for node in self.helper.servers[1:]:
        ClusterOperationHelper.add_and_rebalance(
            [self.helper.master, node], False)
        # A fresh prefix creates new docs instead of overwriting old ones.
        key_prefix = str(uuid.uuid4())[:7]
        new_keys = self.helper.insert_docs(
            doc_count, key_prefix, wait_for_persistence=False)
        all_keys.extend(new_keys)
        self._wait_for_rebalance()
    # Make sure data is persisted
    self.helper.wait_for_persistence()
    # Verify that all documents got inserted
    self.helper.query_index_for_verification(ddoc_name, all_keys)
示例13: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
    """Failover base-test teardown: skip cleanup entirely when the test
    failed and stop-on-failure is set; otherwise restore servers and try
    ejecting nodes with each server in turn as master.
    """
    if hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
            and 'stop-on-failure' in TestInputSingleton.input.test_params and \
            str(TestInputSingleton.input.test_params['stop-on-failure']).lower() == 'true':
        # supported starting with python2.7
        # NOTE(review): bare ``log`` here vs ``self.log`` below — presumably a
        # module-level logger; confirm the name resolves in this module.
        log.warn("CLEANUP WAS SKIPPED")
        self.cluster.shutdown(force=True)
        self._log_finish(self)
    else:
        try:
            self.log.info("============== tearDown was started for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self.servers)
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            # After a failover some node may be unreachable as master, so try
            # the cleanup with every server as master until one succeeds.
            for node in self.servers:
                master = node
                try:
                    ClusterOperationHelper.cleanup_cluster(self.servers,
                                                          master=master)
                except:
                    continue
            self.log.info("============== tearDown was finished for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
        finally:
            super(FailoverBaseTest, self).tearDown()
示例14: test_insert_x_delete_y_docs_destroy_cluster
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def test_insert_x_delete_y_docs_destroy_cluster(self):
    """Insert docs on a full cluster, delete a subset while the cluster is
    being collapsed down to a single node, and verify the spatial view no
    longer returns the deleted docs."""
    doc_count = self.helper.input.param("num-docs", 100000)
    delete_count = self.helper.input.param("num-deleted-docs", 10000)
    description = "description : have a cluster, insert {0} docs, delete "\
        "{1} docs while destroying the cluster into a single node "\
        "and query it"
    self.log.info(description.format(doc_count, delete_count))
    ddoc_name = "dev_test_delete_{0}_docs_destroy_cluster".format(
        delete_count)
    key_prefix = str(uuid.uuid4())[:7]
    # Make sure we are fully clustered
    ClusterOperationHelper.add_and_rebalance(self.helper.servers)
    self.helper.create_index_fun(ddoc_name, key_prefix)
    inserted = self.helper.insert_docs(doc_count, key_prefix)
    # Collapse the cluster without waiting for the rebalance to finish,
    # then delete while it is still in flight.
    ClusterOperationHelper.cleanup_cluster(self.helper.servers,
                                           False)
    removed = self.helper.delete_docs(delete_count, key_prefix)
    self._wait_for_rebalance()
    # The deleted docs must be gone from the spatial view.
    view_rows = self.helper.get_results(ddoc_name, doc_count)
    returned_keys = self.helper.get_keys(view_rows)
    self.assertEqual(len(returned_keys), doc_count - len(removed))
    self.helper.verify_result(inserted, removed + returned_keys)
示例15: tearDown
# 需要导入模块: from membase.helper.cluster_helper import ClusterOperationHelper [as 别名]
# 或者: from membase.helper.cluster_helper.ClusterOperationHelper import cleanup_cluster [as 别名]
def tearDown(self):
    """Base-testcase teardown: unless cleanup is skipped, report alerts, stop
    any running rebalance, delete buckets, and eject nodes; always shut down
    the task-manager cluster object.
    """
    try:
        if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            # Surface any cluster alerts in the log before tearing down.
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            self.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="\
                .format(self.case_number, self._testMethodName))
    except BaseException:
        # increase case_number to retry tearDown in setup for the next test
        self.case_number += 1000
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)