This article collects typical usage examples of the Python method membase.helper.cluster_helper.ClusterOperationHelper.wait_for_ns_servers_or_assert. If you are unsure what ClusterOperationHelper.wait_for_ns_servers_or_assert does or how to call it, the curated code samples below should help. You can also look at the containing class, membase.helper.cluster_helper.ClusterOperationHelper, for further usage examples.
The following presents 15 code examples of ClusterOperationHelper.wait_for_ns_servers_or_assert, sorted by popularity by default.
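Before diving into the samples, here is a minimal sketch of a typical call. The keyword arguments wait_time and wait_if_warmup appear in several of the examples below; the helper function name and the concrete timeout value are illustrative assumptions rather than part of the library.

# A minimal sketch (illustrative), not the canonical testrunner usage;
# wait_until_cluster_is_ready is a hypothetical helper name.
from membase.helper.cluster_helper import ClusterOperationHelper

def wait_until_cluster_is_ready(servers, testcase):
    # Poll every node until its ns_server REST interface responds; if a node
    # stays unreachable, an assertion is raised against `testcase`.
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase,
        wait_time=120,        # assumed timeout in seconds
        wait_if_warmup=True)  # also wait out buckets that are still warming up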
Example 1: replication_while_rebooting_a_non_master_destination_node
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def replication_while_rebooting_a_non_master_destination_node(self):
    self._load_all_buckets(self.src_master, self.gen_create, "create", 0)
    self._load_all_buckets(self.dest_master, self.gen_create2, "create", 0)
    self._async_update_delete_data()
    self.sleep(self._timeout)
    reboot_node_dest = self.dest_nodes[len(self.dest_nodes) - 1]
    shell = RemoteMachineShellConnection(reboot_node_dest)
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)
    reboot_node_src = self.src_nodes[len(self.src_nodes) - 1]
    shell = RemoteMachineShellConnection(reboot_node_src)
    if shell.extract_remote_info().type.lower() == 'windows':
        o, r = shell.execute_command("shutdown -r -f -t 0")
    elif shell.extract_remote_info().type.lower() == 'linux':
        o, r = shell.execute_command("reboot")
    shell.log_command_output(o, r)
    self.sleep(360)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True)
    ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True)
    self.merge_buckets(self.src_master, self.dest_master, bidirection=True)
    self.verify_results(verify_src=True)
Example 2: tearDown
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def tearDown(self):
    try:
        self._cluster_helper.shutdown()
        log = logger.Logger.get_logger()
        log.info("============== tearDown was started for test #{0} {1} =============="\
                 .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        log.info("10 seconds delay to wait for membase-server to start")
        time.sleep(10)
        for server in self._cleanup_nodes:
            shell = RemoteMachineShellConnection(server)
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
            shell.log_command_output(o, r)
            o, r = shell.execute_command("/etc/init.d/couchbase-server start")
            shell.log_command_output(o, r)
            shell.disconnect()
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.cleanup_cluster(self._servers)
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        log.info("============== tearDown was finished for test #{0} {1} =============="\
                 .format(self.case_number, self._testMethodName))
    finally:
        pass
Example 3: test_warmup
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def test_warmup(self):
    index_field = self.input.param("index_field", 'name')
    indexes = []
    try:
        indexes = self._create_multiple_indexes(index_field)
        num_srv_warm_up = self.input.param("srv_warm_up", 1)
        if self.input.tuq_client is None:
            self.fail("For this test external tuq server is required. " + \
                      "Please specify one in conf")
        self.test_union_all()
        for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
        # run query, result may not be as expected, but tuq shouldn't fail
        try:
            self.test_union_all()
        except:
            pass
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.sleep(5)
        self.test_union_all()
    finally:
        self._delete_multiple_indexes(indexes)
Example 4: cleanup
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def cleanup(self):
    rest = RestConnection(self.master)
    rest.stop_rebalance()
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
Example 5: tearDown
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def tearDown(self):
    # super(Rebalance, self).tearDown()
    try:
        self.log.info("============== XDCRbasetests stats for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._end_replication_flag = 1
        if hasattr(self, '_stats_thread1'): self._stats_thread1.join()
        if hasattr(self, '_stats_thread2'): self._stats_thread2.join()
        if hasattr(self, '_stats_thread3'): self._stats_thread3.join()
        if self._replication_direction_str in "bidirection":
            if hasattr(self, '_stats_thread4'): self._stats_thread4.join()
            if hasattr(self, '_stats_thread5'): self._stats_thread5.join()
            if hasattr(self, '_stats_thread6'): self._stats_thread6.join()
        if self._replication_direction_str in "bidirection":
            self.log.info("Type of run: BIDIRECTIONAL XDCR")
        else:
            self.log.info("Type of run: UNIDIRECTIONAL XDCR")
        self._print_stats(self.src_master)
        if self._replication_direction_str in "bidirection":
            self._print_stats(self.dest_master)
        self.log.info("============== = = = = = = = = END = = = = = = = = = = ==============")
        self.log.info("============== rebalanceXDCR cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        for nodes in [self.src_nodes, self.dest_nodes]:
            for node in nodes:
                BucketOperationHelper.delete_all_buckets_or_assert([node], self)
                ClusterOperationHelper.cleanup_cluster([node], self)
                ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self)
        self.log.info("============== rebalanceXDCR cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
    finally:
        self.cluster.shutdown(force=True)
        self._log_finish(self)
Example 6: test_full_eviction_changed_to_value_eviction
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def test_full_eviction_changed_to_value_eviction(self):
    KEY_NAME = 'key1'
    gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
    gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen_create, "create", 0)
    self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
    self._verify_stats_all_buckets(self.servers[:self.nodes_init])
    remote = RemoteMachineShellConnection(self.master)
    for bucket in self.buckets:
        output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                 cluster_host="localhost",
                                                 user=self.master.rest_username,
                                                 password=self.master.rest_password,
                                                 options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
        self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        self.servers[:self.nodes_init], self,
        wait_time=self.wait_timeout, wait_if_warmup=True)
    self.sleep(10, 'Wait some time before next load')
    # self._load_all_buckets(self.master, gen_create2, "create", 0)
    rest = RestConnection(self.master)
    client = VBucketAwareMemcached(rest, 'default')
    mcd = client.memcached(KEY_NAME)
    try:
        rc = mcd.set(KEY_NAME, 0, 0, json.dumps({'value': 'value2'}))
        self.fail('Bucket is incorrectly functional')
    except MemcachedError, e:
        pass  # this is the exception we are hoping for
Example 7: common_setup
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def common_setup(input, testcase):
    servers = input.servers
    RemoteUtilHelper.common_basic_setup(servers)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    for server in servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
Example 8: common_tearDown
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def common_tearDown(servers, testcase):
    log = logger.Logger.get_logger()
    log.info(
        "============== common_tearDown was started for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
    )
    try:
        rest = RestConnection(servers[0])
        buckets = rest.get_buckets()
        for bucket in buckets:
            MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
    except Exception:
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    log.info(
        "============== common_tearDown was finished for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
Example 9: tearDown
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def tearDown(self):
    try:
        if self.driver:
            path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
            full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
            self.log.info('screenshot is available: %s' % full_path)
            if not os.path.exists(path_screen):
                os.mkdir(path_screen)
            self.driver.get_screenshot_as_file(os.path.abspath(full_path))
        rest = RestConnection(self.servers[0])
        if rest._rebalance_progress_status() == 'running':
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        if self.driver:
            self.driver.close()
    except Exception as e:
        raise e
    finally:
        if self.driver:
            self.shell.disconnect()
        self.cluster.shutdown()
Example 10: test_prepared_with_warmup
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def test_prepared_with_warmup(self):
    try:
        num_srv_warm_up = self.input.param("srv_warm_up", 1)
        if self.input.tuq_client is None:
            self.fail("For this test external tuq server is required. " + \
                      "Please specify one in conf")
        self.test_union_all()
        for server in self.servers[self.nodes_init - num_srv_warm_up:self.nodes_init]:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.start_server()
            remote.disconnect()
        # run query, result may not be as expected, but tuq shouldn't fail
        try:
            self.test_union_all()
        except:
            pass
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.sleep(50)
        self.verify_cluster_stats(self.servers[:self.nodes_init])
        self.log.info("-" * 100)
        self.log.info("Querying alternate query node to test the encoded_prepare ....")
        self.test_prepared_union()
        self.log.info("-" * 100)
    finally:
        self.log.info("Done with encoded_prepare ....")
Example 11: offline_cluster_upgrade_and_reboot
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def offline_cluster_upgrade_and_reboot(self):
    self._install(self.servers[:self.nodes_init])
    self.operations(self.servers[:self.nodes_init])
    if self.ddocs_num:
        self.create_ddocs_and_views()
    if self.during_ops:
        for opn in self.during_ops:
            getattr(self, opn)()
    num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
    stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes:self.nodes_init]
    self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade")
    for upgrade_version in self.upgrade_versions:
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            remote.disconnect()
        self.sleep(self.sleep_time)
        upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
        for upgrade_thread in upgrade_threads:
            upgrade_thread.join()
        success_upgrade = True
        while not self.queue.empty():
            success_upgrade &= self.queue.get()
        if not success_upgrade:
            self.fail("Upgrade failed!")
        for server in stoped_nodes:
            remote = RemoteMachineShellConnection(server)
            remote.stop_server()
            self.sleep(5)
            remote.start_couchbase()
            remote.disconnect()
        ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
        self.verification(self.servers[:self.nodes_init])
Example 12: reset
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def reset(self):
    self.log.info(
        "============== SwapRebalanceBase cleanup was started for test #{0} {1} ==============".format(
            self.case_number, self._testMethodName
        )
    )
    self.log.info("Stopping load in Teardown")
    SwapRebalanceBase.stop_load(self.loaders)
    for server in self.servers:
        rest = RestConnection(server)
        if rest._rebalance_progress_status() == "running":
            self.log.warning("rebalancing is still running, test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
        if server.data_path:
            rest = RestConnection(server)
            rest.set_data_path(data_path=server.data_path)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
    self.log.info(
        "============== SwapRebalanceBase cleanup was finished for test #{0} {1} ==============".format(
            self.case_number, self._testMethodName
        )
    )
Example 13: tearDown
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def tearDown(self):
    try:
        if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            self.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
    except BaseException:
        # increase case_number to retry tearDown in setup for the next test
        self.case_number += 1000
    finally:
        # stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
Example 14: setUp
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.cluster = Cluster()
    self.servers = self.input.servers
    self.buckets = {}
    self.default_bucket = self.input.param("default_bucket", True)
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    if not self.input.param("skip_cleanup", False):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)
    self.quota = self._initialize_nodes(self.cluster, self.servers)
    if self.dgm_run:
        self.quota = 256
    self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
        self.buckets['default'] = {1: KVStore()}
    self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
Example 15: cleanup_cluster
# Required import: from membase.helper.cluster_helper import ClusterOperationHelper [as alias]
# Or: from membase.helper.cluster_helper.ClusterOperationHelper import wait_for_ns_servers_or_assert [as alias]
def cleanup_cluster(self):
    if "skip_cleanup" not in TestInputSingleton.input.test_params:
        BucketOperationHelper.delete_all_buckets_or_assert(
            self.servers, self.testcase)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers, self.testcase)