本文整理汇总了Python中couchbase.cluster.Cluster.rebalance方法的典型用法代码示例。如果您正苦于以下问题:Python Cluster.rebalance方法的具体用法?Python Cluster.rebalance怎么用?Python Cluster.rebalance使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类couchbase.cluster.Cluster
的用法示例。
在下文中一共展示了Cluster.rebalance方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: BaseTestCase
# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import rebalance [as 别名]
class BaseTestCase(unittest.TestCase):
    """Shared base for cluster tests (testrunner, Python 2).

    setUp reads testrunner input parameters, optionally tears down the
    previous test's leftovers, initializes all nodes, performs an initial
    rebalance and creates the requested buckets.  Relies on project helpers
    (logger, TestInputSingleton, Cluster, Bucket helpers) imported elsewhere
    in this file.
    """

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        # First server from the test input acts as the cluster master.
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.pre_warmup_stats = {}
        try:
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of case that is performed from testrunner( increment each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            # NOTE(review): memcached_buckets is read above but NOT added into
            # total_buckets here (a sibling variant of this class does add it)
            # -- confirm whether that is intentional.
            self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of items in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.num_replicas = self.input.param("replicas", 1)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            # max items number to verify in ValidateDataTask, None - verify all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param("rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param("rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param("maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            self.log.info(
                "============== basetestcase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            # avoid any cluster operations in setup for new upgrade & upgradeXDCR tests
            if str(self.__class__).find("newupgradetests") != -1 or str(self.__class__).find("upgradeXDCR") != -1:
                self.log.info("any cluster operation in setup will be skipped")
                self.log.info(
                    "============== basetestcase setup was finished for test #{0} {1} ==============".format(
                        self.case_number, self._testMethodName
                    )
                )
                return
            # avoid clean up if the previous test has been tear down
            # (case_number > 1000 appears to mark a failed tearDown from the
            # previous test: retry it once, then restore the real number)
            if not self.input.param("skip_cleanup", True) or self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("teardDown for previous test failed. will retry..")
                    self.case_number -= 1000
                self.tearDown()
                self.cluster = Cluster()
            self.quota = self._initialize_nodes(
                self.cluster,
                self.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port,
            )
            if str(self.__class__).find("rebalanceout.RebalanceOutTests") != -1:
                # rebalance all nodes into the cluster before each test
                self.cluster.rebalance(self.servers[: self.num_servers], self.servers[1 : self.num_servers], [])
            elif self.nodes_init > 1:
                self.cluster.rebalance(self.servers[:1], self.servers[1 : self.nodes_init], [])
            elif str(self.__class__).find("ViewQueryTests") != -1 and not self.input.param("skip_rebalance", False):
                self.cluster.rebalance(self.servers, self.servers[1:], [])
            if self.dgm_run:
                # dgm ("disk greater than memory") runs force a small quota
                self.quota = 256
            if self.total_buckets > 0:
                self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
            if str(self.__class__).find("newupgradetests") == -1:
                self._bucket_creation()
            self.log.info(
                "============== basetestcase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            self._log_start(self)
        except Exception, e:
            # On any setup failure: stop task-manager threads, fail the test.
            self.cluster.shutdown()
            self.fail(e)
示例2: CheckpointTests
# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import rebalance [as 别名]
class CheckpointTests(unittest.TestCase):
def setUp(self):
    """Prepare a clean single-vbucket cluster for checkpoint tests.

    Deletes all buckets, cleans every node, initializes the master,
    forces the vbucket count to 1 (so 'vb_0' stats cover the whole
    bucket), creates the default bucket and rebalances the requested
    number of servers in.  The previous vbucket count is remembered in
    self.old_vbuckets so tearDown can restore it.
    """
    self.cluster = Cluster()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.num_servers = self.input.param("servers", 1)
    master = self.servers[0]
    num_replicas = self.input.param("replicas", 1)
    self.bucket = 'default'
    # Start: Should be in a before class function
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
    # End: Should be in a before class function
    self.quota = self.cluster.init_node(master)
    # Save the current vbucket count before forcing it to 1.
    self.old_vbuckets = self._get_vbuckets(master)
    ClusterOperationHelper.set_vbuckets(master, 1)
    self.cluster.create_default_bucket(master, self.quota, num_replicas)
    self.cluster.rebalance(self.servers[:self.num_servers],
                           self.servers[1:self.num_servers], [])
def tearDown(self):
    """Undo setUp: restore the saved vbucket count, stop any in-flight
    rebalance, rebalance every non-master node out, delete the default
    bucket and shut the task manager down."""
    primary = self.servers[0]
    # Put back the vbucket count remembered in setUp before anything else.
    ClusterOperationHelper.set_vbuckets(primary, self.old_vbuckets)
    rest_conn = RestConnection(primary)
    rest_conn.stop_rebalance()
    current_nodes = self.servers[:self.num_servers]
    nodes_to_eject = self.servers[1:self.num_servers]
    self.cluster.rebalance(current_nodes, [], nodes_to_eject)
    self.cluster.bucket_delete(primary, self.bucket)
    self.cluster.shutdown()
def checkpoint_create_items(self):
    """Load more items than the checkpoint size and verify that every
    node opens a new checkpoint (vb_0:open_checkpoint_id increases)."""
    stat_param = 'checkpoint'
    stat_name = 'vb_0:open_checkpoint_id'
    item_count = 6000
    nodes = self.servers[:self.num_servers]
    active_node = self._get_server_by_state(nodes, self.bucket, ACTIVE)
    # Checkpoints roll over at 5000 items, so loading 6000 must open one.
    self._set_checkpoint_size(nodes, self.bucket, '5000')
    baseline = StatsCommon.get_stats(nodes, self.bucket, stat_param, stat_name)
    loader = self.generate_load(active_node, self.bucket, item_count)
    loader.join()
    waiters = [
        self.cluster.async_wait_for_stats([node], self.bucket, stat_param,
                                          stat_name, '>', before)
        for node, before in baseline.items()
    ]
    # Scale the wait with the item count, but never below 30 seconds.
    wait_secs = max(30, item_count * .001)
    for waiter in waiters:
        try:
            waiter.result(wait_secs)
        except TimeoutError:
            self.fail("New checkpoint not created")
def checkpoint_create_time(self):
    """Verify a new checkpoint is opened once the checkpoint timeout
    elapses, even with a near-empty load.

    Sets the checkpoint timeout to 60s, writes a single item, sleeps out
    the timeout, then asserts vb_0:open_checkpoint_id grew on every node.
    Restores the 600s timeout afterwards so later tests are unaffected.
    """
    param = 'checkpoint'
    timeout = 60
    stat_key = 'vb_0:open_checkpoint_id'
    master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
    self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(timeout))
    chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                      param, stat_key)
    load_thread = self.generate_load(master, self.bucket, 1)
    load_thread.join()
    # Fix: the log message contained an unbalanced ')' ("... seconds)").
    log.info("Sleeping for {0} seconds".format(timeout))
    time.sleep(timeout)
    tasks = []
    for server, value in chk_stats.items():
        tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                       stat_key, '>', value))
    for task in tasks:
        try:
            task.result(60)
        except TimeoutError:
            self.fail("New checkpoint not created")
    # Restore the server default checkpoint timeout (600s).
    self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(600))
def checkpoint_collapse(self):
param = 'checkpoint'
chk_size = 5000
num_items = 25000
stat_key = 'vb_0:last_closed_checkpoint_id'
stat_chk_itms = 'vb_0:num_checkpoint_items'
master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
slave1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
slave2 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA2)
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(chk_size))
m_stats = StatsCommon.get_stats([master], self.bucket, param, stat_key)
self._stop_replication(slave2, self.bucket)
load_thread = self.generate_load(master, self.bucket, num_items)
load_thread.join()
tasks = []
#.........这里部分代码省略.........
示例3: BaseTestCase
# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import rebalance [as 别名]
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.servers = self.input.servers
if str(self.__class__).find('moxitests') != -1:
self.moxi_server = self.input.moxis[0]
self.servers = [server for server in self.servers
if server.ip != self.moxi_server.ip]
self.buckets = []
self.master = self.servers[0]
self.cluster = Cluster()
self.pre_warmup_stats = {}
try:
self.auth_mech = self.input.param("auth_mech", "PLAIN")
self.wait_timeout = self.input.param("wait_timeout", 60)
# number of case that is performed from testrunner( increment each time)
self.case_number = self.input.param("case_number", 0)
self.default_bucket = self.input.param("default_bucket", True)
if self.default_bucket:
self.default_bucket_name = "default"
self.standard_buckets = self.input.param("standard_buckets", 0)
self.sasl_buckets = self.input.param("sasl_buckets", 0)
self.num_buckets = self.input.param("num_buckets", 0)
self.memcached_buckets = self.input.param("memcached_buckets", 0)
self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets + self.memcached_buckets
self.num_servers = self.input.param("servers", len(self.servers))
# initial number of items in the cluster
self.nodes_init = self.input.param("nodes_init", 1)
self.nodes_in = self.input.param("nodes_in", 1)
self.nodes_out = self.input.param("nodes_out", 1)
self.num_replicas = self.input.param("replicas", 1)
self.enable_replica_index = self.input.param("index_replicas", 1)
self.num_items = self.input.param("items", 1000)
self.value_size = self.input.param("value_size", 512)
self.dgm_run = self.input.param("dgm_run", False)
self.active_resident_threshold = int(self.input.param("active_resident_threshold", 0))
# max items number to verify in ValidateDataTask, None - verify all
self.max_verify = self.input.param("max_verify", None)
# we don't change consistent_view on server by default
self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
self.rebalanceIndexWaitingDisabled = self.input.param("rebalanceIndexWaitingDisabled", None)
self.rebalanceIndexPausingDisabled = self.input.param("rebalanceIndexPausingDisabled", None)
self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
self.maxParallelReplicaIndexers = self.input.param("maxParallelReplicaIndexers", None)
self.quota_percent = self.input.param("quota_percent", None)
self.port = None
self.log_info=self.input.param("log_info", None)
self.log_location=self.input.param("log_location", None)
self.stat_info=self.input.param("stat_info", None)
self.port_info=self.input.param("port_info", None)
if self.input.param("log_info", None):
self.change_log_info()
if self.input.param("log_location", None):
self.change_log_location()
if self.input.param("stat_info", None):
self.change_stat_info()
if self.input.param("port_info", None):
self.change_port_info()
if self.input.param("port", None):
self.port = str(self.input.param("port", None))
self.log.info("============== basetestcase setup was started for test #{0} {1}=============="\
.format(self.case_number, self._testMethodName))
# avoid any cluster operations in setup for new upgrade & upgradeXDCR tests
if str(self.__class__).find('newupgradetests') != -1 or \
str(self.__class__).find('upgradeXDCR') != -1 or \
hasattr(self, 'skip_buckets_handle') and self.skip_buckets_handle:
self.log.info("any cluster operation in setup will be skipped")
self.log.info("============== basetestcase setup was finished for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
return
# avoid clean up if the previous test has been tear down
if not self.input.param("skip_cleanup", True) or self.case_number == 1 or self.case_number > 1000:
if self.case_number > 1000:
self.log.warn("teardDown for previous test failed. will retry..")
self.case_number -= 1000
self.tearDown()
self.cluster = Cluster()
self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view,
self.rebalanceIndexWaitingDisabled, self.rebalanceIndexPausingDisabled,
self.maxParallelIndexers, self.maxParallelReplicaIndexers, self.port)
try:
if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
(str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
str(self.__class__).find('negativetests.NegativeTests') != -1:
# rebalance all nodes into the cluster before each test
self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
elif self.nodes_init > 1:
self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
elif str(self.__class__).find('ViewQueryTests') != -1 and \
not self.input.param("skip_rebalance", False):
self.cluster.rebalance(self.servers, self.servers[1:], [])
except BaseException, e:
# increase case_number to retry tearDown in setup for the next test
self.case_number += 1000
#.........这里部分代码省略.........
示例4: BaseTestCase
# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import rebalance [as 别名]
class BaseTestCase(unittest.TestCase):
def setUp(self):
    """Read testrunner parameters, optionally clean up the previous
    test, rebalance the initial nodes in, initialize quotas and create
    the default/sasl/standard buckets.  Relies on project helpers
    (logger, TestInputSingleton, Cluster, Bucket) imported elsewhere.
    """
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    self.buckets = []
    # First server from the test input acts as the cluster master.
    self.master = self.servers[0]
    self.cluster = Cluster()
    self.wait_timeout = self.input.param("wait_timeout", 60)
    #number of case that is performed from testrunner( increment each time)
    self.case_number = self.input.param("case_number", 0)
    self.default_bucket = self.input.param("default_bucket", True)
    if self.default_bucket:
        self.default_bucket_name = "default"
    self.standard_buckets = self.input.param("standard_buckets", 0)
    self.sasl_buckets = self.input.param("sasl_buckets", 0)
    self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
    self.num_servers = self.input.param("servers", len(self.servers))
    #initial number of items in the cluster
    self.nodes_init = self.input.param("nodes_init", 1)
    self.num_replicas = self.input.param("replicas", 1)
    self.num_items = self.input.param("items", 1000)
    self.dgm_run = self.input.param("dgm_run", False)
    #max items number to verify in ValidateDataTask, None - verify all
    self.max_verify = self.input.param("max_verify", None)
    #we don't change consistent_view on server by default
    self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
    self.log.info("============== basetestcase setup was started for test #{0} {1}=============="\
                  .format(self.case_number, self._testMethodName))
    #avoid clean up if the previous test has been tear down
    if not self.input.param("skip_cleanup", True) or self.case_number == 1:
        self.tearDown()
        self.cluster = Cluster()
    if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
        #rebalance all nodes into the cluster before each test
        self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
    elif self.nodes_init > 1:
        self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
    self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
    if self.dgm_run:
        # dgm ("disk greater than memory") runs force a small quota
        self.quota = 256
    if self.total_buckets > 0:
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                   num_replicas=self.num_replicas, bucket_size=self.bucket_size))
    self._create_sasl_buckets(self.master, self.sasl_buckets)
    self._create_standard_buckets(self.master, self.standard_buckets)
    self.log.info("============== basetestcase setup was finished for test #{0} {1} =============="\
                  .format(self.case_number, self._testMethodName))
    self._log_start(self)
def tearDown(self):
    """Clean the cluster after a test unless cleanup is skipped.

    Cleanup is skipped when the test failed and "stop-on-failure" is
    set, or when "skip_cleanup" is requested.  Otherwise: log alerts,
    stop any running rebalance, delete all buckets and reset every
    node.  The task manager is always shut down via the finally block.
    """
    try:
        if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            self.log.info("============== basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            rest = RestConnection(self.master)
            # Surface any server-side alerts in the test log.
            alerts = rest.get_alerts()
            if alerts is not None and len(alerts) != 0:
                self.log.warn("Alerts were found: {0}".format(alerts))
            # A rebalance still running at teardown is suspicious; stop it.
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            time.sleep(10)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            self.log.info("============== basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
    finally:
        #stop all existing task manager threads
        self.cluster.shutdown()
        self._log_finish(self)
@staticmethod
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
#.........这里部分代码省略.........