This page collects typical usage examples of the Python method membase.helper.bucket_helper.BucketOperationHelper.base_bucket_ratio: what the method is for, how to call it, and the contexts it normally appears in. You may also want to look at the other methods of the containing class, membase.helper.bucket_helper.BucketOperationHelper.
The 15 code examples below show BucketOperationHelper.base_bucket_ratio in use, sorted by popularity by default.
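A recurring pattern runs through all of the examples below: base_bucket_ratio takes the list of test servers and returns the fraction of each node's RAM that the test is allowed to claim, which is then multiplied by the node's mcdMemoryReserved (or memoryQuota) to size the cluster memory quota or a bucket. The following is only a minimal sketch of that pattern; init_quota_from_ratio is a hypothetical helper name, and the RestConnection import path is assumed from the membase test framework's usual layout rather than shown on this page.

from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper

def init_quota_from_ratio(servers):
    # servers: list of server objects from the test input; servers[0] acts as the master
    master = servers[0]
    rest = RestConnection(master)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username, password=master.rest_password)
    # size the cluster memory quota as a fraction of the memory reserved for memcached
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    return node_ram_ratio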
Example 1: common_setup
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def common_setup(self):
    self.cluster_helper = Cluster()
    self.log = logger.Logger.get_logger()
    self.cluster_run = False
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    if len(set([server.ip for server in self.servers])) == 1:
        ip = rest.get_nodes_self().ip
        for server in self.servers:
            server.ip = ip
        self.cluster_run = True
    self.case_number = self.input.param("case_number", 0)
    self.replica = self.input.param("replica", 1)
    self.keys_count = self.input.param("keys-count", 1000)
    self.load_ratio = self.input.param("load-ratio", 1)
    self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
    self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
    self.num_initial_servers = self.input.param("num-initial-servers", 3)
    self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
    self.do_access = self.input.param("do-access", True)
    self.load_started = False
    self.loaders = []
    try:
        # Clear the state from the previous invalid run
        if rest._rebalance_progress_status() == "running":
            self.log.warning("rebalancing is still running, previous test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        self.log.info(
            "============== SwapRebalanceBase setup was started for test #{0} {1}==============".format(
                self.case_number, self._testMethodName
            )
        )
        SwapRebalanceBase.reset(self)
        # Make sure the test is set up correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))
        self.log.info("picking server : {0} as the master".format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        if self.num_buckets > 10:
            BaseTestCase.change_max_buckets(self, self.num_buckets)
        self.log.info(
            "============== SwapRebalanceBase setup was finished for test #{0} {1} ==============".format(
                self.case_number, self._testMethodName
            )
        )
        SwapRebalanceBase._log_start(self)
    except Exception, e:
        self.cluster_helper.shutdown()
        self.fail(e)
Example 2: common_setUp
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def common_setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.created_views = {}
    self.servers = self.input.servers
    self.replica = self.input.param("replica", 1)
    self.failover_factor = self.input.param("failover-factor", 1)
    self.num_docs = self.input.param("num-docs", 10000)
    self.num_design_docs = self.input.param("num-design-docs", 20)
    self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.case_number = self.input.param("case_number", 0)
    self.dgm_run = self.input.param("dgm_run", False)
    # avoid cleanup if the previous test has already been torn down
    if not self.input.param("skip_cleanup", True) or self.case_number == 1:
        ViewBaseTests._common_clenup(self)
    master = self.servers[0]
    rest = RestConnection(master)
    rest.set_reb_cons_view(disable=False)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    mem_quota = int(rest.get_nodes_self().mcdMemoryReserved * node_ram_ratio)
    if self.dgm_run:
        mem_quota = 256
    rest.init_cluster(master.rest_username, master.rest_password)
    rest.init_cluster_memoryQuota(master.rest_username, master.rest_password, memoryQuota=mem_quota)
    if self.num_buckets == 1:
        ViewBaseTests._create_default_bucket(self, replica=self.replica)
    else:
        ViewBaseTests._create_multiple_buckets(self, replica=self.replica)
    ViewBaseTests._log_start(self)
    db_compaction = self.input.param("db_compaction", 30)
    view_compaction = self.input.param("view_compaction", 30)
    rest.set_auto_compaction(dbFragmentThresholdPercentage=db_compaction,
                             viewFragmntThresholdPercentage=view_compaction)
Example 3: common_setup
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
    log = logger.Logger.get_logger()
    servers = input.servers
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
    serverInfo = servers[0]
    log.info('picking server : {0} as the master'.format(serverInfo))
    # if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    if "ascii" in TestInputSingleton.input.test_params \
            and TestInputSingleton.input.test_params["ascii"].lower() == "true":
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=False)
    else:
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica, node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=True)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
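The inline comment in the example above ("if all nodes are on the same machine ...") hints at the heuristic behind base_bucket_ratio: when every entry in servers resolves to the same machine, the per-node RAM fraction is divided by the number of servers so that all simulated nodes fit into one host's memory. The following is only an illustrative sketch of that heuristic, not the helper's actual source; the name illustrative_bucket_ratio and the base_fraction default are assumptions made for this page.

def illustrative_bucket_ratio(servers, base_fraction=2.0 / 3.0):
    # Sketch only: if all servers share one IP they run on a single machine,
    # so split the per-node RAM fraction across them; otherwise use it per node.
    if len(set(server.ip for server in servers)) == 1:
        return base_fraction / len(servers)
    return base_fraction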
Example 4: common_setup
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def common_setup(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    # Clear the state from the previous invalid run
    rest.stop_rebalance()
    self.load_started = False
    self.loaders = []
    SwapRebalanceBase.common_tearDown(self)
    # Initialize test params
    self.replica = self.input.param("replica", 1)
    self.keys_count = self.input.param("keys-count", 100000)
    self.load_ratio = self.input.param("load-ratio", 1)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
    self.num_initial_servers = self.input.param("num-initial-servers", 3)
    self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
    # Make sure the test is set up correctly
    min_servers = int(self.num_initial_servers) + int(self.num_swap)
    msg = "minimum {0} nodes required for running swap rebalance"
    self.assertTrue(len(self.servers) >= min_servers,
                    msg=msg.format(min_servers))
    self.log.info('picking server : {0} as the master'.format(serverInfo))
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    info = rest.get_nodes_self()
    rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example 5: setUp
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.assertTrue(self.input, msg="input parameters missing...")
    self.servers = self.input.servers
    self.master = self.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(username=self.master.rest_username,
                      password=self.master.rest_password)
    info = rest.get_nodes_self()
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
    ClusterOperationHelper.cleanup_cluster(servers=self.servers)
    credentials = self.input.membase_settings
    ClusterOperationHelper.add_all_nodes_or_assert(master=self.master, all_servers=self.servers, rest_settings=credentials, test_case=self)
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    otpNodeIds = []
    for node in nodes:
        otpNodeIds.append(node.id)
    rebalanceStarted = rest.rebalance(otpNodeIds, [])
    self.assertTrue(rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(self.master.ip))
    self.log.info('started rebalance operation on master node {0}'.format(self.master.ip))
    rebalanceSucceeded = rest.monitorRebalance()
Example 6: create_buckets
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def create_buckets(servers, testcase, howmany=1, replica=1, bucket_ram_ratio=(2.0 / 3.0)):
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    master = servers[0]
    BucketOperationHelper.create_multiple_buckets(master, replica, node_ram_ratio * bucket_ram_ratio, howmany=howmany)
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
        testcase.assertTrue(ready, "wait_for_memcached failed")
Example 7: cluster_initialization
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def cluster_initialization(servers):
    log = logger.Logger().get_logger()
    master = servers[0]
    log.info('picking server : {0} as the master'.format(master))
    # if all nodes are on the same machine let's have the bucket_ram_ratio as bucket_ram_ratio * 1/len(servers)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
    rest = RestConnection(master)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username, password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example 8: common_setup
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def common_setup(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    master = self.servers[0]
    rest = RestConnection(master)
    # Cleanup previous state
    self.task_manager = None
    rest.stop_rebalance()
    RebalanceBaseTest.reset(self)
    # Initialize test params
    self.replica = self.input.param("replica", 1)
    # By default we use keys-count for LoadTask
    # Use keys-count=-1 to use load-ratio
    self.keys_count = self.input.param("keys-count", 30000)
    self.load_ratio = self.input.param("load-ratio", 6)
    self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
    self.delete_ratio = self.input.param("delete-ratio", 0.1)
    self.access_ratio = self.input.param("access-ratio", 0.8)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.num_rebalance = self.input.param("num-rebalance", 1)
    self.do_ascii = self.input.param("ascii", False)
    self.do_verify = self.input.param("do-verify", True)
    self.repeat = self.input.param("repeat", 1)
    self.max_ops_per_second = self.input.param("max_ops_per_second", 500)
    self.min_item_size = self.input.param("min_item_size", 128)
    self.do_stop = self.input.param("do-stop", False)
    self.skip_cleanup = self.input.param("skip-cleanup", False)
    self.checkResidentRatio = self.input.param("checkResidentRatio", False)
    self.activeRatio = self.input.param("activeRatio", 50)
    self.replicaRatio = self.input.param("replicaRatio", 50)
    self.case_number = self.input.param("case_number", 0)
    self.log.info('picking server : {0} as the master'.format(master))
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    info = rest.get_nodes_self()
    rest.init_cluster(username=master.rest_username,
                      password=master.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
    BucketOperationHelper.create_multiple_buckets(master, self.replica, node_ram_ratio * (2.0 / 3.0),
                                                  howmany=self.num_buckets, sasl=not self.do_ascii)
    buckets = rest.get_buckets()
    for bucket in buckets:
        ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
        self.assertTrue(ready, "wait_for_memcached failed")
    # Initialize and start the taskManager
    self.task_manager = taskmanager.TaskManager()
    self.task_manager.start()
Example 9: setUp
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def setUp(self):
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.servers = self.input.servers
    serverInfo = self.servers[0]
    rest = RestConnection(serverInfo)
    info = rest.get_nodes_self()
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    rest.init_cluster(username=serverInfo.rest_username,
                      password=serverInfo.rest_password)
    rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
Example 10: _create_default_bucket
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def _create_default_bucket(self):
    helper = RestHelper(self.rest)
    if not helper.bucket_exists(self.bucket):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = self.rest.get_nodes_self()
        available_ram = int(info.memoryQuota * node_ram_ratio)
        if available_ram < 256:
            available_ram = 256
        self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=available_ram)
        ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
        self.testcase.assertTrue(ready, "wait_for_memcached failed")
    self.testcase.assertTrue(helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
Example 11: _create_default_bucket
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def _create_default_bucket(self):
    name = "default"
    master = self.master
    rest = RestConnection(master)
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
    info = rest.get_nodes_self()
    available_ram = info.memoryQuota * node_ram_ratio
    rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
    ready = BucketOperationHelper.wait_for_memcached(master, name)
    self.assertTrue(ready, msg="wait_for_memcached failed")
    self.load_thread = None
    self.shutdown_load_data = False
Example 12: _create_default_bucket
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def _create_default_bucket(self, replica=1):
    name = "default"
    master = self.servers[0]
    rest = RestConnection(master)
    helper = RestHelper(RestConnection(master))
    if not helper.bucket_exists(name):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        available_ram = info.memoryQuota * node_ram_ratio
        rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
        ready = BucketOperationHelper.wait_for_memcached(master, name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
Example 13: setup_cluster
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def setup_cluster(self, do_rebalance=True):
    node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved * node_ram_ratio)
    self.rest.init_cluster(self.master.rest_username, self.master.rest_password)
    self.rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password, memoryQuota=mem_quota)
    for server in self.servers:
        ClusterOperationHelper.cleanup_cluster([server])
    ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self.testcase)
    if do_rebalance:
        rebalanced = ClusterOperationHelper.add_and_rebalance(self.servers)
        self.testcase.assertTrue(rebalanced, "cluster is not rebalanced")
    self._create_default_bucket()
Example 14: test_database_fragmentation
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def test_database_fragmentation(self):
    percent_threshold = self.autocompaction_value
    bucket_name = "default"
    MAX_RUN = 100
    item_size = 1024
    update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
    serverInfo = self.servers[0]
    self.log.info(serverInfo)
    rest = RestConnection(serverInfo)
    remote_client = RemoteMachineShellConnection(serverInfo)
    output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=100)
    if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
        self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
        import json
        self.assertTrue(json.loads(rq_content).has_key("errors"), "Error is not present in response")
        self.assertTrue(json.loads(rq_content)["errors"].find("Allowed range is 2 - 100") > -1, \
                        "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(json.loads(rq_content)["errors"]))
        self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
    elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
          and percent_threshold <= MAX_RUN):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
        info = rest.get_nodes_self()
        available_ram = info.memoryQuota * (node_ram_ratio) / 2
        items = (int(available_ram * 1000) / 2) / item_size
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                           saslPassword='password', replicaNumber=1, proxyPort=11211)
        BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
        self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
        self.insert_key(serverInfo, bucket_name, items, item_size)
        self.log.info("sleep 10 seconds before the next run")
        time.sleep(10)
        self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                      int(update_item_size)))
        self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
        compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
        if not compact_run:
            self.log.error("auto compaction does not run")
        elif compact_run:
            self.log.info("auto compaction runs successfully")
        else:
            self.log.error("Unknown error")
Example 15: _create_default_bucket
# Required import: from membase.helper.bucket_helper import BucketOperationHelper [as alias]
# Or: from membase.helper.bucket_helper.BucketOperationHelper import base_bucket_ratio [as alias]
def _create_default_bucket(self):
    name = "default"
    master = self.servers[0]
    rest = RestConnection(master)
    helper = RestHelper(RestConnection(master))
    if not helper.bucket_exists(name):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        available_ram = info.mcdMemoryReserved * node_ram_ratio
        rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
        ready = BucketOperationHelper.wait_for_memcached(master, name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
    self.assertTrue(helper.bucket_exists(name),
                    msg="unable to create {0} bucket".format(name))
    self.load_thread = None
    self.shutdown_load_data = False