本文整理汇总了Python中membase.api.rest_client.RestConnection.set_auto_compaction方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.set_auto_compaction方法的具体用法?Python RestConnection.set_auto_compaction怎么用?Python RestConnection.set_auto_compaction使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.set_auto_compaction方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: common_setUp
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def common_setUp(self):
    """Shared test setUp: read test-input parameters, optionally clean up the
    previous run, initialize the cluster/memory quota, create the bucket(s),
    and configure the auto-compaction fragmentation thresholds."""
    self.log = logger.Logger.get_logger()
    self.input = TestInputSingleton.input
    self.created_views = {}
    self.servers = self.input.servers
    self.replica = self.input.param("replica", 1)
    self.failover_factor = self.input.param("failover-factor", 1)
    self.num_docs = self.input.param("num-docs", 10000)
    self.num_design_docs = self.input.param("num-design-docs", 20)
    self.expiry_ratio = self.input.param("expiry-ratio", 0.1)
    self.num_buckets = self.input.param("num-buckets", 1)
    self.case_number = self.input.param("case_number", 0)
    self.dgm_run = self.input.param("dgm_run", False)
    # Clean leftovers unless the previous test already tore everything down;
    # always clean before the very first case of a suite.
    if not self.input.param("skip_cleanup", True) or self.case_number == 1:
        ViewBaseTests._common_clenup(self)
    master_node = self.servers[0]
    rest = RestConnection(master_node)
    rest.set_reb_cons_view(disable=False)
    ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
    quota_mb = int(rest.get_nodes_self().mcdMemoryReserved * ram_ratio)
    if self.dgm_run:
        # Small fixed quota forces the data set beyond memory (DGM scenario).
        quota_mb = 256
    rest.init_cluster(master_node.rest_username, master_node.rest_password)
    rest.init_cluster_memoryQuota(master_node.rest_username,
                                  master_node.rest_password,
                                  memoryQuota=quota_mb)
    if self.num_buckets == 1:
        ViewBaseTests._create_default_bucket(self, replica=self.replica)
    else:
        ViewBaseTests._create_multiple_buckets(self, replica=self.replica)
    ViewBaseTests._log_start(self)
    db_compaction = self.input.param("db_compaction", 30)
    view_compaction = self.input.param("view_compaction", 30)
    rest.set_auto_compaction(dbFragmentThresholdPercentage=db_compaction,
                             viewFragmntThresholdPercentage=view_compaction)
示例2: execute
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def execute(self, task_manager):
    """Push this task's auto-compaction configuration to the server via REST,
    then reschedule the task into its CHECKING phase; any failure (including
    a missing config key) finishes the task with the raised exception."""
    rest = RestConnection(self.server)
    setting_keys = ("parallelDBAndVC",
                    "dbFragmentThreshold",
                    "viewFragmntThreshold",
                    "dbFragmentThresholdPercentage",
                    "viewFragmntThresholdPercentage",
                    "allowedTimePeriodFromHour",
                    "allowedTimePeriodFromMin",
                    "allowedTimePeriodToHour",
                    "allowedTimePeriodToMin",
                    "allowedTimePeriodAbort")
    try:
        # Forward every recognised setting from self.config verbatim; a
        # missing key is treated like any other failure below.
        settings = dict((key, self.config[key]) for key in setting_keys)
        rest.set_auto_compaction(bucket=self.bucket, **settings)
        self.state = CHECKING
        task_manager.schedule(self, 10)
    except Exception as e:
        self.state = FINISHED
        self.set_exception(e)
示例3: test_database_fragmentation
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def test_database_fragmentation(self):
    """Verify database auto-compaction for one threshold value.

    Out-of-range thresholds (<= MIN_COMPACTION_THRESHOLD or
    >= MAX_COMPACTION_THRESHOLD) must be rejected by the REST API with an
    'Allowed range is 2 - 100' error.  In-range thresholds must trigger
    compaction after the keyspace is overwritten with smaller values, which
    fragments the database past the configured percentage.
    """
    import json
    percent_threshold = self.autocompaction_value
    bucket_name = "default"
    MAX_RUN = 100
    item_size = 1024
    # Updated documents are smaller than the originals so rewriting every key
    # creates enough fragmentation to cross the threshold.
    update_item_size = item_size * ((float(97 - percent_threshold)) / 100)
    serverInfo = self.servers[0]
    self.log.info(serverInfo)
    rest = RestConnection(serverInfo)
    remote_client = RemoteMachineShellConnection(serverInfo)
    output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=100)
    if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
        # The REST call was rejected: check the error payload.
        self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
        # Parse the response once instead of four times; use "in" rather than
        # the Python-2-only dict.has_key().
        response = json.loads(rq_content)
        self.assertTrue("errors" in response, "Error is not present in response")
        self.assertTrue(response["errors"].find("Allowed range is 2 - 100") > -1, \
            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(response["errors"]))
        self.log.info("Response contains error = '%(errors)s' as expected" % response)
    elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
          and percent_threshold <= MAX_RUN):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
        info = rest.get_nodes_self()
        available_ram = info.memoryQuota * (node_ram_ratio) / 2
        items = (int(available_ram * 1000) / 2) / item_size
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                           saslPassword='password', replicaNumber=1, proxyPort=11211)
        BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
        self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
        self.insert_key(serverInfo, bucket_name, items, item_size)
        self.log.info("sleep 10 seconds before the next run")
        time.sleep(10)
        self.log.info("start to update {0}K keys with smaller value {1} bytes/key".format(items,
                      int(update_item_size)))
        self.insert_key(serverInfo, bucket_name, items, int(update_item_size))
        compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, timeout_in_seconds=180)
        # BUG FIX: the original "if not compact_run / elif compact_run / else"
        # made the final "Unknown error" branch unreachable (compact_run is
        # either truthy or falsy); collapse to a simple two-way branch.
        if compact_run:
            self.log.info("auto compaction runs successfully")
        else:
            self.log.error("auto compaction does not run")
示例4: test_database_fragmentation
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def test_database_fragmentation(self):
    """Validate database auto-compaction for one threshold value.

    Out-of-range thresholds must be rejected by the REST API with an
    'Allowed range is 2 - 100' error.  In-range thresholds must trigger
    compaction after the keyspace is overwritten with smaller values,
    optionally while the REST port or admin password is being changed
    concurrently (self.during_ops).
    """
    self.log.info('start test_database_fragmentation')
    # Cleared here; the background load thread is expected to record any
    # failure into self.err for the success path to check at the end.
    self.err = None
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    percent_threshold = self.autocompaction_value
    bucket_name = "default"
    MAX_RUN = 100
    item_size = 1024
    # Updated items are smaller than the originals, so rewriting every key
    # fragments the database past the configured threshold.
    update_item_size = item_size * ((float(100 - percent_threshold)) / 100)
    serverInfo = self.servers[0]
    self.log.info(serverInfo)
    rest = RestConnection(serverInfo)
    remote_client = RemoteMachineShellConnection(serverInfo)
    output, rq_content, header = rest.set_auto_compaction("false", dbFragmentThresholdPercentage=percent_threshold, viewFragmntThresholdPercentage=None)
    if not output and (percent_threshold <= MIN_COMPACTION_THRESHOLD or percent_threshold >= MAX_COMPACTION_THRESHOLD):
        # The REST call was rejected: verify the error payload.
        self.assertFalse(output, "it should be impossible to set compaction value = {0}%".format(percent_threshold))
        import json
        self.assertTrue(json.loads(rq_content).has_key("errors"), "Error is not present in response")
        self.assertTrue(str(json.loads(rq_content)["errors"]).find("Allowed range is 2 - 100") > -1, \
            "Error 'Allowed range is 2 - 100' expected, but was '{0}'".format(str(json.loads(rq_content)["errors"])))
        self.log.info("Response contains error = '%(errors)s' as expected" % json.loads(rq_content))
    elif (output and percent_threshold >= MIN_COMPACTION_THRESHOLD
          and percent_threshold <= MAX_RUN):
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
        info = rest.get_nodes_self()
        # Bucket gets half of the (ratio-scaled) node quota.
        available_ram = info.memoryQuota * (node_ram_ratio) / 2
        items = (int(available_ram * 1000) / 2) / item_size
        print "ITEMS =============%s" % items
        rest.create_bucket(bucket=bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
                           saslPassword='password', replicaNumber=1, proxyPort=11211)
        BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
        BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
        self.log.info("******start to load {0}K keys with {1} bytes/key".format(items, item_size))
        #self.insert_key(serverInfo, bucket_name, items, item_size)
        generator = BlobGenerator('compact', 'compact-', int(item_size), start=0, end=(items * 1000))
        self._load_all_buckets(self.master, generator, "create", 0, 1, batch_size=1000)
        self.log.info("sleep 10 seconds before the next run")
        time.sleep(10)
        self.log.info("********start to update {0}K keys with smaller value {1} bytes/key".format(items,
                      int(update_item_size)))
        generator_update = BlobGenerator('compact', 'compact-', int(update_item_size), start=0, end=(items * 1000))
        if self.during_ops:
            # Optionally mutate the cluster (REST port or admin password)
            # mid-test to exercise compaction under those conditions.
            if self.during_ops == "change_port":
                self.change_port(new_port=self.input.param("new_port", "9090"))
                self.master.port = self.input.param("new_port", "9090")
            elif self.during_ops == "change_password":
                old_pass = self.master.rest_password
                self.change_password(new_password=self.input.param("new_password", "new_pass"))
                self.master.rest_password = self.input.param("new_password", "new_pass")
            # NOTE(review): indentation reconstructed from a flattened source;
            # the reconnect is assumed to apply only after a during_ops change.
            rest = RestConnection(self.master)
        insert_thread = Thread(target=self.load,
                               name="insert",
                               args=(self.master, self.autocompaction_value,
                                     self.default_bucket_name, generator_update))
        try:
            self.log.info('starting the load thread')
            insert_thread.start()
            # Wait for compaction to finish while the loader rewrites keys.
            compact_run = remote_client.wait_till_compaction_end(rest, bucket_name,
                                                                 timeout_in_seconds=(self.wait_timeout * 10))
            if not compact_run:
                self.fail("auto compaction does not run")
            elif compact_run:
                self.log.info("auto compaction run successfully")
        except Exception, ex:
            self.log.info("exception in auto compaction")
            # Roll back any port/password change before deciding how to fail.
            if self.during_ops:
                if self.during_ops == "change_password":
                    self.change_password(new_password=old_pass)
                elif self.during_ops == "change_port":
                    self.change_port(new_port='8091',
                                     current_port=self.input.param("new_port", "9090"))
            if str(ex).find("enospc") != -1:
                # Disk full: flag the crash, log, and stop the loader without
                # failing the test here (Py2-only Thread internal stop).
                self.is_crashed.set()
                self.log.error("Disk is out of space, unable to load more data")
                insert_thread._Thread__stop()
            else:
                insert_thread._Thread__stop()
                raise ex
        else:
            # Success path: wait for the loader and surface any error it
            # recorded into self.err.
            insert_thread.join()
            if self.err is not None:
                self.fail(self.err)
示例5: test_settingsCluster
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def test_settingsCluster(self):
ops = self.input.param("ops", None)
source = 'ns_server'
user = self.master.rest_username
password = self.master.rest_password
rest = RestConnection(self.master)
if (ops == 'memoryQuota'):
expectedResults = {'memory_quota':512, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':12345, 'cluster_name':'', 'index_memory_quota':256}
rest.init_cluster_memoryQuota(expectedResults['user'], password, expectedResults['memory_quota'])
elif (ops == 'loadSample'):
expectedResults = {'name':'gamesim-sample', 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.addSamples()
#Get a REST Command for loading sample
elif (ops == 'enableAutoFailover'):
expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.update_autofailover_settings(True, expectedResults['timeout'])
elif (ops == 'disableAutoFailover'):
expectedResults = {'max_nodes':1, "timeout":120, 'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.update_autofailover_settings(False, expectedResults['timeout'])
elif (ops == 'resetAutoFailover'):
expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':12345}
rest.reset_autofailover()
elif (ops == 'enableClusterAlerts'):
expectedResults = {"encrypt":False, "email_server:port":25, "host":"localhost", "email_server:user":"ritam", "alerts":["auto_failover_node", "auto_failover_maximum_reached"], \
"recipients":["[email protected]"], "sender":"[email protected]", "source":"ns_server", "user":"Administrator", 'ip':self.ipAddress, 'port':1234}
rest.set_alerts_settings('[email protected]', '[email protected]', 'ritam', 'password',)
elif (ops == 'disableClusterAlerts'):
rest.set_alerts_settings('[email protected]', '[email protected]', 'ritam', 'password',)
expectedResults = {'source':source, "user":user, 'ip':self.ipAddress, 'port':1234}
rest.disable_alerts()
elif (ops == 'modifyCompactionSettingsPercentage'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:percentage":50,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50, viewFragmntThresholdPercentage=50)
elif (ops == 'modifyCompactionSettingsPercentSize'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"database_fragmentation_threshold:size":10,
"view_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:size":10,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
viewFragmntThresholdPercentage=50,
dbFragmentThreshold=10,
viewFragmntThreshold=10)
elif (ops == 'modifyCompactionSettingsTime'):
expectedResults = {"parallel_db_and_view_compaction":False,
"database_fragmentation_threshold:percentage":50,
"database_fragmentation_threshold:size":10,
"view_fragmentation_threshold:percentage":50,
"view_fragmentation_threshold:size":10,
"allowed_time_period:abort_outside":True,
"allowed_time_period:to_minute":15,
"allowed_time_period:from_minute":12,
"allowed_time_period:to_hour":1,
"allowed_time_period:from_hour":1,
"purge_interval":3,
"source":"ns_server",
"user":"Administrator",
'source':source,
"user":user,
'ip':self.ipAddress,
'port':1234,
}
rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
viewFragmntThresholdPercentage=50,
dbFragmentThreshold=10,
viewFragmntThreshold=10,
allowedTimePeriodFromHour=1,
allowedTimePeriodFromMin=12,
allowedTimePeriodToHour=1,
allowedTimePeriodToMin=15,
allowedTimePeriodAbort='true')
elif (ops == "AddGroup"):
expectedResults = {'group_name':'add group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
#.........这里部分代码省略.........
示例6: set_auto_compaction
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
def set_auto_compaction(server, parallel_compaction, percent_threshold):
    """Apply a single fragmentation threshold (percent) to both the database
    and the view auto-compaction settings of *server*."""
    connection = RestConnection(server)
    connection.set_auto_compaction(
        parallel_compaction,
        dbFragmentThresholdPercentage=percent_threshold,
        viewFragmntThresholdPercentage=percent_threshold)
示例7: PerfBase
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import set_auto_compaction [as 别名]
#.........这里部分代码省略.........
"""Set up ep_engine side compaction ratio"""
for server in self.input.servers:
shell = RemoteMachineShellConnection(server)
cmd = "/opt/couchbase/bin/cbepctl localhost:11210 "\
"set flush_param db_frag_threshold {0}".format(comp_ratio)
self._exec_and_log(shell, cmd)
shell.disconnect()
def set_autocompaction(self, disable_view_compaction=False):
"""Set custom auto-compaction settings"""
try:
# Parallel database and view compaction
parallel_compaction = self.param("parallel_compaction",
PerfDefaults.parallel_compaction)
# Database fragmentation threshold
db_compaction = self.parami("db_compaction",
PerfDefaults.db_compaction)
print "[perf.setUp] database compaction = %d" % db_compaction
# ep_engine fragementation threshold
ep_compaction = self.parami("ep_compaction",
PerfDefaults.ep_compaction)
self.set_ep_compaction(ep_compaction)
print "[perf.setUp] ep_engine compaction = %d" % ep_compaction
# View fragmentation threshold
if disable_view_compaction:
view_compaction = 100
else:
view_compaction = self.parami("view_compaction",
PerfDefaults.view_compaction)
# Set custom auto-compaction settings
self.rest.set_auto_compaction(parallelDBAndVC=parallel_compaction,
dbFragmentThresholdPercentage=db_compaction,
viewFragmntThresholdPercentage=view_compaction)
except Exception as e:
# It's very hard to determine what exception it can raise.
# Therefore we have to use general handler.
print "ERROR while changing compaction settings: {0}".format(e)
def tearDown(self):
if self.parami("tear_down", 0) == 1:
print "[perf.tearDown] tearDown routine skipped"
return
print "[perf.tearDown] tearDown routine starts"
if self.parami("tear_down_proxy", 1) == 1:
self.tear_down_proxy()
else:
print "[perf.tearDown] Proxy tearDown skipped"
if self.sc is not None:
self.sc.stop()
self.sc = None
if self.parami("tear_down_bucket", 0) == 1:
self.tear_down_buckets()
else:
print "[perf.tearDown] Bucket tearDown skipped"
if self.parami("tear_down_cluster", 1) == 1:
self.tear_down_cluster()
else:
print "[perf.tearDown] Cluster tearDown skipped"