本文整理汇总了Python中lib.membase.api.rest_client.RestConnection.set_service_memoryQuota方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.set_service_memoryQuota方法的具体用法?Python RestConnection.set_service_memoryQuota怎么用?Python RestConnection.set_service_memoryQuota使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lib.membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.set_service_memoryQuota方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: CBASBaseTest
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import set_service_memoryQuota [as 别名]
class CBASBaseTest(BaseTestCase):
def setUp(self, add_defualt_cbas_node = True):
    """Set up a CBAS (analytics) test.

    Classifies servers by advertised service, reads test parameters,
    shrinks every service memory quota to its minimum, and ensures a
    CBAS node is part of the cluster (adding one unless
    ``add_defualt_cbas_node`` is False).

    NOTE(review): the parameter name 'add_defualt_cbas_node' misspells
    'default'; kept as-is because callers may pass it by keyword.
    """
    self.log = logger.Logger.get_logger()
    # Log the test docstring when present so the test log is self-describing.
    if self._testMethodDoc:
        self.log.info("\n\nStarting Test: %s \n%s"%(self._testMethodName,self._testMethodDoc))
    else:
        self.log.info("\n\nStarting Test: %s"%(self._testMethodName))
    super(CBASBaseTest, self).setUp()
    self.cbas_node = self.input.cbas
    # Partition the configured servers by the services they advertise.
    self.cbas_servers = []
    self.kv_servers = []
    for server in self.servers:
        if "cbas" in server.services:
            self.cbas_servers.append(server)
        if "kv" in server.services:
            self.kv_servers.append(server)
    self.analytics_helper = AnalyticsHelper()
    self._cb_cluster = self.cluster
    # Expected document counts of the stock sample buckets -- TODO confirm
    # these match the server build under test.
    self.travel_sample_docs_count = 31591
    self.beer_sample_docs_count = 7303
    # Presumably an unreachable address used by negative tests -- substituted
    # for the INVALID_IP placeholder below; verify reachability assumptions.
    invalid_ip = '10.111.151.109'
    self.cb_bucket_name = self.input.param('cb_bucket_name', 'travel-sample')
    self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
    self.cb_bucket_password = self.input.param('cb_bucket_password', None)
    self.expected_error = self.input.param("error", None)
    # Expected-error strings may carry INVALID_IP / PORT placeholders;
    # substitute concrete values before any later comparison.
    if self.expected_error:
        self.expected_error = self.expected_error.replace("INVALID_IP",invalid_ip)
        self.expected_error = self.expected_error.replace("PORT",self.master.port)
    self.cb_server_ip = self.input.param("cb_server_ip", None)
    self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP',invalid_ip) if self.cb_server_ip is not None else None
    self.cbas_dataset_name = self.input.param("cbas_dataset_name", 'travel_ds')
    self.cbas_bucket_name_invalid = self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
    self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
    self.skip_create_dataset = self.input.param('skip_create_dataset', False)
    self.disconnect_if_connected = self.input.param('disconnect_if_connected', False)
    self.cbas_dataset_name_invalid = self.input.param('cbas_dataset_name_invalid', self.cbas_dataset_name)
    self.skip_drop_connection = self.input.param('skip_drop_connection',False)
    self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
    self.query_id = self.input.param('query_id',None)
    self.mode = self.input.param('mode',None)
    self.num_concurrent_queries = self.input.param('num_queries', 5000)
    self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100)
    self.compiler_param = self.input.param('compiler_param', None)
    self.compiler_param_val = self.input.param('compiler_param_val', None)
    self.expect_reject = self.input.param('expect_reject', False)
    self.expect_failure = self.input.param('expect_failure', False)
    self.index_name = self.input.param('index_name', None)
    self.index_fields = self.input.param('index_fields', None)
    # 'index_fields' arrives as one '-'-separated string; split into a list.
    if self.index_fields:
        self.index_fields = self.index_fields.split("-")
    self.otpNodes = []
    self.rest = RestConnection(self.master)
    self.log.info("Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
    # Shrink every service quota so later rebalance-in of extra nodes is not
    # rejected for lack of memory.
    self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=MIN_KV_QUOTA)
    self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=FTS_QUOTA)
    self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
    self.rest.set_service_memoryQuota(service='cbasMemoryQuota', memoryQuota=CBAS_QUOTA)
    # Drop any existing buckets and datasets
    if self.cbas_node:
        self.cleanup_cbas()
    # No explicit cbas node configured: fall back to the first server that
    # advertises the cbas service.
    if not self.cbas_node and len(self.cbas_servers)>=1:
        self.cbas_node = self.cbas_servers[0]
        if "cbas" in self.master.services:
            self.cleanup_cbas()
    if add_defualt_cbas_node:
        # Add the chosen cbas node unless it is already the master.
        if self.master.ip != self.cbas_node.ip:
            self.otpNodes.append(self.add_node(self.cbas_node))
        else:
            self.otpNodes = self.rest.node_statuses()
            ''' This cbas cleanup is actually not needed.
            When a node is added to the cluster, it is automatically cleaned-up.'''
            self.cleanup_cbas()
        # The node is now in the cluster; remove it from the spare pool.
        self.cbas_servers.remove(self.cbas_node)
    self.log.info("============== CBAS_BASE setup was finished for test #{0} {1} ==============" \
        .format(self.case_number, self._testMethodName))
def create_default_bucket(self):
    """Create the 'default' bucket sized just under the node's memory quota
    and record it in ``self.buckets``."""
    self_node = self.rest.get_nodes_self()
    # A positive reported quota overrides the inherited bucket size:
    # use (almost) the whole quota for the default bucket.
    if self_node.memoryQuota and int(self_node.memoryQuota) > 0:
        self.bucket_size = self_node.memoryQuota - 1
    bucket_params = self._create_bucket_params(
        server=self.master,
        size=self.bucket_size,
        replicas=self.num_replicas,
        bucket_type=self.bucket_type,
        enable_replica_index=self.enable_replica_index,
        eviction_policy=self.eviction_policy,
        lww=self.lww,
    )
    self.cluster.create_default_bucket(bucket_params)
    # Mirror the newly created bucket in the local bookkeeping list.
    self.buckets.append(
        Bucket(
            name="default",
            authType="sasl",
            saslPassword="",
            num_replicas=self.num_replicas,
            bucket_size=self.bucket_size,
            eviction_policy=self.eviction_policy,
            lww=self.lww,
            type=self.bucket_type,
        )
    )
    if self.enable_time_sync:
        self._set_time_sync_on_buckets(['default'])
#.........这里部分代码省略.........
示例2: EventingBaseTest
# 需要导入模块: from lib.membase.api.rest_client import RestConnection [as 别名]
# 或者: from lib.membase.api.rest_client.RestConnection import set_service_memoryQuota [as 别名]
class EventingBaseTest(QueryHelperTests, BaseTestCase):
panic_count = 0
def setUp(self):
    """Set up an eventing test.

    Disables the default bucket, locates the eventing node, shrinks the
    KV and index memory quotas so further nodes can be added, reads
    eventing-related test parameters, and generates a per-test eventing
    function name with a random suffix.
    """
    if self._testMethodDoc:
        log.info("\n\nStarting Test: %s \n%s" % (self._testMethodName, self._testMethodDoc))
    else:
        log.info("\n\nStarting Test: %s" % (self._testMethodName))
    self.input = TestInputSingleton.input
    # Eventing tests create their own buckets; suppress the default one.
    self.input.test_params.update({"default_bucket": False})
    super(EventingBaseTest, self).setUp()
    self.master = self.servers[0]
    self.server = self.master
    self.restServer = self.get_nodes_from_services_map(service_type="eventing")
    self.rest = RestConnection(self.restServer)
    # NOTE(review): "mode nodes" in the message below looks like a typo for
    # "more nodes"; left untouched since it is a runtime log string.
    self.log.info(
        "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
    # 330 MB KV quota is hard-coded here while CBASBaseTest uses MIN_KV_QUOTA
    # -- TODO: consider sharing the constant for consistency.
    self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=330)
    self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
    # self.rest.set_service_memoryQuota(service='eventingMemoryQuota', memoryQuota=EVENTING_QUOTA)
    self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
    self.eventing_log_level = self.input.param('eventing_log_level', 'TRACE')
    self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
    self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
    self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
    self.create_functions_buckets = self.input.param('create_functions_buckets', True)
    self.docs_per_day = self.input.param("doc-per-day", 1)
    # BUG FIX: the original called random.seed(datetime.time), seeding the RNG
    # with the `datetime.time` *class object* -- a constant -- so every setUp
    # re-seeded identically and the "random" suffix below repeated across tests
    # in the same process.  Seeding with no argument uses OS entropy / time.
    random.seed()
    self.function_name = "Function_{0}_{1}".format(random.randint(1, 1000000000), self._testMethodName)
def tearDown(self):
    """Tear down the eventing test.

    Scans the eventing logs for panics first so any panic is surfaced in
    the test log before the base-class teardown runs.
    """
    # catch panics and print it in the test log
    self.check_eventing_logs_for_panic()
    super(EventingBaseTest, self).tearDown()
def create_save_function_body(self, appname, appcode, description="Sample Description",
                              checkpoint_interval=10000, cleanup_timers=False,
                              dcp_stream_boundary="everything", deployment_status=True,
                              rbacpass="password", rbacrole="admin", rbacuser="cbadminbucket",
                              skip_timer_threshold=86400,
                              sock_batch_size=1, tick_duration=5000, timer_processing_tick_interval=500,
                              timer_worker_pool_size=3, worker_count=3, processing_status=True,
                              cpp_worker_thread_count=1, multi_dst_bucket=False, execution_timeout=3,
                              data_chan_size=10000, worker_queue_cap=100000):
    """Build the dict body used to create/save an eventing function.

    :param appname: name of the eventing function.
    :param appcode: path of the JavaScript handler file, relative to this
        module's directory; its contents become ``body['appcode']``.
    :param multi_dst_bucket: when True, also bind ``self.dst_bucket_name1``
        as a second destination bucket alias.
    The remaining keyword arguments map one-to-one onto entries of
    ``body['settings']``.
    :return: dict ready to be posted to the eventing REST API.
    """
    # The handler source lives next to this test module; read it verbatim.
    script_dir = os.path.dirname(__file__)
    abs_file_path = os.path.join(script_dir, appcode)
    # FIX: use a context manager so the file handle is closed even if read()
    # raises (the original used bare open()/close()).
    with open(abs_file_path, "r") as handler_file:
        appcode_text = handler_file.read()

    # Destination bucket bindings; alias and bucket name are identical here.
    buckets = [{"alias": self.dst_bucket_name, "bucket_name": self.dst_bucket_name}]
    if multi_dst_bucket:
        buckets.append({"alias": self.dst_bucket_name1, "bucket_name": self.dst_bucket_name1})

    body = {
        'appname': appname,
        'appcode': appcode_text,
        'depcfg': {
            'buckets': buckets,
            'metadata_bucket': self.metadata_bucket_name,
            'source_bucket': self.src_bucket_name,
        },
        'settings': {
            'checkpoint_interval': checkpoint_interval,
            'cleanup_timers': cleanup_timers,
            'dcp_stream_boundary': dcp_stream_boundary,
            'deployment_status': deployment_status,
            'description': description,
            'log_level': self.eventing_log_level,
            'rbacpass': rbacpass,
            'rbacrole': rbacrole,
            'rbacuser': rbacuser,
            'skip_timer_threshold': skip_timer_threshold,
            'sock_batch_size': sock_batch_size,
            'tick_duration': tick_duration,
            'timer_processing_tick_interval': timer_processing_tick_interval,
            'timer_worker_pool_size': timer_worker_pool_size,
            'worker_count': worker_count,
            'processing_status': processing_status,
            'cpp_worker_thread_count': cpp_worker_thread_count,
            'execution_timeout': execution_timeout,
            'data_chan_size': data_chan_size,
            'worker_queue_cap': worker_queue_cap,
        },
    }
    return body
def wait_for_bootstrap_to_complete(self, name):
    """Poll the eventing REST API until function ``name`` is deployed.

    Polls up to 20 times, sleeping 30s between attempts (~10 minutes).

    :param name: eventing function name to wait for.
    :raises Exception: if the function never appears as deployed.
    """
    max_attempts = 20
    attempts = 0
    result = self.rest.get_deployed_eventing_apps()
    while name not in result and attempts < max_attempts:
        self.sleep(30, message="Waiting for eventing node to come out of bootstrap state...")
        attempts += 1
        result = self.rest.get_deployed_eventing_apps()
    # BUG FIX: the original raised whenever all 20 attempts were used
    # (`if count == 20`), even when the *last* poll actually found the
    # function deployed.  Check the real outcome, not the attempt counter.
    if name not in result:
        raise Exception(
            'Eventing took lot of time to come out of bootstrap state or did not successfully bootstrap')
def wait_for_undeployment(self, name):
result = self.rest.get_deployed_eventing_apps()
count = 0
while name in result and count < 20:
self.sleep(30, message="Waiting for undeployment of function...")
count += 1
result = self.rest.get_deployed_eventing_apps()
if count == 20:
#.........这里部分代码省略.........