This page collects typical usage examples of the Python method membase.api.rest_client.RestConnection.change_bucket_props. If you are wondering what RestConnection.change_bucket_props does, how to call it, or where to find working examples, the curated samples below should help. You can also explore the wider usage of the class it belongs to, membase.api.rest_client.RestConnection.
The following presents 4 code examples of RestConnection.change_bucket_props, sorted by popularity by default.
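Before the full test cases, here is a minimal sketch of the call pattern the examples below share. It is illustrative only: master_node is a placeholder for a server object taken from your test configuration, the keyword arguments shown (ramQuotaMB, replicaNumber, flushEnabled) are ones that actually appear in the examples below, and the values are arbitrary.

# Minimal illustrative sketch, not taken verbatim from the examples below.
# master_node is a placeholder for a testrunner server object with REST credentials.
from membase.api.rest_client import RestConnection

rest = RestConnection(master_node)
# The bucket argument can be a bucket name (Example 1) or a bucket object (Examples 2-4).
rest.change_bucket_props('TestBucket',
                         ramQuotaMB=200,     # new RAM quota, in MB
                         replicaNumber=1,    # new replica count
                         flushEnabled=1)     # allow REST-initiated flush

Only the properties you pass are changed; as Examples 3 and 4 show, arguments left as None are simply not sent to the server.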
Example 1: test_bucketEvents
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import change_bucket_props [as alias]
def test_bucketEvents(self):
    ops = self.input.param("ops", None)
    user = self.master.rest_username
    source = 'ns_server'
    rest = RestConnection(self.master)
    if (ops in ['create']):
        # Expected audit-log fields for a bucket-create event
        expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1,
                           'replica_index':False, 'eviction_policy':'value_only', 'type':'membase',
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined",
                           "flush_enabled":False, "num_threads":3, "source":source,
                           "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'',
                           'conflict_resolution_type':'seqno', 'storage_mode':'couchstore',
                           'max_ttl':400, 'compression_mode':'passive'}
        # ram_quota is tracked in bytes above; create_bucket takes MB
        rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576,
                           expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                           maxTTL=expectedResults['max_ttl'])
    elif (ops in ['update']):
        # Create the bucket first...
        expectedResults = {'bucket_name':'TestBucket', 'ram_quota':209715200, 'num_replicas':1,
                           'replica_index':False, 'eviction_policy':'value_only', 'type':'membase',
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined",
                           "flush_enabled":'true', "num_threads":3, "source":source,
                           "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'',
                           'storage_mode':'couchstore', 'max_ttl':400}
        rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576,
                           expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                           maxTTL=expectedResults['max_ttl'])
        # ...then change its properties and audit the bucket-update event
        expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1,
                           'replica_index':True, 'eviction_policy':'value_only', 'type':'membase',
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined",
                           "flush_enabled":True, "num_threads":3, "source":source,
                           "user":user, "ip":self.ipAddress, "port":57457,
                           'storage_mode':'couchstore', 'max_ttl':200}
        rest.change_bucket_props(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576,
                                 expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                                 '11211', 1, 1, maxTTL=expectedResults['max_ttl'])
    elif (ops in ['delete']):
        expectedResults = {'bucket_name':'TestBucket', 'ram_quota':104857600, 'num_replicas':1,
                           'replica_index':True, 'eviction_policy':'value_only', 'type':'membase',
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined",
                           "flush_enabled":False, "num_threads":3, "source":source,
                           "user":user, "ip":self.ipAddress, "port":57457}
        rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576,
                           expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 1, expectedResults['num_threads'], 0, 'valueOnly')
        rest.delete_bucket(expectedResults['bucket_name'])
    elif (ops in ['flush']):
        # Note: ram_quota here is already in MB
        expectedResults = {'bucket_name':'TestBucket', 'ram_quota':100, 'num_replicas':1,
                           'replica_index':True, 'eviction_policy':'value_only', 'type':'membase',
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined",
                           "flush_enabled":True, "num_threads":3, "source":source,
                           "user":user, "ip":self.ipAddress, "port":57457, 'storage_mode':'couchstore'}
        rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'],
                           expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 1, expectedResults['num_threads'], 1, 'valueOnly')
        self.sleep(10)
        rest.flush_bucket(expectedResults['bucket_name'])
    self.checkConfig(self.eventID, self.master, expectedResults)
Example 2: rebalance_in_with_bucket_password_change
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import change_bucket_props [as alias]
def rebalance_in_with_bucket_password_change(self):
    if self.sasl_buckets == 0:
        self.fail("no sasl buckets are specified!")
    new_pass = self.input.param("new_pass", "new_pass")
    servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    nodes_in_second = self.input.param("nodes_in_second", 1)
    servs_in_second = self.servers[self.nodes_init + self.nodes_in:
                                   self.nodes_init + self.nodes_in + nodes_in_second]
    servs_init = self.servers[:self.nodes_init]
    servs_result = self.servers[:self.nodes_init + self.nodes_in]
    # First rebalance-in
    rebalance = self.cluster.async_rebalance(servs_init, servs_in, [])
    rebalance.result()
    # Change the password of the first non-default sasl bucket...
    rest = RestConnection(self.master)
    bucket_to_change = [bucket for bucket in self.buckets
                        if bucket.authType == 'sasl' and bucket.name != 'default'][0]
    rest.change_bucket_props(bucket_to_change, saslPassword=new_pass)
    # ...and rebalance in a second batch of nodes
    rebalance = self.cluster.async_rebalance(servs_result, servs_in_second, [])
    rebalance.result()
Example 3: SecondaryIndexingClusterOpsTests
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import change_bucket_props [as alias]
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)

    def test_change_bucket_properties(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=True, query=True)
        # Change bucket properties (None values leave those settings untouched)
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                                          ramQuotaMB=None,
                                          authType=None,
                                          saslPassword=None,
                                          replicaNumber=0,
                                          proxyPort=None,
                                          replicaIndex=None,
                                          flushEnabled=False)
        # Run query and query explain
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=False, drop_index=True,
                                  query_with_explain=True, query=True)

    def test_flush_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=True, query=True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        # Retry until the indexer leaves rollback (at most 10 tries, 5 secs apart)
        rollback_exception = True
        query_try_count = 0
        while rollback_exception and query_try_count < 10:
            self.sleep(5)
            query_try_count += 1
            # Query the buckets, expecting an empty result set
            try:
                self.multi_query_using_index_with_emptyresult(
                    query_definitions=self.query_definitions, buckets=self.buckets)
                rollback_exception = False
            except Exception, ex:
                msg = "Indexer rollback"
                if msg not in str(ex):
                    rollback_exception = False
                    self.log.info(ex)
                    raise
        self.assertFalse(rollback_exception, "Indexer still in rollback after 50 secs.")
Example 4: SecondaryIndexingClusterOpsTests
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import change_bucket_props [as alias]
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=self.run_create_index, drop_index=False,
                                  query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)

    def test_change_bucket_properties(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=True, query=True)
        # Change bucket properties (None values leave those settings untouched)
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                                          ramQuotaMB=None,
                                          authType=None,
                                          saslPassword=None,
                                          replicaNumber=0,
                                          proxyPort=None,
                                          replicaIndex=None,
                                          flushEnabled=False)
        # Run query and query explain
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=False, drop_index=True,
                                  query_with_explain=True, query=True)

    def test_flush_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=True, query=True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        self.sleep(2)
        # Query the buckets, expecting an empty result set
        self.multi_query_using_index_with_empty_result(query_definitions=self.query_definitions,
                                                       buckets=self.buckets)

    def test_delete_create_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=self.run_create_index, drop_index=False,
                                  query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(2)
        # Recreate the buckets and wait for the cluster to settle
        self._bucket_creation()
        self.sleep(2)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)
        index_map = self.get_index_stats()
        self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))

    def test_data_loss(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True, drop_index=False,
                                  query_with_explain=False, query=False)
        self._verify_bucket_count_with_index_count()
        try:
            # Hard-failover every node but the first, then rebalance them out
            servr_out = self.servers[1:self.nodes_init]
            failover_task = self.cluster.async_failover([self.master],
                                                        failover_nodes=servr_out, graceful=False)
            failover_task.result()
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                                     [], servr_out)
            rebalance.result()
            # get the items in the index and check if the data loss is reflected correctly
            self.sleep(2)
        except Exception, ex:
            raise
        finally: