This article collects typical usage examples of the Python method membase.api.rest_client.RestConnection.set_internalSetting. If you have been wondering what RestConnection.set_internalSetting does, how to use it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples of its containing class, membase.api.rest_client.RestConnection.
The following presents 5 code examples of the RestConnection.set_internalSetting method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
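Before the full test cases, here is a minimal, self-contained sketch of the call pattern all five examples share: construct a RestConnection for a node, then pass a setting name and a value to set_internalSetting. The variable master_node is a hypothetical stand-in for a testrunner-style server object (with ip, rest_username, rest_password); the setting/value pair is the one that appears in Examples 3-5.

# Minimal sketch: set one internal setting on a node via the REST client.
# master_node is a hypothetical testrunner-style server object, not defined in the examples below.
from membase.api.rest_client import RestConnection

rest = RestConnection(master_node)
rest.set_internalSetting('certUseSha1', "true")   # same setting/value pair used in Examples 3-5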
Example 1: test_internalSettingsXDCR
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import set_internalSetting [as alias]
def test_internalSettingsXDCR(self):
    ops = self.input.param("ops", None)
    value = self.input.param("value", None)
    rest = RestConnection(self.master)
    user = self.master.rest_username
    source = 'ns_server'
    input = self.input.param("input", None)
    rest.set_internalSetting(input, value)
    expectedResults = {"user": user, "local_cluster_name": self.master.ip + ":8091", ops: value,
                       "source": source}
    self.checkConfig(self.eventID, self.master, expectedResults)
Example 2: test_internalSettingLocal
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import set_internalSetting [as alias]
def test_internalSettingLocal(self):
    ops = self.input.param("ops", None)
    if ":" in ops:
        ops = ops.replace(":", ",")
        ops = '{' + ops + '}'
    value = self.input.param("value", None)
    rest = RestConnection(self.master)
    user = self.master.rest_username
    source = 'ns_server'
    input = self.input.param("input", None)
    rest.set_internalSetting(input, value)
    expectedResults = {"user": user, ops: value, "source": source, "ip": self.ipAddress, "port": 57457}
    self.checkConfig(self.eventID, self.master, expectedResults)
Example 3: test_backward_compatibility
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import set_internalSetting [as alias]
def test_backward_compatibility(self):
    self.c1_version = self.initial_version
    self.c2_version = self.upgrade_versions[0]
    # install older version on C1
    self._install(self.servers[:self.src_init])
    # install latest version on C2
    self.initial_version = self.c2_version
    self._install(self.servers[self.src_init:])
    self.initial_version = self.c1_version
    self.create_buckets()
    # workaround for MB-15761
    if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
        rest = RestConnection(self.dest_master)
        rest.set_internalSetting('certUseSha1', "true")
        rest.regenerate_cluster_certificate()
    self._join_all_clusters()
    # pause replications while the initial data is loaded (pause/resume is available from 3.0 on)
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.pause_all_replications()
    self.sleep(60)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._operations()
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.resume_all_replications()
    self._wait_for_replication_to_catchup()
    if float(self.c1_version[:2]) > 2.5:
        for remote_cluster in self.src_cluster.get_remote_clusters():
            remote_cluster.modify()
        for remote_cluster in self.dest_cluster.get_remote_clusters():
            remote_cluster.modify()
    self.sleep(30)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
    self.merge_all_buckets()
    self.sleep(60)
    self._post_upgrade_ops()
    self.sleep(60)
    self.verify_results()
    # [:3] keeps the major.minor prefix, e.g. "3.1"
    if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count = NodeHelper.check_goxdcr_log(
                node,
                "Failed to repair connections to target cluster",
                goxdcr_log)
            self.assertEqual(count, 0, "Failed to repair connections to target cluster "
                                       "error message found in " + str(node.ip))
            self.log.info("Failed to repair connections to target cluster "
                          "error message not found in " + str(node.ip))
Example 4: test_backward_compatibility
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import set_internalSetting [as alias]
def test_backward_compatibility(self):
    if self.bucket_type == "ephemeral" and float(self.initial_version[:3]) < 5.0:
        self.log.info("Ephemeral buckets not available in version " + str(self.initial_version))
        self.skip_this_version = True
        return
    self.c1_version = self.initial_version
    self.c2_version = self.upgrade_versions[0]
    if self.c1_version[:3] >= self.c2_version[:3]:
        self.log.info("Initial version greater than upgrade version - not supported")
        self.skip_this_version = True
        return
    # install older version on C1
    self._install(self.servers[:self.src_init])
    # install latest version on C2
    self.initial_version = self.c2_version
    self._install(self.servers[self.src_init:])
    self.initial_version = self.c1_version
    self.create_buckets()
    # workaround for MB-15761
    if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
        rest = RestConnection(self.dest_master)
        rest.set_internalSetting('certUseSha1', "true")
        rest.regenerate_cluster_certificate()
    self._join_all_clusters()
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.pause_all_replications()
    self.sleep(60)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._operations()
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.resume_all_replications()
    self._wait_for_replication_to_catchup()
    if float(self.c1_version[:2]) > 2.5:
        for remote_cluster in self.src_cluster.get_remote_clusters():
            remote_cluster.modify()
        for remote_cluster in self.dest_cluster.get_remote_clusters():
            remote_cluster.modify()
    self.sleep(30)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
    self.merge_all_buckets()
    self.sleep(60)
    self._post_upgrade_ops()
    self.sleep(60)
    self.verify_results()
    if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        for node in self.src_cluster.get_nodes():
            count1 = NodeHelper.check_goxdcr_log(
                node,
                "Received error response from memcached in target cluster",
                goxdcr_log)
            count2 = NodeHelper.check_goxdcr_log(
                node,
                "EINVAL",
                goxdcr_log)
            count3 = NodeHelper.check_goxdcr_log(
                node,
                "Failed to repair connections to target cluster",
                goxdcr_log)
            count4 = NodeHelper.check_goxdcr_log(
                node,
                "received error response from setMeta client. Repairing connection. response status=EINVAL",
                goxdcr_log)
            count5 = NodeHelper.check_goxdcr_log(
                node,
                "GOGC in new global setting is 0, which is not a valid value and can only have come from "
                "upgrade. Changed it to 100 instead.",
                goxdcr_log)
            if count1 > 0 or count2 > 0:
                self.assertEqual(count3, 0, "Failed to repair connections to target cluster "
                                            "error message found in " + str(node.ip))
                self.log.info("Failed to repair connections to target cluster "
                              "error message not found as expected in " + str(node.ip))
                self.assertEqual(count4, 0, "Disconnect errors found in " + str(node.ip))
#......... the rest of this example's code has been omitted .........
Example 5: test_backward_compatibility
# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import set_internalSetting [as alias]
def test_backward_compatibility(self):
    self.c1_version = self.initial_version
    self.c2_version = self.upgrade_versions[0]
    # install older version on C1
    self._install(self.servers[:self.src_init])
    # install latest version on C2
    self.initial_version = self.c2_version
    self._install(self.servers[self.src_init:])
    self.initial_version = self.c1_version
    self.create_buckets()
    # workaround for MB-15761
    if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
        rest = RestConnection(self.dest_master)
        rest.set_internalSetting('certUseSha1', "true")
        rest.regenerate_cluster_certificate()
    self._join_all_clusters()
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.pause_all_replications()
    self.sleep(60)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._operations()
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
    gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)
    if float(self.c1_version[:2]) >= 3.0:
        for cluster in self.get_cb_clusters():
            for remote_cluster in cluster.get_remote_clusters():
                remote_cluster.resume_all_replications()
    self._wait_for_replication_to_catchup()
    if float(self.c1_version[:2]) > 2.5:
        for remote_cluster in self.src_cluster.get_remote_clusters():
            remote_cluster.modify()
        for remote_cluster in self.dest_cluster.get_remote_clusters():
            remote_cluster.modify()
    self.sleep(30)
    bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
    bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
    gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
    self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
    bucket = self.src_cluster.get_bucket_by_name('default')
    self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)
    self.merge_all_buckets()
    self.sleep(60)
    self._post_upgrade_ops()
    self.sleep(60)
    self.verify_results()
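Examples 3-5 all use set_internalSetting for the same thing: the MB-15761 workaround, where the destination cluster's certUseSha1 internal setting is enabled and the cluster certificate is regenerated before joining a pre-3.0 source over encrypted XDCR. Isolated as a minimal sketch below; dest_master is a hypothetical stand-in for the destination cluster's master server object.

# Minimal sketch of the MB-15761 workaround shared by Examples 3-5.
# dest_master is a hypothetical testrunner server object for the destination cluster's master node.
from membase.api.rest_client import RestConnection

rest = RestConnection(dest_master)
rest.set_internalSetting('certUseSha1', "true")   # toggle the internal setting on the destination
rest.regenerate_cluster_certificate()             # reissue the cluster certificate so the setting takes effect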