本文整理汇总了Python中membase.api.rest_client.RestConnection.add_remote_cluster方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.add_remote_cluster方法的具体用法?Python RestConnection.add_remote_cluster怎么用?Python RestConnection.add_remote_cluster使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类membase.api.rest_client.RestConnection
的用法示例。
在下文中一共展示了RestConnection.add_remote_cluster方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cluster_xdcr_remote_clusters_read
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def cluster_xdcr_remote_clusters_read(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
    """Verify RBAC access to the XDCR remote-cluster read endpoint.

    Creates a remote-cluster reference and a replication on servers[0],
    then checks the HTTP status code returned for
    /pools/default/remoteClusters (GET) under the given user/role.
    """
    remote_cluster_name = 'rbac_cluster'
    rest = RestConnection(servers[0])
    remote_server01 = servers[1]
    remote_server02 = servers[2]
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.delete_bucket()
    rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
    rest_remote02 = RestConnection(remote_server02)
    # ------ First test the GET requests for XDCR --------------#
    # Remove all remote cluster references and replications
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    # Add remote cluster reference and start a replication
    rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(20)  # allow the remote-cluster reference to be established
    replication_id = rest.start_replication('continuous', 'default', remote_cluster_name)
    _cluster_xdcr_remote_clusters_read = {
        "remove_cluser_read": "/pools/default/remoteClusters;GET",
    }
    # BUG FIX: honour the caller-supplied port instead of hard-coding 8091
    result = self._return_http_code(_cluster_xdcr_remote_clusters_read, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
示例2: start_replication
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def start_replication(self, master, slave, replication_type='continuous',
                      buckets=None, bidir=False, suffix='A'):
    """Register *slave* as a remote cluster on *master* and start a
    replication into it for every relevant bucket."""
    rest = RestConnection(master)
    reference = 'remote_cluster_' + suffix
    rest.add_remote_cluster(slave.ip, slave.port,
                            slave.rest_username,
                            slave.rest_password,
                            reference)
    # An explicit bucket list means the reverse leg: use the reversed set.
    buckets = self.get_buckets() if not buckets else self.get_buckets(reversed=True)
    for bucket_name in buckets:
        rest.start_replication(replication_type, bucket_name, reference)
    # Mirror the replication in the opposite direction where requested.
    # NOTE(review): when num_buckets > 1 and bidir are both true the reverse
    # leg is started twice — preserved from the original behaviour.
    if self.parami('num_buckets', 1) > 1 and suffix == 'A':
        self.start_replication(slave, master, replication_type, buckets,
                               suffix='B')
    if bidir:
        self.start_replication(slave, master, replication_type, buckets,
                               suffix='B')
示例3: test_basic_xdcr_with_cert_regenerate
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_basic_xdcr_with_cert_regenerate(self):
    """Set up SSL XDCR between two 2-node clusters, regenerate the root
    certificate on both sides, then verify the replication still responds
    to a pause/resume cycle (i.e. it survived the certificate upgrade).
    """
    cluster1 = self.servers[0:2]
    cluster2 = self.servers[2:4]
    remote_cluster_name = 'sslcluster'
    restCluster1 = RestConnection(cluster1[0])
    restCluster2 = RestConnection(cluster2[0])
    try:
        # Setup cluster1: install certificates, add second node, rebalance
        x509main(cluster1[0]).setup_master()
        x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
        restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
        # otp node ids follow the 'ns_1@<ip>' convention
        known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
        restCluster1.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(restCluster1), "Issue with rebalance")
        restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
        restCluster1.remove_all_replications()
        restCluster1.remove_all_remote_clusters()
        # Setup cluster2 the same way
        x509main(cluster2[0]).setup_master()
        x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
        restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
        known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
        restCluster2.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
        restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
        # BUG FIX: close the certificate file instead of leaking the handle
        cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
        with open(cert_path, 'rb') as cert_file:
            data = cert_file.read()
        restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port, 'Administrator', 'password', remote_cluster_name, certificate=data)
        replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
        # Regenerate certificates with a new root CN and reinstall on both clusters
        x509main(self.master)._delete_inbox_folder()
        x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority")
        self.log.info("Setting up the first cluster for new certificate")
        x509main(cluster1[0]).setup_master()
        x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
        self.log.info("Setting up the second cluster for new certificate")
        x509main(cluster2[0]).setup_master()
        x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
        # A successful pause proves the replication survived the cert change
        status = restCluster1.is_replication_paused('default', 'default')
        if not status:
            restCluster1.set_xdcr_param('default', 'default', 'pauseRequested', False)
        restCluster1.set_xdcr_param('default', 'default', 'pauseRequested', True)
        status = restCluster1.is_replication_paused('default', 'default')
        self.assertTrue(status, "Replication has not started after certificate upgrade")
    finally:
        # Teardown: rebalance the extra node out of cluster2, drop its bucket
        known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
        restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
        self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
        restCluster2.delete_bucket()
示例4: _link_create_replications
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def _link_create_replications(self, master_1, master_2, cluster_name):
    """Add *master_2* as a remote cluster on *master_1* and start a
    continuous replication for every known bucket."""
    conn = RestConnection(master_1)
    conn.add_remote_cluster(master_2.ip, master_2.port, master_1.rest_username,
                            master_1.rest_password, cluster_name)
    time.sleep(30)  # let the remote-cluster reference settle
    if not self._buckets:
        self._buckets = conn.get_buckets()
    for bucket in set(self._buckets):
        rep_database, rep_id = conn.start_replication("continuous", bucket, cluster_name)
示例5: test_continuous_unidirectional_deletes_2
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_continuous_unidirectional_deletes_2(self):
    """Replicate deletions for keys the destination never saw.

    Loads the same keys several times on cluster A (bumping seqnos),
    deletes them, and only then starts continuous replication to
    cluster B; verifies the deletes arrive via the changes feed."""
    cluster_ref_a = "cluster_ref_a"
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    # Load data on cluster A a few times so the seqnos are bumped up,
    # then delete everything.
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    loaders = []
    for _ in range(3):
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        loaders.append(RebalanceDataGenerator.start_load(rest_conn_a,
                                                         self._buckets[0],
                                                         task_def, kvstore))
    for loader in loaders:
        loader.start()
    for loader in loaders:
        loader.join()
    time.sleep(10)
    self._params["ops"] = "delete"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    deleter = RebalanceDataGenerator.start_load(rest_conn_a,
                                                self._buckets[0],
                                                task_def, kvstore)
    deleter.start()
    deleter.join()
    # Start replication so the deletes flow from cluster A to cluster B,
    # where the keys never existed.
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    rep_database, rep_id = rest_conn_a.start_replication("continuous",
                                                         self._buckets[0],
                                                         cluster_ref_b)
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))
    time.sleep(15)
    # Verify the replicated deletes
    self.assertTrue(XDCRBaseTest.verify_del_items(rest_conn_a,
                                                  rest_conn_b,
                                                  self._buckets[0],
                                                  kvstore.keys(),
                                                  self._poll_sleep,
                                                  self._poll_timeout),
                    "Changes feed verification failed")
示例6: cluster_bucket_xdcr_write
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def cluster_bucket_xdcr_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
    """Verify RBAC access to the XDCR write endpoints:
    controller/createReplication (POST) and controller/cancelXDCR
    (POST and DELETE)."""
    remote_cluster_name = 'rbac_cluster'
    # BUG FIX: a second, redundant RestConnection(servers[0]) was created;
    # the unused endpoint dict that shadowed the settings test was dropped.
    rest = RestConnection(servers[0])
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_server01 = servers[1]
    remote_server02 = servers[2]
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.delete_bucket()
    rest_remote01.create_bucket(bucket='default1', ramQuotaMB=100, proxyPort=11252)
    rest_remote02 = RestConnection(remote_server02)
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(10)
    # --- createReplication (POST) ---
    param_map = {'replicationType': 'continuous', 'toBucket': 'default1', 'fromBucket': 'default', 'toCluster': remote_cluster_name,
                 'type': 'capi'}
    create_replication = {"create_replication": "controller/createReplication;POST;" + str(param_map)}
    result = self._return_http_code(create_replication, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    # --- cancelXDCR (POST) ---
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(20)
    replication_id = rest.start_replication('continuous', fromBucket='default', toCluster=remote_cluster_name, toBucket='default1')
    replication_id = replication_id.replace("/", "%2F")  # id is embedded in the URL path
    cancel_replication = {"cancel_XDCR": "controller/cancelXDCR/" + replication_id + ";POST"}
    result = self._return_http_code(cancel_replication, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    # --- cancelXDCR (DELETE) ---
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(20)
    replication_id = rest.start_replication('continuous', fromBucket='default', toCluster=remote_cluster_name, toBucket='default1')
    replication_id = replication_id.replace("/", "%2F")
    cancel_replication = {"cancel_XDCR": "controller/cancelXDCR/" + replication_id + ";DELETE"}
    result = self._return_http_code(cancel_replication, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    # Cleanup
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    rest_remote01.delete_bucket('default1')
示例7: test_continuous_unidirectional_sets_deletes
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_continuous_unidirectional_sets_deletes(self):
    """Continuous unidirectional replication: load sets on cluster A,
    delete a fifth of them, then verify both the data and the revisions
    on cluster B.
    """
    # BUG FIX: removed the dead store `cluster_ref_b = "cluster_ref_a"`
    # that was immediately overwritten below.
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    # Start replication
    replication_type = "continuous"
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                           self._buckets[0],
                                                           cluster_ref_b)
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))
    # Start load
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    load_thread.join()
    # Do some deletes (a fifth of the items)
    self._params["ops"] = "delete"
    self._params["count"] = self._num_items / 5
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    load_thread.start()
    load_thread.join()
    # Verify replicated data and revisions
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")
示例8: cluster_xdcr_settings_write
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def cluster_xdcr_settings_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
    """Verify RBAC access to the global XDCR settings endpoint
    (settings/replications, POST)."""
    _cluster_xdcr_settings_read = {
        "replication_settings": "settings/replications;POST;{'httpConnections': 20}"
    }
    remote_cluster_name = 'rbac_cluster'
    # BUG FIX: RestConnection(servers[0]) was created twice; one is enough.
    rest = RestConnection(servers[0])
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_server01 = servers[1]
    remote_server02 = servers[2]
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.delete_bucket()
    rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
    rest_remote02 = RestConnection(remote_server02)
    rest_remote02.delete_bucket()
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(20)  # allow the remote-cluster reference to be established
    replication_id = rest.start_replication('continuous', 'default', remote_cluster_name)
    result = self._return_http_code(_cluster_xdcr_settings_read, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    # Cleanup
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    rest_remote01.delete_bucket()
示例9: cluster_xdcr_remote_clusters_write
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def cluster_xdcr_remote_clusters_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
    """Verify RBAC access to the remote-cluster write endpoints
    (pools/default/remoteClusters POST / PUT / DELETE)."""
    rest = RestConnection(servers[0])
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    _cluster_xdcr_remove_cluster_write = {
        "remoteClusters": "pools/default/remoteClusters;POST",
        "remote_cluster_id": "pools/default/remoteClusters/<id>;PUT",
        "delete_remote": "pools/default/remoteClusters/<id>;DELETE"
    }
    # --- POST: create a remote-cluster reference ---
    params = {'hostname': "{0}:{1}".format(servers[1].ip, servers[1].port), 'username': 'Administrator', 'password': 'password', 'name': 'rbac_remote01'}
    add_node = {"remoteClusters": "pools/default/remoteClusters;POST;" + str(params)}
    result = self._return_http_code(add_node, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_cluster_name = 'rbac_cluster'
    # BUG FIX: removed the duplicate RestConnection(servers[0]) creation.
    remote_server01 = servers[1]
    remote_server02 = servers[2]
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.delete_bucket()
    rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
    rest_remote02 = RestConnection(remote_server02)
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    # --- DELETE: remove an existing remote-cluster reference ---
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    time.sleep(20)  # allow the reference to be established before deleting it
    delete_remote = {"delete_remote": "pools/default/remoteClusters/" + str(remote_cluster_name) + ";DELETE"}
    result = self._return_http_code(delete_remote, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
示例10: cluster_bucket_xdcr_read
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def cluster_bucket_xdcr_read(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
    """Verify RBAC access to the per-replication settings read endpoint
    (settings/replications/<id>, GET)."""
    _cluster_bucket_xdcr_read = {
        "replication_settings": "settings/replications/<id>;GET"
    }
    remote_cluster_name = 'rbac_cluster'
    # BUG FIX: RestConnection(servers[0]) was created twice; one is enough.
    rest = RestConnection(servers[0])
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    remote_server01 = servers[1]
    remote_server02 = servers[2]
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.delete_bucket()
    rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
    rest_remote02 = RestConnection(remote_server02)
    remote_id = rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    replication_id = rest.start_replication('continuous', 'default', remote_cluster_name)
    replication_id = replication_id.replace("/", "%2F")  # id is embedded in the URL path
    bucket_xdcr_read = {"replication_settings": "settings/replications/" + replication_id + ";GET"}
    result = self._return_http_code(bucket_xdcr_read, username, password, host=host, port=port, httpCode=httpCode, user_role=user_role)
    # Cleanup
    rest.remove_all_replications()
    rest.remove_all_remote_clusters()
    rest_remote01.delete_bucket()
示例11: _start_es_replication
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def _start_es_replication(self, bucket='default', xdcr_params=None):
    """Create *bucket* on the source cluster, a matching index on the
    Elasticsearch destination, and start a CAPI replication to it.

    Returns the replication id."""
    # BUG FIX: the original used a mutable default argument (xdcr_params={})
    # which is shared across calls; use None and create a fresh dict.
    if xdcr_params is None:
        xdcr_params = {}
    rest_conn = RestConnection(self.src_cluster.get_master_node())
    # Per-bucket creation options; unknown bucket names skip creation,
    # matching the original if/elif chain.
    bucket_opts = {
        'default':  {'authType': 'none', 'saslPassword': '', 'proxyPort': 11211, 'lww': False},
        'sasl':     {'authType': 'sasl', 'saslPassword': 'password', 'proxyPort': 11211, 'lww': False},
        'standard': {'authType': 'none', 'saslPassword': '', 'proxyPort': STANDARD_BUCKET_PORT, 'lww': False},
        'lww':      {'authType': 'none', 'saslPassword': '', 'proxyPort': 11211, 'lww': True},
    }
    if bucket in bucket_opts:
        opts = bucket_opts[bucket]
        self.log.info("Creating %s bucket" % bucket)
        rest_conn.create_bucket(bucket=bucket, ramQuotaMB=100, authType=opts['authType'],
                                saslPassword=opts['saslPassword'], replicaNumber=1,
                                proxyPort=opts['proxyPort'], bucketType='membase', replica_index=1,
                                threadsNumber=3, flushEnabled=1, lww=opts['lww'])
        self.src_cluster.add_bucket(ramQuotaMB=100, bucket=bucket, authType=opts['authType'],
                                    saslPassword=opts['saslPassword'], replicaNumber=1,
                                    proxyPort=opts['proxyPort'], bucketType='membase',
                                    evictionPolicy='valueOnly')
    esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
    esrest_conn.create_index(bucket)
    rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip, remotePort=9091, username='Administrator',
                                 password='password', name='es')
    self.src_cluster.get_remote_clusters().append(XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                                                                       Utility.get_rc_name(self.src_cluster.get_name(),
                                                                                           self.dest_cluster.get_name())))
    repl_id = rest_conn.start_replication(replicationType='continuous', fromBucket=bucket, toCluster='es',
                                          rep_type='capi', toBucket=bucket, xdcr_params=xdcr_params)
    return repl_id
示例12: test_basic_xdcr_with_cert
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_basic_xdcr_with_cert(self):
    """Set up SSL XDCR between two 2-node clusters using a CA certificate
    and verify a replication can be created."""
    cluster1 = self.servers[0:2]
    cluster2 = self.servers[2:4]
    remote_cluster_name = 'sslcluster'
    restCluster1 = RestConnection(cluster1[0])
    restCluster2 = RestConnection(cluster2[0])
    try:
        # Setup cluster1: install certificates, add second node, rebalance
        x509main(cluster1[0]).setup_master()
        x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
        restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
        # otp node ids follow the 'ns_1@<ip>' convention
        known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
        restCluster1.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(restCluster1), "Issue with rebalance")
        restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
        restCluster1.remove_all_replications()
        restCluster1.remove_all_remote_clusters()
        # Setup cluster2 the same way
        x509main(cluster2[0]).setup_master()
        x509main(cluster2[1])._setup_node_certificates(reload_cert=False)
        restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
        known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
        restCluster2.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
        restCluster2.create_bucket(bucket='default', ramQuotaMB=100)
        # BUG FIX: close the certificate file instead of leaking the handle
        cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
        with open(cert_path, 'rb') as cert_file:
            data = cert_file.read()
        restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port, 'Administrator', 'password', remote_cluster_name, certificate=data)
        replication_id = restCluster1.start_replication('continuous', 'default', remote_cluster_name)
        # BUG FIX: the original asserted a constant True; actually check the id
        self.assertIsNotNone(replication_id, "Replication was not created successfully")
    finally:
        # Teardown: rebalance the extra node out of cluster2, drop its bucket
        known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
        restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
        self.assertTrue(self.check_rebalance_complete(restCluster2), "Issue with rebalance")
        restCluster2.delete_bucket()
示例13: _XDCR_role_test
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def _XDCR_role_test(self):
    """Exercise the replication-admin read and write role mappings for
    XDCR: first the GET endpoints, then the POST endpoints."""
    params = {}
    remote_cluster_name = 'rbac_cluster'
    remote_server01 = self.servers[1]
    remote_server02 = self.servers[2]
    read_role = '_replication_admin_read'
    write_role = '_replication_admin_write'
    rest_remote01 = RestConnection(remote_server01)
    rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
    rest_remote02 = RestConnection(remote_server02)
    rest_remote02.create_bucket(bucket='default', ramQuotaMB=100)

    def _clear(*conns):
        # Drop every replication and remote-cluster reference on each node.
        for conn in conns:
            conn.remove_all_replications()
            conn.remove_all_remote_clusters()

    # ------ First test the GET requests for XDCR --------------#
    _clear(self.rest)
    # Add remote cluster reference and a replication
    self.rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', remote_cluster_name)
    replication_id = self.rest.start_replication('continuous', 'default', remote_cluster_name)
    masDict, tc_status = self.rbac._iterate_role_mapping(read_role, "Administrator", "password", {'replication_id': replication_id})
    _clear(self.rest, rest_remote01, rest_remote02)
    # ----------- Second test: POST requests for XDCR ---------------#
    _clear(self.rest, rest_remote01, rest_remote02)
    self.rest.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', "onetotwo")
    rest_remote01.add_remote_cluster(self.master.ip, 8091, 'Administrator', 'password', 'twotoone')
    rest_remote02.add_remote_cluster(remote_server01.ip, 8091, 'Administrator', 'password', "threetotwo")
    rest_remote02.add_remote_cluster(self.master.ip, 8091, 'Administrator', 'password', 'threetoone')
    params['remote_cluster_name'] = 'onetotwo'
    params['remoteCluster01'] = {'username': 'Administrator', 'password': 'password', 'hostname': '192.168.46.103:8091', 'name': 'onetothree'}
    params['create_replication'] = {'replicationType': 'continuous', 'toBucket': 'default', 'fromBucket': 'default', 'toCluster': 'twotoone', 'type': 'xmem'}
    params['replication_id'] = rest_remote01.start_replication('continuous', 'default', 'twotoone')
    masDict, tc_status = self.rbac._iterate_role_mapping('_replication_admin_write01', "Administrator", "password", params)
    masDict, tc_status = self.rbac._iterate_role_mapping('_replication_admin_write02', "Administrator", "password", params, self.servers[1])
'''
示例14: test_incremental_rebalance_out_continuous_bidirectional_sets_deletes
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_incremental_rebalance_out_continuous_bidirectional_sets_deletes(self):
cluster_ref_a = "cluster_ref_a"
master_a = self._input.clusters.get(0)[0]
rest_conn_a = RestConnection(master_a)
cluster_ref_b = "cluster_ref_b"
master_b = self._input.clusters.get(1)[0]
rest_conn_b = RestConnection(master_b)
# Setup bi-directional continuous replication
replication_type = "continuous"
rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
master_b.rest_username,
master_b.rest_password, cluster_ref_b)
rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
master_a.rest_username,
master_a.rest_password, cluster_ref_a)
(rep_database_a, rep_id_a) = rest_conn_a.start_replication(
replication_type, self._buckets[0],
cluster_ref_b)
(rep_database_b, rep_id_b) = rest_conn_b.start_replication(
replication_type, self._buckets[0],
cluster_ref_a)
self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))
load_thread_list = []
# Start load
kvstore = ClientKeyValueStore()
self._params["ops"] = "set"
task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
self._buckets[0],
task_def, kvstore)
load_thread.start()
load_thread.join()
# Do some deletes
self._params["ops"] = "delete"
self._params["count"] = self._num_items/5
task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
self._buckets[0],
task_def, kvstore)
load_thread_list.append(load_thread)
# Start all loads concurrently
for lt in load_thread_list:
lt.start()
# Trigger rebalance on both source and destination clusters
servers_a = self._input.clusters.get(0)
servers_b = self._input.clusters.get(1)
rebalanced_servers_a = []
rebalanced_servers_b = []
which_servers_a = []
which_servers_b = []
# Rebalance all the nodes together
RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)
RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
rebalanced_servers_a.extend(servers_a)
rebalanced_servers_b.extend(servers_b)
nodes_a = rest_conn_a.node_statuses()
nodes_b = rest_conn_b.node_statuses()
# Incremental rebalance out one node in cluster_a, then cluster_b
while len(nodes_a) > 1:
toBeEjectedNode = RebalanceHelper.pick_node(master_a)
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
ejectedNodes=[toBeEjectedNode.id])
self.assertTrue(rest_conn_a.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
while len(nodes_b) > 1:
toBeEjectedNode = RebalanceHelper.pick_node(master_b)
self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()],\
ejectedNodes=[toBeEjectedNode.id])
self.assertTrue(rest_conn_b.monitorRebalance(),
msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
break
for node in nodes_b:
for rebalanced_server in rebalanced_servers_b:
if rebalanced_server.ip.find(node.ip) != -1:
rebalanced_servers_b.remove(rebalanced_server)
break
nodes_b = rest_conn_a.node_statuses()
for node in nodes_a:
for rebalanced_server in rebalanced_servers_a:
if rebalanced_server.ip.find(node.ip) != -1:
#.........这里部分代码省略.........
示例15: test_failover_source_sets
# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import add_remote_cluster [as 别名]
def test_failover_source_sets(self):
    """Start a 2-2 unidirectional replication from cluster A to cluster B;
    during the load, fail over source nodes one at a time until a single
    node remains (1-2 topology), then verify data and revisions on both
    clusters.
    """
    # BUG FIX: removed the duplicated replication_type / log.info lines and
    # the unused cluster_ref_a binding from the original.
    replication_type = "continuous"
    self.log.info("Force initial rebalance.")
    master_a = self._input.clusters.get(0)[0]
    rest_conn_a = RestConnection(master_a)
    cluster_ref_b = "cluster_ref_b"
    master_b = self._input.clusters.get(1)[0]
    rest_conn_b = RestConnection(master_b)
    self.log.info("START XDC replication...")
    # Start replication
    rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                   master_b.rest_username,
                                   master_b.rest_password, cluster_ref_b)
    (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                           self._buckets[0],
                                                           cluster_ref_b)
    self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))
    # Start load
    self.log.info("START loading data...")
    load_thread_list = []
    kvstore = ClientKeyValueStore()
    self._params["ops"] = "set"
    task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
    load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                    self._buckets[0],
                                                    task_def, kvstore)
    # BUG FIX: register the loader so the join loop below actually waits
    # for it; previously load_thread_list stayed empty and verification
    # could race the in-flight load.
    load_thread_list.append(load_thread)
    load_thread.start()
    # sleep a while to allow more data to be loaded
    time.sleep(5)
    self.log.info("current nodes on source cluster: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
    # Trigger failover: fail over one node each time until only one remains
    self.log.info("DURING replication, start failover...")
    self.log.info("FAILOVER nodes on Cluster A ...")
    nodes_a = rest_conn_a.node_statuses()
    while len(nodes_a) > 1:
        toBeFailedOverNode = RebalanceHelper.pick_node(master_a)
        self.log.info("failover node {0}".format(toBeFailedOverNode.id))
        rest_conn_a.fail_over(toBeFailedOverNode)
        self.log.info("rebalance after failover")
        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()],
                              ejectedNodes=[toBeFailedOverNode.id])
        self.assertTrue(rest_conn_a.monitorRebalance(),
                        msg="rebalance operation failed after removing node {0}".format(toBeFailedOverNode.id))
        nodes_a = rest_conn_a.node_statuses()
    self.log.info("ALL failed over done...")
    # Wait for loading threads to finish
    for lt in load_thread_list:
        lt.join()
    self.log.info("All loading threads finished")
    # Verify replication
    self.log.info("START data verification at cluster A...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START data verification at cluster B...")
    self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
    self.log.info("START revision verification on both clusters...")
    self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                        rest_conn_b,
                                                        self._buckets[0],
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated revisions failed")