This article collects typical usage examples of the Python method lib.membase.api.rest_client.RestConnection.start_replication. If you are wondering what RestConnection.start_replication does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the other methods of the class lib.membase.api.rest_client.RestConnection.
Three code examples of RestConnection.start_replication are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
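Judging from the examples below, the basic calling pattern is: open a RestConnection to a node on the source cluster, register the destination cluster under a reference name with add_remote_cluster, then call start_replication once per bucket. A minimal sketch (src_node and dst_node are hypothetical server objects with ip, port, rest_username, and rest_password attributes, as used in the examples):

from lib.membase.api.rest_client import RestConnection

# src_node / dst_node are placeholder server objects (see the examples below)
rest_src = RestConnection(src_node)

# Register the destination cluster under the reference name "C2"
rest_src.add_remote_cluster(dst_node.ip, dst_node.port,
                            dst_node.rest_username,
                            dst_node.rest_password, "C2")

# Start a continuous replication of the 'default' bucket to "C2";
# the call returns a replication id on success
repl_id = rest_src.start_replication('continuous', 'default', "C2")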
Example 1: start_replication
# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import start_replication [as alias]
def start_replication(self, master, slave, replication_type='continuous',
                      buckets=None, bidir=False, suffix='A'):
    """Add remote cluster and start replication"""
    master_rest_conn = RestConnection(master)
    remote_reference = 'remote_cluster_' + suffix
    master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                        slave.rest_username,
                                        slave.rest_password,
                                        remote_reference)
    if not buckets:
        buckets = self.get_buckets()
    else:
        buckets = self.get_buckets(reversed=True)
    # Start one replication per bucket against the remote reference
    for bucket in buckets:
        master_rest_conn.start_replication(replication_type, bucket,
                                           remote_reference)
    # For multi-bucket or bidirectional setups, repeat the setup in the
    # reverse direction (slave -> master) under the 'B' suffix
    if self.parami('num_buckets', 1) > 1 and suffix == 'A':
        self.start_replication(slave, master, replication_type, buckets,
                               suffix='B')
    if bidir:
        self.start_replication(slave, master, replication_type, buckets,
                               suffix='B')
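Examples 2 and 3 below pair this setup with an explicit teardown via remove_all_replications and remove_all_remote_clusters. A sketch of the matching cleanup helper, assuming the same RestConnection API (the method name stop_replication is hypothetical):

def stop_replication(self, master):
    """Tear down replications and remote cluster references created by
    start_replication (sketch; mirrors the cleanup in Examples 2 and 3)."""
    master_rest_conn = RestConnection(master)
    master_rest_conn.remove_all_replications()
    master_rest_conn.remove_all_remote_clusters()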
Example 2: test_cbcollect_with_redaction_enabled_with_xdcr
# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import start_replication [as alias]
def test_cbcollect_with_redaction_enabled_with_xdcr(self):
    rest_src = RestConnection(self.master)
    rest_src.remove_all_replications()
    rest_src.remove_all_remote_clusters()
    rest_dest = RestConnection(self.servers[1])
    rest_dest_helper = RestHelper(rest_dest)
    try:
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()
        self.set_redaction_level()
        rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port,
                                    self.servers[1].rest_username,
                                    self.servers[1].rest_password, "C2")
        # at dest cluster
        self.add_built_in_server_user(node=self.servers[1])
        rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
        bucket_ready = rest_dest_helper.vbucket_map_ready('default')
        if not bucket_ready:
            self.fail("Bucket default at dest not created after 120 seconds.")
        repl_id = rest_src.start_replication('continuous', 'default', "C2")
        if repl_id is not None:
            self.log.info("Replication created successfully")
        gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
                            end=self.num_items)
        tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
        for task in tasks:
            task.result()
        self.sleep(10)
        # enable firewall
        if self.interrupt_replication:
            RemoteUtilHelper.enable_firewall(self.master, xdcr=True)
        # start collect logs
        self.start_logs_collection()
        result = self.monitor_logs_collection()
        # verify logs
        try:
            logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.log.info("Verify on log ns_server.goxdcr.log")
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.goxdcr.log")
    finally:
        # clean up xdcr
        rest_dest.delete_bucket()
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()
        if self.interrupt_replication:
            shell = RemoteMachineShellConnection(self.master)
            shell.disable_firewall()
            shell.disconnect()
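verify_log_redaction above checks the collected archives on the remote node. For reference, a simplified local version of the same idea, assuming Couchbase's convention of wrapping user data in <ud>...</ud> tags in the plain log and hashing those payloads in the redacted copy (the helper name and file paths are placeholders, not testrunner API):

import re

def redaction_applied(nonredact_path, redact_path):
    """Sketch: no <ud>-tagged payload from the plain log should survive
    verbatim in the redacted log."""
    with open(nonredact_path) as f:
        plain = f.read()
    with open(redact_path) as f:
        redacted = f.read()
    user_data = re.findall(r"<ud>(.*?)</ud>", plain)
    return all(item not in redacted for item in user_data)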
Example 3: test_xdcr_and_indexing_with_eventing
# Required import: from lib.membase.api.rest_client import RestConnection [as alias]
# Or: from lib.membase.api.rest_client.RestConnection import start_replication [as alias]
def test_xdcr_and_indexing_with_eventing(self):
    rest_src = RestConnection(self.servers[0])
    rest_dst = RestConnection(self.servers[2])
    self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
    self.n1ql_helper = N1QLHelper(shell=self.shell,
                                  max_verify=self.max_verify,
                                  buckets=self.buckets,
                                  item_flag=self.item_flag,
                                  n1ql_port=self.n1ql_port,
                                  full_docs_list=self.full_docs_list,
                                  log=self.log, input=self.input,
                                  master=self.master,
                                  use_rest=True)
    self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
    try:
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()
        rest_src.add_remote_cluster(self.servers[2].ip, self.servers[2].port,
                                    self.servers[0].rest_username,
                                    self.servers[0].rest_password, "C2")
        rest_dst.create_bucket(bucket=self.src_bucket_name, ramQuotaMB=100)
        # setup xdcr relationship
        repl_id = rest_src.start_replication('continuous', self.src_bucket_name, "C2")
        if repl_id is not None:
            self.log.info("Replication created successfully")
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag,
                  verify_data=False, batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name,
                                              HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
        # deploy function
        self.deploy_function(body)
        # Wait for eventing to catch up with all the update mutations and verify results
        self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
        stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
        index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(
            self.buckets, self.n1ql_node)
        actual_count = index_bucket_map[self.src_bucket_name]
        log.info("No of docs in xdcr destination bucket : {0}".format(
            stats_xdcr_dst["curr_items"]))
        log.info("No of docs indexed by primary index: {0}".format(actual_count))
        if stats_xdcr_dst["curr_items"] != self.docs_per_day * 2016:
            self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                stats_xdcr_dst["curr_items"], self.docs_per_day * 2016))
        if actual_count != self.docs_per_day * 2016:
            self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(
                actual_count, self.docs_per_day * 2016))
        # delete all documents
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag,
                  verify_data=False, batch_size=self.batch_size, op_type='delete')
        # Wait for eventing to catch up with all the delete mutations and verify results
        self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
        stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
        index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(
            self.buckets, self.n1ql_node)
        actual_count = index_bucket_map[self.src_bucket_name]
        log.info("No of docs in xdcr destination bucket : {0}".format(
            stats_xdcr_dst["curr_items"]))
        log.info("No of docs indexed by primary index: {0}".format(actual_count))
        if stats_xdcr_dst["curr_items"] != 0:
            self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                stats_xdcr_dst["curr_items"], 0))
        if actual_count != 0:
            self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(
                actual_count, 0))
        self.undeploy_and_delete_function(body)
    finally:
        self.n1ql_helper.drop_primary_index(using_gsi=True, server=self.n1ql_node)
        rest_dst.delete_bucket()
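Example 3 samples curr_items on the destination once, relying on verify_eventing_results to have given XDCR time to drain. If replication needs to be awaited explicitly, a small polling helper built on the same get_bucket_stats call could look like this (the helper name, timeout, and interval are assumptions, not part of testrunner):

import time

def wait_for_xdcr_items(rest, bucket, expected, timeout=300, interval=10):
    """Poll curr_items on the destination bucket until it reaches the
    expected count or the timeout expires (sketch)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if rest.get_bucket_stats(bucket)["curr_items"] == expected:
            return True
        time.sleep(interval)
    return False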