本文整理汇总了Python中keywords.MobileRestClient.MobileRestClient.stop_replication方法的典型用法代码示例。如果您正苦于以下问题:Python MobileRestClient.stop_replication方法的具体用法?Python MobileRestClient.stop_replication怎么用?Python MobileRestClient.stop_replication使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keywords.MobileRestClient.MobileRestClient
的用法示例。
在下文中一共展示了MobileRestClient.stop_replication方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_client_to_sync_gateway_complex_replication_with_revs_limit
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import stop_replication [as alias]
# ......... part of the code is omitted here .........
# NOTE(review): this excerpt is missing its `def` line and earlier setup (client,
# ls_url, sg_url, sg_admin_url, ls_db, sg_db, num_docs, num_revs, sg_session,
# sg_user_channels, ls_db_docs are all defined in the omitted part), and the
# original indentation was lost when the page was scraped — not runnable as-is.
# Start continuous push replication ls_db -> sg_db
repl_one = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_admin_url, to_db=sg_db
)
client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)
# A delay is added to the updates here due to couchbase/couchbase-lite-ios#1277.
# Basically, if your revs depth is small and someone is updating a doc past the revs depth before a push replication,
# the push replication will have no common ancestor with sync_gateway causing conflicts to be created.
# Adding a delay between updates helps this situation. There is an alternative for CBL Mac and CBL .NET to change the default client revs depth,
# but that is not configurable for Android.
# Currently adding a delay will allow the replication to act as expected for all platforms.
client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)
# Start continuous pull replication ls_db <- sg_db
repl_two = client.start_replication(
url=ls_url,
continuous=True,
from_url=sg_admin_url, from_db=sg_db,
to_db=ls_db
)
# Wait until both replications have caught up before compacting
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)
client.compact_database(url=ls_url, db=ls_db)
# LiteServ should only have 20 revisions due to built-in client revs limit
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)
# Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db
client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)
client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
# After resolving conflicts, each doc's winning rev should sit one generation past the updates
expected_generation = num_revs + 1
client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session)
# Delete the docs and verify the tombstones replicate to Sync Gateway
client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
# Recreate docs with the same ids and run them through another update cycle
ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
assert len(ls_db_docs) == 10
# presumably num_revs updates + 20 retained revs + 2 (create + delete) — TODO confirm against MobileRestClient semantics
expected_revs = num_revs + 20 + 2
client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs)
client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs)
expected_generation = (num_revs * 2) + 3
client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
client.compact_database(url=ls_url, db=ls_db)
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)
# Cancel the push replication (same properties as when it was started)
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_admin_url, to_db=sg_db
)
# Cancel the pull replication
client.stop_replication(
url=ls_url,
continuous=True,
from_url=sg_admin_url, from_db=sg_db,
to_db=ls_db
)
client.wait_for_no_replications(url=ls_url)
# Resolve remaining conflicts on both sides, then delete everything
client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session)
client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
# Start push / pull and verify that all docs are deleted on both ends
# Start replication ls_db -> sg_db
repl_one = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_admin_url, to_db=sg_db
)
# Start replication ls_db <- sg_db
repl_two = client.start_replication(
url=ls_url,
continuous=True,
from_url=sg_admin_url, from_db=sg_db,
to_db=ls_db
)
client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
示例2: test_multiple_replications_created_with_unique_properties
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import stop_replication [as alias]
# NOTE(review): original indentation was lost when this page was scraped, and the
# tail of the function is omitted below — not runnable as-is.
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
    """Regression test for couchbase/couchbase-lite-java-core#1386
    1. Setup SGW with a remote database name db for an example
    2. Create a local database such as ls_db
    3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
    4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
    5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
    6. Make sure that the session_id from each POST /_replicate are different.
    7. Send GET /_active_tasks to make sure that there are 3 tasks created.
    8. Send 3 POST /_replicate with the same parameters as Steps 3-5 plus cancel=true to stop those replicators
    9. Repeat Steps 3 - 8 with source and target swapped for testing the pull replicator.
    """
sg_db = "db"
ls_db = "ls_db"
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
sg_one_public = setup_client_syncgateway_test["sg_url"]
# Boot a walrus-backed Sync Gateway for this test
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config,
url=sg_one_public,
config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_multiple_replications_created_with_unique_properties'")
log_info("ls_url: {}".format(ls_url))
log_info("sg_one_admin: {}".format(sg_one_admin))
log_info("sg_one_public: {}".format(sg_one_public))
client = MobileRestClient()
client.create_database(url=ls_url, name=ls_db)
########
# PUSH #
########
# Start 3 unique push replication requests (plain, doc_ids-filtered, js-filtered)
repl_one = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
client.wait_for_replication_status_idle(ls_url, repl_one)
# Same source/target, but restricted to two doc ids -> distinct replication
repl_two = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db,
doc_ids=["doc_1", "doc_2"]
)
client.wait_for_replication_status_idle(ls_url, repl_two)
# Create doc filter and add to the design doc
filters = {
"language": "javascript",
"filters": {
"sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
}
}
client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))
# Same source/target, but with a filter function -> third distinct replication
repl_three = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db,
repl_filter="by_type/sample_filter"
)
client.wait_for_replication_status_idle(ls_url, repl_three)
# Verify 3 replications are running
replications = client.get_replications(ls_url)
log_info(replications)
assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
3,
len(replications)
)
# Stop repl001 (cancel must use the same properties the replication was started with)
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
# Stop repl002
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
# ......... rest of the code is omitted here .........
示例3: test_replication_with_session_cookie
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import stop_replication [as alias]
# ......... part of the code is omitted here .........
# NOTE(review): this excerpt is missing its `def` line and earlier setup (client,
# ls_url, sg_url, sg_admin_url, ls_db, sg_db, ls_docs, num_docs_pushed,
# session_header, session_id are defined in the omitted part), and the original
# indentation was lost when the page was scraped — not runnable as-is.
assert len(sg_docs) == num_docs_pushed
# All docs (local + pushed) should be visible on both sides
all_docs = client.merge(ls_docs, sg_docs)
log_info(all_docs)
client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)
# GET from session endpoint /{db}/_session/{session-id}
session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"
log_info("SESSIONs: {}".format(session))
# Delete session via sg admin port and _user rest endpoint
client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)
# Make sure session is deleted: fetching it again should 404
try:
session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
except HTTPError as he:
expected_error_code = he.response.status_code
log_info(expected_error_code)
assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)
# Cancel the replications
# Stop repl_one (cancel must use the same properties, including the session auth)
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_url,
to_db=sg_db,
to_auth=session_header
)
# Stop repl_two
client.stop_replication(
url=ls_url,
continuous=True,
from_url=sg_url,
from_db=sg_db,
from_auth=session_header,
to_db=ls_db,
)
client.wait_for_no_replications(ls_url)
replications = client.get_replications(ls_url)
assert len(replications) == 0, "All replications should be stopped"
# Create new session and new push / pull replications
session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="foo")
# Get session id from a cookie header of the form "SyncGatewaySession=<id>; ..."
session_parts = re.split("=|;", session_header)
session_id = session_parts[1]
log_info("{}: {}".format(session_parts[0], session_id))
# Start authenticated push replication
# NOTE(review): the excerpt is truncated by the scraper — the call below is incomplete
repl_one = client.start_replication(
示例4: test_multiple_replications_not_created_with_same_properties
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import stop_replication [as alias]
# NOTE(review): original indentation was lost when this page was scraped, and the
# tail of the function is omitted below — not runnable as-is.
def test_multiple_replications_not_created_with_same_properties(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/939
    1. Create LiteServ database and launch sync_gateway with database
    2. Start 5 continuous push replicators with the same source and target
    3. Make sure the same replication id is returned
    4. Check that only 1 replication exists in 'active_tasks'
    5. Stop the replication with POST /_replicate cancel=true
    6. Start 5 continuous pull replicators with the same source and target
    7. Make sure the same replication id is returned
    8. Check that only 1 replication exists in 'active_tasks'
    9. Stop the replication with POST /_replicate cancel=true
    """
sg_db = "db"
ls_db = "ls_db"
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
sg_one_public = setup_client_syncgateway_test["sg_url"]
# Boot a walrus-backed Sync Gateway for this test
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config,
url=sg_one_public,
config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_multiple_replications_not_created_with_same_properties'")
log_info("ls_url: {}".format(ls_url))
log_info("sg_one_admin: {}".format(sg_one_admin))
log_info("sg_one_public: {}".format(sg_one_public))
client = MobileRestClient()
client.create_database(url=ls_url, name=ls_db)
repl_id_num = 0
response_one_id_num = 0
response_two_id_num = 0
# Launch 50 concurrent push replication requests with the same source / target;
# identical properties must all resolve to the same single replication
with ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(
client.start_replication,
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
) for _ in range(50)]
for future in as_completed(futures):
response_one_id = future.result()
# Convert session_id from string "repl001" -> int 1
response_one_id_num = int(response_one_id.replace("repl", ""))
log_info(response_one_id_num)
# Assert that concurrent replications have a greater session id than 0
assert response_one_id_num > repl_id_num, "'response_one_id_num': {} should be greater than 'repl_id_num': {}".format(
response_one_id_num,
repl_id_num
)
# Check there is only one replication running
replications = client.get_replications(ls_url)
assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
1,
len(replications)
)
# Stop replication (cancel must use the same properties it was started with)
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
# Check that no replications are running
client.wait_for_no_replications(ls_url)
replications = client.get_replications(ls_url)
assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
0,
len(replications)
)
# Launch 50 concurrent pull replication requests with the same source / target
with ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(
client.start_replication,
url=ls_url,
continuous=True,
from_db=sg_db,
from_url=sg_one_admin,
to_db=ls_db
) for _ in range(50)]
for future in as_completed(futures):
response_two_id = future.result()
# ......... rest of the code is omitted here .........