This article collects typical usage examples of the verify_docs_deleted method of the Python class keywords.MobileRestClient.MobileRestClient. If you have been wondering what MobileRestClient.verify_docs_deleted does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also read up on the containing class, keywords.MobileRestClient.MobileRestClient, for more context.
Two code examples of MobileRestClient.verify_docs_deleted are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
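Before the full examples, here is a minimal sketch of the typical call pattern, distilled from the two examples below. The endpoint URL, database name, and channel list are placeholder assumptions rather than values from a real setup (59840 is LiteServ's usual default port):

from keywords.MobileRestClient import MobileRestClient

client = MobileRestClient()

# Hypothetical LiteServ endpoint and database (placeholders)
ls_url = "http://localhost:59840"
ls_db = "ls_db"

# add_docs returns the doc metadata that the verify_* helpers expect
docs = client.add_docs(url=ls_url, db=ls_db, number=10, id_prefix=ls_db, channels=["ABC"])
client.delete_docs(url=ls_url, db=ls_db, docs=docs)

# Passes once every doc in the list is deleted (tombstoned) on the target database
client.verify_docs_deleted(url=ls_url, db=ls_db, docs=docs)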
Example 1: test_client_to_sync_gateway_complex_replication_with_revs_limit
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or alternatively: from keywords.MobileRestClient.MobileRestClient import verify_docs_deleted [as alias]
# ......... part of the code is omitted here .........
# Start replication ls_db -> sg_db
repl_one = client.start_replication(
    url=ls_url,
    continuous=True,
    from_db=ls_db,
    to_url=sg_admin_url, to_db=sg_db
)
client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)
# A delay is added between the updates here due to couchbase/couchbase-lite-ios#1277.
# If the revs depth is small and a doc is updated past the revs depth before a push replication,
# the push replication has no common ancestor with sync_gateway, causing conflicts to be created.
# Adding a delay between updates avoids this. CBL Mac and CBL .NET offer an alternative (changing
# the default client revs depth), but that is not configurable on Android, so the delay is what
# keeps the replication behaving as expected on all platforms.
client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)
# Start replication ls_db <- sg_db
repl_two = client.start_replication(
    url=ls_url,
    continuous=True,
    from_url=sg_admin_url, from_db=sg_db,
    to_db=ls_db
)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)
client.compact_database(url=ls_url, db=ls_db)
# LiteServ should only have 20 revisions due to the built-in client revs limit
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)
# Sync Gateway should have 100 revisions due to the revs_limit specified in the sg config, plus possible conflict winners from the LiteServ db
client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)
client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
expected_generation = num_revs + 1
client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session)
client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
assert len(ls_db_docs) == 10
expected_revs = num_revs + 20 + 2
client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs)
client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs)
expected_generation = (num_revs * 2) + 3
client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
client.compact_database(url=ls_url, db=ls_db)
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)
client.stop_replication(
    url=ls_url,
    continuous=True,
    from_db=ls_db,
    to_url=sg_admin_url, to_db=sg_db
)
client.stop_replication(
    url=ls_url,
    continuous=True,
    from_url=sg_admin_url, from_db=sg_db,
    to_db=ls_db
)
client.wait_for_no_replications(url=ls_url)
client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session)
client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
# Start push / pull replication and verify that all docs are deleted
# Start replication ls_db -> sg_db
repl_one = client.start_replication(
    url=ls_url,
    continuous=True,
    from_db=ls_db,
    to_url=sg_admin_url, to_db=sg_db
)
# Start replication ls_db <- sg_db
repl_two = client.start_replication(
    url=ls_url,
    continuous=True,
    from_url=sg_admin_url, from_db=sg_db,
    to_db=ls_db
)
client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
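Conceptually, verify_docs_deleted asserts that each doc has become a tombstone on the target database. Against a CouchDB-style REST API (which both LiteServ and Sync Gateway expose), a rough, hypothetical equivalent of that check might look like the sketch below; the real helper in keywords.MobileRestClient additionally handles auth, polling, and Sync Gateway specifics, and the doc["id"] key is an assumption about the shape of the metadata returned by add_docs:

import requests

def docs_are_deleted(url, db, docs):
    """Hypothetical check: a deleted (tombstoned) doc returns 404 on a plain GET."""
    for doc in docs:
        resp = requests.get("{}/{}/{}".format(url, db, doc["id"]))
        if resp.status_code != 404:
            return False
    return True

# Usage (placeholders): assert docs_are_deleted(ls_url, ls_db, ls_db_docs)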
Example 2: test_inline_large_attachments
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or alternatively: from keywords.MobileRestClient.MobileRestClient import verify_docs_deleted [as alias]
# Assumed supporting imports (module paths follow the mobile-testkit repo layout):
from keywords import document
from keywords.utils import log_info

def test_inline_large_attachments(setup_client_syncgateway_test):
"""
1. Start LiteServ and Sync Gateway
2. Create 2 databases on LiteServ (ls_db1, ls_db2)
3. Start continuous push replication from ls_db1 to sg_db
4. Start continuous pull replication from sg_db to ls_db2
5. PUT 5 large inline attachments to ls_db1
6. DELETE the docs on ls_db1
7. PUT same 5 large inline attachments to ls_db1
8. Verify docs replicate to ls_db2
9. Purge ls_db1
10. Verify docs removed
"""
log_info("Running 'test_inline_large_attachments' ...")
sg_url = setup_client_syncgateway_test["sg_url"]
sg_url_admin = setup_client_syncgateway_test["sg_admin_url"]
ls_url = setup_client_syncgateway_test["ls_url"]
log_info("ls_url: {}".format(ls_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
ls_db1 = "ls_db1"
ls_db2 = "ls_db2"
sg_db = "db"
client = MobileRestClient()
client.create_database(ls_url, ls_db1)
client.create_database(ls_url, ls_db2)
# Start continuous push replication from ls_db1 -> sg_db
client.start_replication(url=ls_url, continuous=True, from_db=ls_db1, to_url=sg_url, to_db=sg_db)
# Start continuous pull replication from sg_db -> ls_db2
client.start_replication(url=ls_url, continuous=True, from_url=sg_url, from_db=sg_db, to_db=ls_db2)
# Docs, each with a large (2.36 MB) inline image attachment
attachment_docs = []
for i in range(5):
    doc = document.create_doc(
        doc_id="large_attach_{}".format(i), attachment_name="golden_gate_large.jpg", channels=["ABC"]
    )
    attachment_docs.append(doc)
# Add the large-attachment docs to ls_db1
docs = []
for doc in attachment_docs:
    docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))
# Delete docs
client.delete_docs(ls_url, ls_db1, docs)
client.verify_docs_deleted(ls_url, ls_db1, docs)
# Recreate the docs
recreated_docs = []
for doc in attachment_docs:
    recreated_docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))
client.verify_docs_present(ls_url, ls_db1, recreated_docs)
client.verify_docs_present(sg_url, sg_db, recreated_docs)
client.verify_docs_present(ls_url, ls_db2, recreated_docs)
purged_docs = client.purge_docs(ls_url, ls_db1, recreated_docs)
log_info(purged_docs)
# All purged docs should have replicated and should be gone now.
# This is currently failing due to some docs not replicating to ls_db2
client.verify_docs_deleted(ls_url, ls_db1, recreated_docs)
client.verify_docs_deleted(sg_url, sg_db, recreated_docs)
client.verify_docs_deleted(ls_url, ls_db2, recreated_docs)
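Unlike a delete, the purge in step 9 removes the documents and their revision history outright instead of leaving tombstones. client.purge_docs presumably wraps a CouchDB-style _purge endpoint; a hypothetical raw-HTTP equivalent, assuming each doc dict carries "id" and "rev" keys, might look like:

import requests

def purge_docs_raw(url, db, docs):
    # Assumption: _purge takes a map of doc id -> list of revs to remove outright,
    # mirroring the CouchDB-style endpoint exposed by LiteServ
    body = {doc["id"]: [doc["rev"]] for doc in docs}
    resp = requests.post("{}/{}/_purge".format(url, db), json=body)
    resp.raise_for_status()
    return resp.json()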