This article collects typical usage examples of the Python method libraries.testkit.admin.Admin.db_resync. If you are wondering what Admin.db_resync does, how to call it, or what working example code looks like, the curated example below should help. You can also explore further usage examples of the containing class, libraries.testkit.admin.Admin.
The section below shows 1 code example of the Admin.db_resync method.
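Before the full test, here is a minimal sketch of the call pattern around db_resync, assuming the testkit Admin and Cluster wrappers used in the example below; the config paths and the Cluster import path are placeholders for illustration, not part of the original example.

from libraries.testkit.admin import Admin
from libraries.testkit.cluster import Cluster  # import path assumed for illustration

cluster = Cluster(config="path/to/cluster_conf")  # hypothetical cluster config path
cluster.reset("path/to/sg_conf")                  # hypothetical Sync Gateway config path

# Admin wraps a Sync Gateway instance and exposes its admin operations
admin = Admin(cluster.sync_gateways[0])

# In the example below, resync is run against an offline database,
# so "db" is taken offline first
assert admin.take_db_offline(db="db") == 200

# db_resync triggers the resync; the example below reads the number of
# processed changes from result['payload']['changes']
result = admin.db_resync(db="db")
print(result['payload']['changes'])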
Example 1: test_bucket_online_offline_resync_sanity
# Required module import: from libraries.testkit.admin import Admin [as alias]
# Or: from libraries.testkit.admin.Admin import db_resync [as alias]
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken: {}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    sgs = cluster.sync_gateways
    admin = Admin(sgs[0])

    # Register users
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="password", channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that:
    #   - doc ids created by each user exist in the docs received on the changes feed
    #   - the expected revision equals the received revision
    expected_revision = str(num_revisions + 1)

    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name,
                                                                                        doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
                    user_obj.name,
                    doc_id,
                    rev,
                    expected_revision)
                )

    assert len(rev_errors) == 0

    # Verify each user's created docs are part of the changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    time.sleep(10)

    num_changes = admin.db_resync(db="db")
    log_info("expecting num_changes {} == num_docs {} * num_users {}".format(num_changes, num_docs, num_users))
    assert num_changes['payload']['changes'] == num_docs * num_users
#.........some code omitted here.........
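In this example, db_resync is only invoked after the database has been taken offline and the Sync Gateway has been restarted with an access-restricted config; the value it returns carries the number of processed changes under ['payload']['changes'], which the test compares against num_docs * num_users.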