This article collects typical usage examples of the create_user method of the Python class keywords.MobileRestClient.MobileRestClient, addressing questions such as: what exactly does MobileRestClient.create_user do, and how is it used in practice? The curated examples below may help, and you can also read more about the containing class, keywords.MobileRestClient.MobileRestClient.
The following section presents 15 code examples of MobileRestClient.create_user, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
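Before the examples, here is a minimal sketch of the pattern they all share: create a user against the Sync Gateway admin API, then open a session for that user. Parameter names are taken from the examples below; the host URLs and credentials are illustrative placeholders, not values from any specific test.
from keywords.MobileRestClient import MobileRestClient

client = MobileRestClient()
# Users are created against the Sync Gateway admin port (sg_url_admin / sg_admin_url in the examples)
client.create_user(
    url="http://localhost:4985",   # illustrative admin URL
    db="db",
    name="sg_user",
    password="password",           # illustrative credentials
    channels=["NBC", "ABC"],
)
# Subsequent document operations authenticate with a session created for that user
session = client.create_session(url="http://localhost:4985", db="db", name="sg_user")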
Example 1: test_rolling_ttl_remove_expirary
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_rolling_ttl_remove_expirary(params_from_base_test_setup, sg_conf_name):
"""
1. PUT /db/doc1 via SG with property "_exp":3
2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
3. Update /db/doc1 with a revision that has no expiry
4. Get /db/doc1. Assert response is 200
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
topology = cluster_helper.get_cluster_topology(cluster_config)
cluster_helper.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf
)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
log_info("Running 'test_rolling_ttl_remove_expirary'")
log_info("cbs_url: {}".format(cbs_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
sg_db = "db"
sg_user_name = "sg_user"
sg_user_password = "[email protected]"
sg_user_channels = ["NBC", "ABC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)
doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, delay=1, auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=1, auth=sg_user_session)
# If expiry was not removed in the last update, this would expire doc_exp_3
time.sleep(5)
# doc_exp_3 should still be around due to removal of expiry
doc_exp_3 = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
assert doc_exp_3["_id"] == "exp_3"
# doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
with pytest.raises(HTTPError) as he:
client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
assert he.value[0].startswith("404 Client Error: Not Found for url:")
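The "_exp" property referenced in the docstrings above is set on the document body itself. As a rough sketch (assuming the standard Sync Gateway public REST API on port 4984; the doc ID, credentials, and extra fields are illustrative, and the tests actually build the body with the document.create_doc helper), the equivalent raw request would look like:
import requests

doc_body = {
    "_exp": 3,                    # TTL in seconds; Sync Gateway purges the doc once it elapses
    "channels": ["NBC", "ABC"],   # channels used by the test user above
    "sample_key": "sample_val",
}
resp = requests.put(
    "http://localhost:4984/db/exp_3",   # public port, illustrative doc ID "exp_3"
    json=doc_body,
    auth=("sg_user", "password"),       # basic auth instead of the session cookie used in the tests
)
resp.raise_for_status()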
Example 2: test_rolling_ttl_expires
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_rolling_ttl_expires(params_from_base_test_setup, sg_conf_name):
"""
1. PUT /db/doc1 via SG with property "_exp":3
2. Update /db/doc1 10 times with a new revision (also with "_exp":3)
3. Wait 5 seconds
4. Get /db/doc1. Assert response is 404
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
topology = cluster_helper.get_cluster_topology(cluster_config)
cluster_helper.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf
)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
log_info("Running 'test_rolling_ttl_expires'")
log_info("cbs_url: {}".format(cbs_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
sg_db = "db"
sg_user_name = "sg_user"
sg_user_password = "[email protected]"
sg_user_channels = ["NBC", "ABC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)
doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, auth=sg_user_session)
# Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
time.sleep(5)
# doc_exp_3 should be expired
with pytest.raises(HTTPError) as he:
client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
assert he.value[0].startswith("404 Client Error: Not Found for url:")
# doc_exp_10 should be available still
doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
assert doc_exp_10_result["_id"] == "exp_10"
Example 3: test_removing_expiry
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
"""
1. PUT /db/doc1 via SG with property "_exp":3
2. Update /db/doc1 with new revisions that carry no expiry value (10 updates)
3. Wait past the original expiry window and GET /db/doc1. Assert the doc is still available because the expiry was removed
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
topology = cluster_helper.get_cluster_topology(cluster_config)
cluster_helper.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf
)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
log_info("Running 'test_removing_expiry'")
log_info("cbs_url: {}".format(cbs_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
sg_db = "db"
sg_user_name = "sg_user"
sg_user_password = "[email protected]"
sg_user_channels = ["NBC", "ABC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)
doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)
doc_exp_3_updated = client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, auth=sg_user_session)
# Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
# Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
time.sleep(5)
# doc_exp_3 should no longer have an expiry and should not raise an exception
doc_exp_3_updated_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3_updated["id"], auth=sg_user_session)
assert doc_exp_3_updated_result["_id"] == "exp_3"
# doc_exp_10 should be available still and should not raise an exception
doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
assert doc_exp_10_result["_id"] == "exp_10"
Example 4: test_writing_attachment_to_couchbase_server
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
"""
1. Start sync_gateway with sync function that rejects all writes:
function(doc, oldDoc) {
throw({forbidden:"No writes!"});
}
2. Create a doc with attachment
3. Use CBS sdk to see if attachment doc exists. Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest)
4. Assert that exactly one attachment doc was written to the bucket
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
cluster_helper.reset_cluster(cluster_config, sg_conf)
topology = cluster_helper.get_cluster_topology(cluster_config)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
sg_db = "db"
bucket = "data-bucket"
log_info("Running 'test_writing_attachment_to_couchbase_server'")
log_info("Using cbs_url: {}".format(cbs_url))
log_info("Using sg_url: {}".format(sg_url))
log_info("Using sg_url_admin: {}".format(sg_url_admin))
log_info("Using sg_db: {}".format(sg_db))
log_info("Using bucket: {}".format(bucket))
sg_user_name = "sg_user"
sg_user_password = "sg_user_password"
sg_user_channels = ["NBC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session)
assert len(docs) == 100
# Create doc with attachment and push to sync_gateway
doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels)
client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session)
server = CouchbaseServer(cbs_url)
# Assert that the attachment doc gets written to couchbase server
server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
num_att_docs = len(server_att_docs)
assert num_att_docs == 1
Example 5: test_setting_expiry_in_bulk_docs
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
"""
1. POST /db/_bulk_docs with 10 documents. Set "_exp":3 on 5 of these documents
2. Wait five seconds
3. POST /db/_bulk_get for the 10 documents. Validate that only the 5 documents that have not yet expired are returned
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
topology = cluster_helper.get_cluster_topology(cluster_config)
cluster_helper.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf
)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
log_info("Running 'test_setting_expiry_in_bulk_docs'")
log_info("cbs_url: {}".format(cbs_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
sg_db = "db"
sg_user_name = "sg_user"
sg_user_password = "[email protected]"
sg_user_channels = ["NBC", "ABC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)
bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies
bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=sg_user_session)
# Allow exp_3 docs to expire
time.sleep(5)
bulk_get_docs = client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=sg_user_session)
expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]
client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
client.verify_doc_ids_not_found_in_response(response=bulk_get_docs, expected_missing_doc_ids=expected_missing_ids)
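For reference, the _bulk_docs payload described in the docstring above has roughly the following shape (a sketch only; field values are illustrative, and the tests build the bodies with document.create_docs, which may add further fields):
bulk_payload = {
    "docs": [
        {"_id": "exp_3_0", "_exp": 3, "channels": ["NBC", "ABC"]},    # expires ~3 s after write
        {"_id": "exp_10_0", "_exp": 10, "channels": ["NBC", "ABC"]},  # expires ~10 s after write
        # ... eight more docs following the same two prefixes
    ]
}
# POSTed to /db/_bulk_docs on the public port, authenticated with the user's session.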
Example 6: test_auto_prune_with_pull
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_auto_prune_with_pull(setup_client_syncgateway_test):
"""Sanity test for autopruning with replication
1. Create a database on LiteServ (ls_db)
2. Add doc to sync gateway
3. Update doc 50 times on sync_gateway
4. Set up pull replication from sync_gateway db to LiteServ db
5. Verify number of revisions on client is default (20)
"""
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_url = setup_client_syncgateway_test["sg_url"]
sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
client = MobileRestClient()
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_auto_prune_listener_sanity' ...")
log_info("ls_url: {}".format(ls_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_admin_url: {}".format(sg_admin_url))
num_docs = 1
num_revs = 50
sg_user_channels = ["NBC"]
sg_db = "db"
sg_user_name = "sg_user"
client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="password", channels=sg_user_channels)
sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
ls_db = client.create_database(url=ls_url, name="ls_db")
sg_db_docs = client.add_docs(
url=sg_url, db=sg_db, number=num_docs, id_prefix=sg_db, channels=sg_user_channels, auth=sg_session
)
assert len(sg_db_docs) == num_docs
sg_docs_update = client.update_docs(url=sg_url, db=sg_db, docs=sg_db_docs, number_updates=num_revs, auth=sg_session)
# Start continuous replication ls_db <- sg_db
repl_one = client.start_replication(url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=sg_docs_update)
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=sg_docs_update, expected_revs_per_doc=20)
Example 7: test_load_balance_sanity
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_load_balance_sanity(params_from_base_test_setup):
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf_name = "sync_gateway_default_functional_tests"
sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_util = ClusterKeywords()
cluster_util.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf_path
)
topology = cluster_util.get_cluster_topology(cluster_config)
admin_sg_one = topology["sync_gateways"][0]["admin"]
lb_url = topology["load_balancers"][0]
sg_db = "db"
num_docs = 1000
sg_user_name = "seth"
sg_user_password = "password"
channels = ["ABC", "CBS"]
client = MobileRestClient()
user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
session = client.create_session(admin_sg_one, sg_db, sg_user_name)
log_info(user)
log_info(session)
log_info("Adding docs to the load balancer ...")
ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
log_info("Starting ...")
ct_task = executor.submit(ct.start)
log_info("Adding docs ...")
docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
assert len(docs) == num_docs
log_info("Adding docs done")
wait_for_changes = executor.submit(ct.wait_until, docs)
if wait_for_changes.result():
log_info("Stopping ...")
log_info("Found all docs ...")
executor.submit(ct.stop)
ct_task.result()
else:
executor.submit(ct.stop)
ct_task.result()
raise Exception("Could not find all changes in feed before timeout!!")
Example 8: test_auto_prune_listener_keeps_conflicts_sanity
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
""""
1. Create db on LiteServ and add docs
2. Create db on sync_gateway and add docs with the same id
3. Create one shot push / pull replication
4. Update LiteServ 50 times
5. Assert that pruned conflict is still present
6. Delete the current revision and check that a GET returns the old conflict as the current rev
"""
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_url = setup_client_syncgateway_test["sg_url"]
sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
client = MobileRestClient()
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
log_info("ls_url: {}".format(ls_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_admin_url: {}".format(sg_admin_url))
num_docs = 1
num_revs = 100
sg_db = "db"
ls_db = "ls_db"
sg_user_name = "sg_user"
sg_user_channels = ["NBC"]
client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="password", channels=sg_user_channels)
sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
ls_db = client.create_database(url=ls_url, name=ls_db)
# Create docs with the same ID prefix so that conflicts arise when the dbs complete one-shot replication
ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
assert len(ls_db_docs) == num_docs
sg_db_docs = client.add_docs(
url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session
)
assert len(sg_db_docs) == num_docs
# Setup one shot pull replication and wait for idle.
client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
client.wait_for_no_replications(url=ls_url)
# There should now be a conflict on the client
conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])
# Get the doc with conflict rev
client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])
# Update doc past revs limit and make sure conflict is still available
updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])
# Delete doc and ensure that the conflict is now the current rev
client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
assert current_doc["_rev"] == conflicting_revs[0]
Example 9: test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor(params_from_base_test_setup, sg_conf_name):
"""
Creates a document with an attachment, then updates that document so that
the body of the revision that originally pushed the document is no
longer available. A new revision that is not a child of the
active revision is then pushed, and the test validates that the push is rejected.
Example:
1. Document is created with no attachment at rev-1
2. Server adds a revision with an attachment at rev-2 {"hello.txt", revpos=2}
3. Document is updated multiple times on the server, going to rev-4
4. Client attempts to add a new (conflicting) revision 3a, with ancestors rev-2a (with its own attachment) and rev-1.
5. When the client attempts to push rev-3a with the attachment stub {"hello.txt", revpos=2}, Sync Gateway should throw an error, since the revpos
of the attachment is later than the common ancestor (rev-1)
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
cluster_helper.reset_cluster(cluster_config, sg_conf)
topology = cluster_helper.get_cluster_topology(cluster_config)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
sg_db = "db"
bucket = "data-bucket"
log_info("Running 'test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor'")
log_info("Using cbs_url: {}".format(cbs_url))
log_info("Using sg_url: {}".format(sg_url))
log_info("Using sg_url_admin: {}".format(sg_url_admin))
log_info("Using sg_db: {}".format(sg_db))
log_info("Using bucket: {}".format(bucket))
sg_user_name = "sg_user"
sg_user_password = "password"
sg_user_channels = ["NBC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
doc = document.create_doc(doc_id="doc_1", content={"sample_key": "sample_val"}, channels=sg_user_channels)
doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], attachment_name="sample_text.txt", auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)
client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)
parent_rev_list = ["2-foo2", doc_gen_1["rev"]]
# Sync Gateway should return an error since it has no reference to the attachment in the revision's ancestors
with pytest.raises(HTTPError) as he:
client.add_conflict(
url=sg_url,
db=sg_db,
doc_id=doc_gen_1["id"],
parent_revisions=parent_rev_list,
new_revision="3-foo3",
auth=sg_user_session
)
assert he.value[0].startswith("400 Client Error: Bad Request for url: ")
Example 10: test_client_to_sync_gateway_complex_replication_with_revs_limit
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_client_to_sync_gateway_complex_replication_with_revs_limit(setup_client_syncgateway_test):
""" Ported from sync_gateway tests repo
... 1. Clear server buckets
... 2. Restart liteserv with _session
... 3. Restart sync_gateway with that config
... 4. Create db on LiteServ
... 5. Add numDocs to LiteServ db
... 6. Setup push replication from LiteServ db to sync_gateway
... 7. Verify doc present on sync_gateway (number of docs)
... 8. Update sg docs numRevs * 4 = 480
... 9. Update docs on LiteServ db numRevs * 4 = 480
... 10. Setup pull replication from sg -> liteserv db
... 11. Verify all docs are replicated
... 12. compact LiteServ db (POST _compact)
... 13. Verify number of revs in LiteServ db (?revs_info=true) check rev status == available fail if revs available > revs limit
... 14. Delete LiteServ db conflicts (?conflicts=true) DELETE _conflicts
... 15. Create numDoc number of docs in LiteServ db
... 16. Update LiteServ db docs numRevs * 5 (600)
... 17. Verify LiteServ db revs is < 602
... 18. Verify LiteServ db docs revs prefix (9 * numRevs + 3)
... 19. Compact LiteServ db
... 20. Verify number of revs <= 10
... 21. Delete LiteServ docs
... 22. Delete Server bucket
... 23. Delete LiteServ db
"""
ls_db_name = "ls_db"
sg_db = "db"
sg_user_name = "sg_user"
num_docs = 10
num_revs = 100
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_url = setup_client_syncgateway_test["sg_url"]
sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config,
url=sg_url,
config="{}/walrus-revs-limit.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_client_to_sync_gateway_complex_replication_with_revs_limit'")
log_info("ls_url: {}".format(ls_url))
log_info("sg_admin_url: {}".format(sg_admin_url))
log_info("sg_url: {}".format(sg_url))
client = MobileRestClient()
# Test the endpoint; the listener does not support users but should return a default response
client.get_session(url=ls_url)
sg_user_channels = ["NBC"]
client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="password", channels=sg_user_channels)
sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
ls_db = client.create_database(url=ls_url, name=ls_db_name)
ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
assert len(ls_db_docs) == num_docs
# Start replication ls_db -> sg_db
repl_one = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_admin_url, to_db=sg_db
)
client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)
# A delay is added to the updates here due to couchbase/couchbase-lite-ios#1277.
# Basically, if your revs depth is small and someone updates a doc past the revs depth before a push replication,
# the push replication will have no common ancestor with sync_gateway, causing conflicts to be created.
# Adding a delay between updates helps this situation. CBL Mac and CBL .NET offer an alternative of changing the default client revs depth,
# but that is not configurable for Android.
# Currently, adding a delay allows the replication to behave as expected on all platforms.
client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)
# Start replication ls_db <- sg_db
repl_two = client.start_replication(
url=ls_url,
continuous=True,
from_url=sg_admin_url, from_db=sg_db,
to_db=ls_db
)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)
client.compact_database(url=ls_url, db=ls_db)
# LiteServ should only have 20 revisions due to the built-in client revs limit
client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)
# Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db
client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)
# ... (the rest of this example has been omitted) ...
Example 11: test_string_expiry_as_unix_date
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_string_expiry_as_unix_date(params_from_base_test_setup, sg_conf_name):
"""
1. Calculate (server time + 3 seconds) as unix time (i.e. Epoch time, e.g. 1466465122)
2. PUT /db/doc1 via SG with property "_exp":"[unix time]"
PUT /db/doc2 via SG with property "_exp":"1767225600" (Jan 1 2026) Note: the maximum epoch time supported by CBS is maxUint32, or Sun 07 Feb 2106, in case you want to move it out further than 2026.
3. Wait five seconds
4. Get /db/doc1. Assert response is 404
Get /db/doc2. Assert response is 200
"""
cluster_config = params_from_base_test_setup["cluster_config"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster_helper = ClusterKeywords()
topology = cluster_helper.get_cluster_topology(cluster_config)
cluster_helper.reset_cluster(
cluster_config=cluster_config,
sync_gateway_config=sg_conf
)
cbs_url = topology["couchbase_servers"][0]
sg_url = topology["sync_gateways"][0]["public"]
sg_url_admin = topology["sync_gateways"][0]["admin"]
log_info("Running 'test_string_expiry_as_unix_date'")
log_info("cbs_url: {}".format(cbs_url))
log_info("sg_url: {}".format(sg_url))
log_info("sg_url_admin: {}".format(sg_url_admin))
sg_db = "db"
sg_user_name = "sg_user"
sg_user_password = "[email protected]"
sg_user_channels = ["NBC", "ABC"]
client = MobileRestClient()
client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
time_util = Time()
unix_time_3s_ahead = time_util.get_unix_timestamp(delta=3)
# Convert unix timestamp to string
unix_time_3s_ahead_string = str(unix_time_3s_ahead)
# Using string representation for unix time
doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=unix_time_3s_ahead_string, channels=sg_user_channels)
doc_exp_years_body = document.create_doc(doc_id="exp_years", expiry="1767225600", channels=sg_user_channels)
doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
doc_exp_years = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_years_body, auth=sg_user_session)
# Sleep should allow doc_exp_3 to expire
time.sleep(10)
# doc_exp_3 should be expired
with pytest.raises(HTTPError) as he:
client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
assert he.value[0].startswith("404 Client Error: Not Found for url:")
# doc_exp_years should be available still
doc_exp_years_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_years["id"], auth=sg_user_session)
assert doc_exp_years_result["_id"] == "exp_years"
Example 12: test_backfill_channels_oneshot_changes
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type):
cluster_config = params_from_base_test_setup["cluster_config"]
topology = params_from_base_test_setup["cluster_topology"]
mode = params_from_base_test_setup["mode"]
sg_url = topology["sync_gateways"][0]["public"]
sg_admin_url = topology["sync_gateways"][0]["admin"]
sg_db = "db"
log_info("grant_type: {}".format(grant_type))
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster = Cluster(cluster_config)
cluster.reset(sg_conf)
client = MobileRestClient()
admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])
# Create users / sessions
client.create_user(url=sg_admin_url, db=sg_db,
name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)
client.create_user(url=sg_admin_url, db=sg_db,
name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)
admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)
# Create 50 "A" channel docs
a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
assert len(a_docs) == 50
b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
assert len(b_docs) == 1
user_doc = {"id": "_user/USER_B", "rev": None}
b_docs.append(user_doc)
# Loop until user_b sees b_doc_0 doc and _user/USER_B doc
client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)
# Get last_seq for user_b
user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")
# Grant access to channel "A"
if grant_type == "CHANNEL-REST":
log_info("Granting user access to channel A via Admin REST user update")
# Grant via update to user in Admin API
client.update_user(url=sg_admin_url, db=sg_db,
name=user_b_user_info.name, channels=["A", "B"])
elif grant_type == "CHANNEL-SYNC":
log_info("Granting user access to channel A sync function access()")
# Grant via access() in the sync function: a doc with id 'channel_access' will trigger access(doc.users, doc.channels)
access_doc = document.create_doc("channel_access", channels=["A"])
access_doc["users"] = ["USER_B"]
client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)
elif grant_type == "ROLE-REST":
log_info("Granting user access to channel A via Admin REST role grant")
# Create role with channel A
client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])
elif grant_type == "ROLE-SYNC":
log_info("Granting user access to channel A via sync function role() grant")
# Create role with channel A
client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
# Grant via role() in the sync function: a doc with id 'role_access' will trigger role(doc.users, doc.roles)
role_access_doc = document.create_doc("role_access")
role_access_doc["users"] = ["USER_B"]
role_access_doc["roles"] = ["role:channel-A-role"]
client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)
else:
pytest.fail("Unsupported grant_type!!!!")
user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db,
since=user_b_changes["last_seq"], auth=user_b_session, feed="normal")
# User B should have received 51 docs (a_docs + 1 _user/USER_B doc) if the grant was via REST, or 50 changes if the grant
# was via the sync function
changes_results = user_b_changes_after_grant["results"]
assert 50 <= len(changes_results) <= 51
# Create a dictionary of id/rev pairs for all the docs from changes that are not "_user/" docs
ids_and_revs_from_user_changes = {
change["id"]: change["changes"][0]["rev"]
for change in changes_results if not change["id"].startswith("_user/")
}
assert len(ids_and_revs_from_user_changes) == 50
# Create a dictionary of id/rev pairs for all of the channel A docs
ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
# ... (the rest of this example has been omitted) ...
Example 13: test_initial_push_replication
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
"""
1. Prepare LiteServ to have 10000 documents.
2. Create a single shot push / continuous replicator and to push the docs into a sync_gateway database.
3. Verify if all of the docs get pushed.
"""
sg_db = "db"
ls_db = "ls_db"
seth_channels = ["ABC", "NBC"]
num_docs = 10000
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
sg_one_public = setup_client_syncgateway_test["sg_url"]
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config,
url=sg_one_public,
config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
log_info("ls_url: {}".format(ls_url))
log_info("sg_one_admin: {}".format(sg_one_admin))
log_info("sg_one_public: {}".format(sg_one_public))
client = MobileRestClient()
client.create_user(sg_one_admin, sg_db, "seth", password="password", channels=seth_channels)
session = client.create_session(sg_one_admin, sg_db, "seth")
client.create_database(url=ls_url, name=ls_db)
# Create 'num_docs' docs on LiteServ
docs = client.add_docs(
url=ls_url,
db=ls_db,
number=num_docs,
id_prefix="seeded_doc",
generator="four_k",
channels=seth_channels
)
assert len(docs) == num_docs
# Start push replication
repl_id = client.start_replication(
url=ls_url,
continuous=continuous,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
if continuous:
log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
client.wait_for_replication_status_idle(ls_url, repl_id)
else:
log_info("Waiting for no replications: {}".format(repl_id))
client.wait_for_no_replications(ls_url)
# Verify docs replicated to sync_gateway
client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)
# Verify docs show up in sync_gateway's changes feed
client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)
replications = client.get_replications(url=ls_url)
if continuous:
assert len(replications) == 1, "There should only be one replication running"
assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
assert replications[0]["continuous"], "Running replication should be continuous"
# Only .NET has an 'error' property
if "error" in replications[0]:
assert len(replications[0]["error"]) == 0
else:
assert len(replications) == 0, "No replications should be running"
Example 14: test_remove_add_channels_to_doc
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_remove_add_channels_to_doc(params_from_base_test_setup, sg_conf_name):
cluster_config = params_from_base_test_setup["cluster_config"]
topology = params_from_base_test_setup["cluster_topology"]
mode = params_from_base_test_setup["mode"]
sg_url = topology["sync_gateways"][0]["public"]
sg_admin_url = topology["sync_gateways"][0]["admin"]
sg_db = "db"
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
cluster = Cluster(cluster_config)
cluster.reset(sg_conf)
client = MobileRestClient()
admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A", "B"], roles=[])
a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
admin_user_auth = client.create_user(
url=sg_admin_url,
db=sg_db,
name=admin_user_info.name,
password=admin_user_info.password,
channels=admin_user_info.channels,
)
a_user_auth = client.create_user(
url=sg_admin_url, db=sg_db, name=a_user_info.name, password=a_user_info.password, channels=a_user_info.channels
)
a_docs = client.add_docs(
url=sg_url, db=sg_db, number=50, id_prefix="a_doc", auth=admin_user_auth, channels=admin_user_info.channels
)
# Build a dictionary of a_docs
a_docs_id_rev = {doc["id"]: doc["rev"] for doc in a_docs}
assert len(a_docs_id_rev) == 50
# Wait for all docs to show up in changes
client.verify_doc_id_in_changes(sg_url, sg_db, expected_doc_id="_user/a_user", auth=a_user_auth)
client.verify_docs_in_changes(sg_url, sg_db, expected_docs=a_docs, auth=a_user_auth)
# Get changes for 'a_user'
a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=a_user_auth, feed="normal")
# 'a_user' should get 50 'a_doc_*' doc and 1 '_user/a_user' doc
assert len(a_user_changes["results"]) == 51
###########################
# Remove Channels from doc
###########################
# Copy a_docs_id_rev to dictionary to scratch off values
remove_docs_scratch_off = a_docs_id_rev.copy()
assert len(remove_docs_scratch_off) == 50
# Use admin user to update the docs to remove 'A' from the channels property on the doc and add 'B'
client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["B"])
# A longpoll loop is required due to the delay it takes for changes to propagate to the client
changes_timeout = 10
start = time.time()
last_seq = a_user_changes["last_seq"]
while True:
# If it takes longer than 10 seconds, fail the test
if time.time() - start > changes_timeout:
raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")
# We found everything, exit loop!
if remove_docs_scratch_off == {}:
log_info("All expected docs found to be removed")
break
# Get changes for 'a_user' from last_seq
a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
assert len(a_user_changes["results"]) > 0
# Loop over changes found and perform the following
# 1. Check that the doc is flagged with 'removed'
# 2. Cross the doc off the 'remove_docs_scratch_off' dictionary
for change in a_user_changes["results"]:
assert change["removed"] == ["A"]
assert change["changes"][0]["rev"].startswith("2-")
# This will blow up if any change is not found in that dictionary
del remove_docs_scratch_off[change["id"]]
# Update last_seq
last_seq = a_user_changes["last_seq"]
# Issue changes request from 'last_seq' and verify that the changes are up to date and returns no results
a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
assert len(a_user_changes["results"]) == 0
#########################
# Add Channels to doc
#########################
# ... (the rest of this example has been omitted) ...
Example 15: test_longpoll_awaken_roles
# Required import: from keywords.MobileRestClient import MobileRestClient [as alias]
# Or: from keywords.MobileRestClient.MobileRestClient import create_user [as alias]
def test_longpoll_awaken_roles(params_from_base_test_setup, sg_conf_name):
cluster_conf = params_from_base_test_setup["cluster_config"]
cluster_topology = params_from_base_test_setup["cluster_topology"]
mode = params_from_base_test_setup["mode"]
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
sg_url = cluster_topology["sync_gateways"][0]["public"]
log_info("sg_conf: {}".format(sg_conf))
log_info("sg_admin_url: {}".format(sg_admin_url))
log_info("sg_url: {}".format(sg_url))
cluster = Cluster(config=cluster_conf)
cluster.reset(sg_config_path=sg_conf)
admin_role = "admin_role"
admin_channel = "admin_channel"
admin_user_info = userinfo.UserInfo(name="admin", password="pass", channels=[], roles=[admin_role])
adam_user_info = userinfo.UserInfo(name="adam", password="Adampass1", channels=[], roles=[])
traun_user_info = userinfo.UserInfo(name="traun", password="Traunpass1", channels=[], roles=[])
andy_user_info = userinfo.UserInfo(name="andy", password="Andypass1", channels=[], roles=[])
sg_db = "db"
client = MobileRestClient()
# Create a role on sync_gateway
client.create_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel])
# Create users with no channels or roles
admin_auth = client.create_user(url=sg_admin_url, db=sg_db,
name=admin_user_info.name, password=admin_user_info.password, roles=[admin_role])
adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
name=adam_user_info.name, password=adam_user_info.password)
traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
name=traun_user_info.name, password=traun_user_info.password)
andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
name=andy_user_info.name, password=andy_user_info.password)
################################
# change feed wakes for role add
################################
# Get starting sequence of docs, use the last seq to progress past any _user docs.
adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)
# Add doc with channel associated with the admin role
admin_doc = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="admin_doc", auth=admin_auth, channels=[admin_channel])
client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=admin_auth)
with concurrent.futures.ProcessPoolExecutor() as ex:
# Start changes feed for 3 users from latest last_seq
adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)
# Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
time.sleep(2)
# Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be caught up
# and waiting
assert not adam_changes_task.done()
assert not traun_changes_task.done()
assert not andy_changes_task.done()
adam_auth = client.update_user(url=sg_admin_url, db=sg_db,
name=adam_user_info.name, password=adam_user_info.password, roles=[admin_role])
traun_auth = client.update_user(url=sg_admin_url, db=sg_db,
name=traun_user_info.name, password=traun_user_info.password, roles=[admin_role])
andy_auth = client.update_user(url=sg_admin_url, db=sg_db,
name=andy_user_info.name, password=andy_user_info.password, roles=[admin_role])
adam_changes = adam_changes_task.result()
assert 1 <= len(adam_changes["results"]) <= 2
assert adam_changes["results"][0]["id"] == "admin_doc_0" or adam_changes["results"][0]["id"] == "_user/adam"
traun_changes = traun_changes_task.result()
assert 1 <= len(traun_changes["results"]) <= 2
assert traun_changes["results"][0]["id"] == "admin_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"
andy_changes = andy_changes_task.result()
assert 1 <= len(andy_changes["results"]) <= 2
assert andy_changes["results"][0]["id"] == "admin_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"
# Check that the user docs all show up in changes feed
client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)
# Check that the admin doc made it to all the changes feeds
client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=adam_auth)
# ... (the rest of this example has been omitted) ...