本文整理汇总了Python中keywords.MobileRestClient.MobileRestClient类的典型用法代码示例。如果您正苦于以下问题:Python MobileRestClient类的具体用法?Python MobileRestClient怎么用?Python MobileRestClient使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MobileRestClient类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setup_p2p_test
def setup_p2p_test(request, setup_p2p_suite):
    """Per-test fixture for p2p client tests.

    Starts both LiteServ instances with per-test, timestamped log files and
    yields their listener URLs. On teardown, deletes every database on both
    listeners and stops them.
    """
    log_info("Setting up P2P test ...")

    ls_one = setup_p2p_suite["liteserv_one"]
    ls_two = setup_p2p_suite["liteserv_two"]
    current_test = request.node.name

    def _log_path(server, tag):
        # One log file per server per test; timestamp avoids clobbering reruns
        return "{}/logs/{}-{}-{}-{}.txt".format(
            RESULTS_DIR, type(server).__name__, tag, current_test, datetime.datetime.now()
        )

    print("Starting LiteServ One ...")
    url_one = ls_one.start(_log_path(ls_one, "ls1"))

    print("Starting LiteServ Two ...")
    url_two = ls_two.start(_log_path(ls_two, "ls2"))

    # Hand the listener URLs to the test body
    yield {"ls_url_one": url_one, "ls_url_two": url_two}

    log_info("Tearing down test")

    # Teardown: drop all databases, then shut both listeners down
    rest_client = MobileRestClient()
    rest_client.delete_databases(url_one)
    rest_client.delete_databases(url_two)
    ls_one.stop()
    ls_two.stop()
示例2: test_raw_attachment
def test_raw_attachment(setup_client_syncgateway_test):
    """
    1. Add Text attachment to sync_gateway
    2. Try to get the raw attachment
    Pass: It is possible to get the raw attachment
    """
    log_info("Running 'test_raw_attachment'")

    listener_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(listener_url))

    rest_client = MobileRestClient()
    listener_db = rest_client.create_database(listener_url, name="ls_db")

    user_channels = ["NBC"]
    att_doc = document.create_doc(
        doc_id="att_doc",
        content={"sample_key": "sample_val"},
        attachment_name="sample_text.txt",
        channels=user_channels,
    )

    stored_doc = rest_client.add_doc(url=listener_url, db=listener_db, doc=att_doc)
    raw_attachment = rest_client.get_attachment(
        url=listener_url, db=listener_db, doc_id=stored_doc["id"], attachment_name="sample_text.txt"
    )

    # Canned body of the sample_text.txt attachment shipped with the test data
    expected_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
    assert raw_attachment == expected_text
示例3: test_load_balance_sanity
def test_load_balance_sanity(params_from_base_test_setup):
    """Sanity check that docs written through a load balancer reach the changes feed.

    Resets the cluster, creates a user/session against the first Sync Gateway
    admin port, then adds 1000 docs via the load balancer URL while a
    ChangesTracker listens on the same URL. Fails if the tracker does not
    observe every added doc before its timeout.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = topology["load_balancers"][0]

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "password"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    # User is created on the admin port; docs are added via the LB public URL
    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)
    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        # Run the changes-feed listener in the background while docs are added
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs
        log_info("Adding docs done")

        # Block until the tracker has seen all added docs (or gives up)
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            # Stop the tracker, then join its task before leaving the executor
            executor.submit(ct.stop)
            ct_task.result()
        else:
            # Stop and join the tracker before failing so the executor shuts down
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
示例4: test_net_msft_storage_engine
def test_net_msft_storage_engine(request, liteserv_with_storage_engine_from_fixture):
    """Verify the .NET MSFT LiteServ uses the configured storage engine.

    Starts LiteServ with a per-test log file, creates a database, stops the
    server, then inspects the log for the store / encryption markers that each
    storage engine is expected to emit.
    """
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")
    liteserv.stop()

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    # Raw strings: these Windows paths contain '\U', which is an invalid
    # \UXXXXXXXX unicode escape (SyntaxError) in a regular Python 3 literal.
    db_dir = r"C:\Users\user\Desktop\LiteServ\ls_db.cblite2"
    sqlite_store_line = "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at " + db_dir
    forestdb_store_line = "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at " + db_dir

    with open(logfile, "r") as f:
        contents = f.read()

    if storage_engine == "SQLite":
        # Note: SQLite mode uses SQLCipher by default, but without a key
        assert sqlite_store_line in contents
        assert "encryption key given" not in contents
    elif storage_engine == "SQLCipher":
        assert sqlite_store_line in contents
        assert "Open " + db_dir + r"\db.sqlite3" in contents
        # Bug fix: the original asserted the bare string literal
        # (`assert "encryption key given"`), which is always truthy and could
        # never fail. The intent is clearly to check the log contents.
        assert "encryption key given" in contents
    elif storage_engine == "ForestDB":
        assert forestdb_store_line in contents
        assert "Database is encrypted; setting CBForest encryption key" not in contents
    elif storage_engine == "ForestDB+Encryption":
        assert forestdb_store_line in contents
        assert "Database is encrypted; setting CBForest encryption key" in contents
    else:
        pytest.xfail("Invalid Storage Engine")
示例5: test_setting_expiry_in_bulk_docs
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents. Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents. Validate that only the 5 non-expiring documents are returned
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_kw = ClusterKeywords()
    cluster_topology = cluster_kw.get_cluster_topology(cluster_config)
    cluster_kw.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf)

    cbs_url = cluster_topology["couchbase_servers"][0]
    sg_url = cluster_topology["sync_gateways"][0]["public"]
    sg_url_admin = cluster_topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "[email protected]"
    sg_user_channels = ["NBC", "ABC"]

    rest_client = MobileRestClient()
    rest_client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    user_session = rest_client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # Half the batch expires after 3s, the other half after 10s
    short_lived = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    long_lived = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)
    bulk_docs = rest_client.add_bulk_docs(url=sg_url, db=sg_db, docs=short_lived + long_lived, auth=user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_get_response = rest_client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=user_session)

    surviving_ids = ["exp_10_{}".format(i) for i in range(5)]
    expired_ids = ["exp_3_{}".format(i) for i in range(5)]

    rest_client.verify_doc_ids_found_in_response(response=bulk_get_response, expected_doc_ids=surviving_ids)
    rest_client.verify_doc_ids_not_found_in_response(response=bulk_get_response, expected_missing_doc_ids=expired_ids)
示例6: test_removing_expiry
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with a new revision with no expiry value
    3. After 10 updates, update /db/doc1 with a revision with no expiry
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_kw = ClusterKeywords()
    cluster_topology = cluster_kw.get_cluster_topology(cluster_config)
    cluster_kw.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf)

    cbs_url = cluster_topology["couchbase_servers"][0]
    sg_url = cluster_topology["sync_gateways"][0]["public"]
    sg_url_admin = cluster_topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "[email protected]"
    sg_user_channels = ["NBC", "ABC"]

    rest_client = MobileRestClient()
    rest_client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    user_session = rest_client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    short_expiry_doc = rest_client.add_doc(
        url=sg_url, db=sg_db,
        doc=document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels),
        auth=user_session,
    )
    long_expiry_doc = rest_client.add_doc(
        url=sg_url, db=sg_db,
        doc=document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels),
        auth=user_session,
    )

    # Updates carry no expiry, which should remove the TTL from exp_3
    updated_short_doc = rest_client.update_doc(url=sg_url, db=sg_db, doc_id=short_expiry_doc["id"], number_updates=10, auth=user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    fetched_short = rest_client.get_doc(url=sg_url, db=sg_db, doc_id=updated_short_doc["id"], auth=user_session)
    assert fetched_short["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    fetched_long = rest_client.get_doc(url=sg_url, db=sg_db, doc_id=long_expiry_doc["id"], auth=user_session)
    assert fetched_long["_id"] == "exp_10"
示例7: test_numeric_expiry_as_ttl
def test_numeric_expiry_as_ttl(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
       PUT /db/doc2 via SG with property "_exp":10
    2. Wait five seconds
    3. Get /db/doc1. Assert response is 404
       Get /db/doc2. Assert response is 200
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_numeric_expiry_as_ttl'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "[email protected]"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # Numeric _exp values <= 30 days are interpreted as a TTL in seconds
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    # NOTE(review): indexing the exception with `he.value[0]` relies on
    # Python 2 exception-indexing semantics; on Python 3 this would need
    # str(he.value) — confirm the interpreter version this suite targets.
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
示例8: setup_client_syncgateway_test
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Per-test fixture for client sync_gateway tests.

    Starts LiteServ with a per-test log file, stops any running Sync Gateway,
    and yields the URLs the test needs. On teardown, cleans up databases,
    stops both servers, and pulls logs if the test failed.
    """
    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite
    test_name = request.node.name
    cluster_config = os.environ["CLUSTER_CONFIG"]

    listener_log = "{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()
    )
    ls_url = liteserv.start(listener_log)

    cluster_keywords = ClusterKeywords()
    sg_controller = SyncGateway()

    cluster_hosts = cluster_keywords.get_cluster_topology(cluster_config)
    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    # Ensure Sync Gateway is down before the test starts it with its own config
    sg_controller.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
    }

    log_info("Tearing down test")

    # Teardown: drop listener databases and stop both servers
    MobileRestClient().delete_databases(ls_url)
    liteserv.stop()
    sg_controller.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    # If the test failed, pull logs for analysis
    if request.node.rep_call.failed:
        Logging().fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)
示例9: test_android_storage_engine
def test_android_storage_engine(request, liteserv_with_storage_engine_from_fixture):
    """Check the Android LiteServ log output for the storageType / dbpassword
    markers expected for the configured storage engine.
    """
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")
    liteserv.stop()

    # Look in adb logcat to see if output match platform / storage engine expectation
    # We can't look at the database files directly to my knowledge without a rooted device
    captured_lines = []
    with open(logfile, "r") as log_handle:
        for raw_line in log_handle.readlines():
            if "LiteServ" in raw_line:
                captured_lines.append(raw_line.strip())

    log_info(captured_lines)

    # Expected (storageType suffix, dbpassword suffix) per engine
    encrypted_passwords = "dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass"
    expectations = {
        "SQLite": ("storageType=SQLite", "dbpassword="),
        "SQLCipher": ("storageType=SQLite", encrypted_passwords),
        "ForestDB": ("storageType=ForestDB", "dbpassword="),
        "ForestDB+Encryption": ("storageType=ForestDB", encrypted_passwords),
    }

    if liteserv.storage_engine not in expectations:
        pytest.xfail("Invalid storage engine")

    storage_suffix, password_suffix = expectations[liteserv.storage_engine]
    assert len(captured_lines) == 4
    assert captured_lines[0].endswith(storage_suffix)
    assert captured_lines[1].endswith(password_suffix)
示例10: test_writing_attachment_to_couchbase_server
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
    """Verify an attachment pushed through Sync Gateway is written to Couchbase Server.

    1. Reset the cluster with the given sync_gateway config
    2. Create a user/session and add 100 regular docs through Sync Gateway
    3. Push one doc carrying a text attachment
    4. Query Couchbase Server directly for keys with the "_sync:att:" prefix
       and assert exactly one attachment doc exists

    NOTE(review): the original docstring described the opposite scenario (a
    sync function rejecting all writes, with the attachment doc expected NOT
    to exist), but the code adds docs successfully and asserts the attachment
    doc IS present. The code's actual behavior is documented here — confirm
    which was intended.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "sg_user_password"
    sg_user_channels = ["NBC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session)
    assert len(docs) == 100

    # Create doc with attachment and push to sync_gateway
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels)
    client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session)

    server = CouchbaseServer(cbs_url)

    # Assert that the attachment doc gets written to couchbase server
    # (att doc keys look like _sync:att:sha1-<digest>)
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    num_att_docs = len(server_att_docs)
    assert num_att_docs == 1
示例11: test_peer_2_peer_sanity_pull
def test_peer_2_peer_sanity_pull(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """
    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]
    doc_count = 5000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    rest_client = MobileRestClient()

    log_info("Creating databases")
    db_one = rest_client.create_database(url=ls_url_one, name="ls_db1")
    db_two = rest_client.create_database(url=ls_url_two, name="ls_db2")

    # Continuous pull: LiteServ One pulls db_two into db_one
    pull_repl = rest_client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=db_two,
        to_db=db_one
    )
    rest_client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    seeded_docs = rest_client.add_docs(url=ls_url_two, db=db_two, number=doc_count, id_prefix="test_ls_db2")
    assert len(seeded_docs) == doc_count

    rest_client.verify_docs_present(url=ls_url_one, db=db_one, expected_docs=seeded_docs)
    rest_client.verify_docs_in_changes(url=ls_url_one, db=db_one, expected_docs=seeded_docs)
示例12: test_longpoll_changes_termination_heartbeat
def test_longpoll_changes_termination_heartbeat(setup_client_syncgateway_test):
    """https://github.com/couchbase/couchbase-lite-java-core/issues/1296
    Create 30 longpoll _changes in a loop (with heartbeat parameter = 5s)
    Cancel the request after 2s
    Wait 5.1s
    Create another request GET /db/ on listener and make sure the listener responds
    """
    log_info("Running 'longpoll_changes_termination' ...")

    ls_db = "ls_db"
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("Running 'test_longpoll_changes_termination' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    client = MobileRestClient()
    if client.get_server_platform(ls_url) == Platform.macosx:
        pytest.skip("https://github.com/couchbase/couchbase-lite-ios/issues/1236")

    client.create_database(ls_url, ls_db)

    ct = ChangesTracker(ls_url, ls_db)

    with ThreadPoolExecutor(max_workers=35) as executor:
        # 30 longpoll _changes requests that self-cancel after 2s
        futures = [executor.submit(
            ct.start,
            timeout=5000,
            heartbeat=5000,
            request_timeout=2000
        ) for _ in range(30)]

        # Bug fix: the original wrote `for futures in as_completed(futures)`,
        # shadowing (and clobbering) the list of futures with each completed
        # future. Use a distinct loop variable.
        for completed_future in as_completed(futures):
            log_info("Future _changes loop complete")

    log_info("Futures exited")

    # make sure client can still take connections
    dbs = client.get_databases(url=ls_url)
    log_info(dbs)

    database = client.get_database(url=ls_url, db_name=ls_db)
    log_info(database)
示例13: test_ios_full_life_cycle
def test_ios_full_life_cycle(request, liteserv_with_storage_engine_from_fixture):
    """Smoke test: start LiteServ, create a database, add docs, clean up."""
    liteserv = liteserv_with_storage_engine_from_fixture

    log_path = "{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR, type(liteserv).__name__, request.node.name, datetime.datetime.now()
    )
    ls_url = liteserv.start(log_path)

    rest_client = MobileRestClient()
    rest_client.create_database(ls_url, "ls_db")

    added_docs = rest_client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc")
    assert len(added_docs) == 10

    rest_client.delete_databases(ls_url)
    liteserv.stop()
示例14: test_auto_prune_listener_sanity
def test_auto_prune_listener_sanity(setup_client_syncgateway_test):
    """Sanity test for the autoprune feature.

    1. Create a db and put a doc
    2. Update the doc well past the default revs_limit (20)
    3. Assert the doc only retains 20 revs
    """
    ls_url = setup_client_syncgateway_test["ls_url"]
    rest_client = MobileRestClient()

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))

    doc_count = 1
    rev_count = 100

    db_name = rest_client.create_database(url=ls_url, name="ls_db")
    seeded_docs = rest_client.add_docs(url=ls_url, db=db_name, number=doc_count, id_prefix="ls_db_doc")
    assert len(seeded_docs) == doc_count

    # Push far past the default revs_limit so pruning must kick in
    rest_client.update_docs(url=ls_url, db=db_name, docs=seeded_docs, number_updates=rev_count)
    rest_client.verify_max_revs_num_for_docs(url=ls_url, db=db_name, docs=seeded_docs, expected_max_number_revs_per_doc=20)
示例15: test_multiple_replications_created_with_unique_properties
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
"""Regression test for couchbase/couchbase-lite-java-core#1386
1. Setup SGW with a remote database name db for an example
2. Create a local database such as ls_db
3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
6. Make sure that the session_id from each POST /_replicate are different.
7. Send GET /_active_tasks to make sure that there are 3 tasks created.
8. Send 3 POST /_replicate withe the same parameter as Step 3=5 plus cancel=true to stop those replicators
9. Repeat Step 3 - 8 with source = and target = db for testing the pull replicator.
"""
sg_db = "db"
ls_db = "ls_db"
cluster_config = setup_client_syncgateway_test["cluster_config"]
ls_url = setup_client_syncgateway_test["ls_url"]
sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
sg_one_public = setup_client_syncgateway_test["sg_url"]
sg_helper = SyncGateway()
sg_helper.start_sync_gateway(
cluster_config=cluster_config,
url=sg_one_public,
config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
)
log_info("Running 'test_multiple_replications_created_with_unique_properties'")
log_info("ls_url: {}".format(ls_url))
log_info("sg_one_admin: {}".format(sg_one_admin))
log_info("sg_one_public: {}".format(sg_one_public))
client = MobileRestClient()
client.create_database(url=ls_url, name=ls_db)
########
# PUSH #
########
# Start 3 unique push replication requests
repl_one = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
client.wait_for_replication_status_idle(ls_url, repl_one)
repl_two = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db,
doc_ids=["doc_1", "doc_2"]
)
client.wait_for_replication_status_idle(ls_url, repl_two)
# Create doc filter and add to the design doc
filters = {
"language": "javascript",
"filters": {
"sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
}
}
client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))
repl_three = client.start_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db,
repl_filter="by_type/sample_filter"
)
client.wait_for_replication_status_idle(ls_url, repl_three)
# Verify 3 replicaitons are running
replications = client.get_replications(ls_url)
log_info(replications)
assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
3,
len(replications)
)
# Stop repl001
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
to_url=sg_one_admin,
to_db=sg_db
)
# Stop repl002
client.stop_replication(
url=ls_url,
continuous=True,
from_db=ls_db,
#.........这里部分代码省略.........