本文整理汇总了Python中keywords.MobileRestClient.MobileRestClient.get_replications方法的典型用法代码示例。如果您正苦于以下问题:Python MobileRestClient.get_replications方法的具体用法?Python MobileRestClient.get_replications怎么用?Python MobileRestClient.get_replications使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keywords.MobileRestClient.MobileRestClient
的用法示例。
在下文中一共展示了MobileRestClient.get_replications方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_replication_with_session_cookie
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_replication_with_session_cookie(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817

    1. SyncGateway Config with guest disabled = true and One user added (e.g. user1 / 1234)
    2. Create a new session on SGW for the user1 by using POST /_session.
       Capture the SyncGatewaySession cookie from the set-cookie in the response header.
    3. Start continuous push and pull replicator on the LiteServ with SyncGatewaySession cookie.
       Make sure that both replicators start correctly
    4. Delete the session from SGW by sending DELETE /_sessions/ to SGW
    5. Cancel both push and pull replicator on the LiteServ
    6. Repeat step 1 and 2
    """
    ls_db = "ls_db"  # LiteServ (client-side) database name
    sg_db = "db"     # Sync Gateway database name

    # Endpoints and cluster config are provided by the test fixture.
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    # Boot Sync Gateway with the walrus-user config
    # (presumably defines user_1 with guest disabled -- verify in the config file).
    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_session_cookie'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    # Get session header for user_1
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="foo")

    # Get session id from header: split cookie text of the form
    # "<cookie-name>=<session-id>; ..." on '=' and ';', so index 0 is the
    # cookie name and index 1 is the session id value.
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))
    # (cookie-name, session-id) tuple -- used as auth for doc operations below.
    session = (session_parts[0], session_id)

    # Start authenticated push replication (LiteServ ls_db -> Sync Gateway sg_db)
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication (Sync Gateway sg_db -> LiteServ ls_db)
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    # Wait for 2 replications to be 'Idle', On .NET they may not be immediately available via _active_tasks
    client.wait_for_replication_status_idle(ls_url, repl_one)
    client.wait_for_replication_status_idle(ls_url, repl_two)

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull should be running)"

    num_docs_pushed = 100

    # Sanity test docs: seed both sides and verify convergence on both ends.
    ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"])
    assert len(ls_docs) == num_docs_pushed

    sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"])
    assert len(sg_docs) == num_docs_pushed

    all_docs = client.merge(ls_docs, sg_docs)
    log_info(all_docs)

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)

    # GET from session endpoint /{db}/_session/{session-id}
    # NOTE: rebinds `session` from the auth tuple to the session-info dict.
    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"
    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and _user rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)
#.........这里部分代码省略.........
示例2: test_multiple_replications_created_with_unique_properties
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
    """Regression test for couchbase/couchbase-lite-java-core#1386

    1. Setup SGW with a remote database name db for an example
    2. Create a local database such as ls_db
    3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
    4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
    5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
    6. Make sure that the session_id from each POST /_replicate are different.
    7. Send GET /_active_tasks to make sure that there are 3 tasks created.
    8. Send 3 POST /_replicate with the same parameters as Steps 3-5 plus cancel=true to stop those replicators
    9. Repeat Steps 3 - 8 with source and target swapped for testing the pull replicator.
    """
    sg_db = "db"     # Sync Gateway database name
    ls_db = "ls_db"  # LiteServ (client-side) database name

    # Endpoints and cluster config are provided by the test fixture.
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_created_with_unique_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    ########
    # PUSH #
    ########

    # Start 3 unique push replication requests: same source/target, but each
    # differs in one property (plain / doc_ids / filter), so each should get
    # its own replication task.
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_one)

    # Same endpoints, but restricted to an explicit doc_ids list.
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_two)

    # Create doc filter and add to the design doc
    filters = {
        "language": "javascript",
        "filters": {
            "sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
        }
    }
    client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))

    # Same endpoints again, but with a replication filter attached.
    repl_three = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )
    client.wait_for_replication_status_idle(ls_url, repl_three)

    # Verify 3 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
        3,
        len(replications)
    )

    # Stop repl001
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Stop repl002
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
#.........这里部分代码省略.........
示例3: test_initial_pull_replication
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """Seed Sync Gateway with 10000 docs, pull them into a LiteServ database
    via a one-shot or continuous pull replication, and verify every doc
    arrives on the client.

    Referenced issue: couchbase/couchbase-lite-android#955.
    """
    sg_db = "db"
    ls_db = "ls_db"
    num_docs = 10000

    fixture = setup_client_syncgateway_test
    cluster_config = fixture["cluster_config"]
    ls_url = fixture["ls_url"]
    sg_one_admin = fixture["sg_admin_url"]
    sg_one_public = fixture["sg_url"]

    gateway = SyncGateway()
    gateway.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    rest_client = MobileRestClient()
    rest_client.create_user(sg_one_admin, sg_db, "seth", password="password", channels=["ABC", "NBC"])
    seth_session = rest_client.create_session(sg_one_admin, sg_db, "seth")

    # Seed the Sync Gateway database before the replication starts.
    seeded_docs = rest_client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=seth_session
    )
    assert len(seeded_docs) == num_docs

    rest_client.create_database(url=ls_url, name=ls_db)

    # Kick off the pull replication (one-shot or continuous per the test param).
    pull_repl = rest_client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    pull_started_at = time.time()
    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(pull_repl))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        rest_client.wait_for_replication_status_idle(ls_url, pull_repl)
    else:
        log_info("Waiting for no replications: {}".format(pull_repl))
        rest_client.wait_for_no_replications(ls_url)

    # Every seeded doc must have replicated to the client ...
    rest_client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=seeded_docs, timeout=240)
    elapsed = time.time() - pull_started_at
    log_info("Replication took: {}s".format(elapsed))

    # ... and must show up in the client's changes feed.
    rest_client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=seeded_docs)

    active_replications = rest_client.get_replications(url=ls_url)
    if continuous:
        assert len(active_replications) == 1, "There should only be one replication running"
        running = active_replications[0]
        assert running["status"] == "Idle", "Replication Status should be 'Idle'"
        assert running["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in running:
            assert len(running["error"]) == 0
    else:
        assert len(active_replications) == 0, "No replications should be running"
示例4: test_multiple_replications_not_created_with_same_properties
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_multiple_replications_not_created_with_same_properties(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/939

    1. Create LiteServ database and launch sync_gateway with database
    2. Start 50 concurrent continuous push replicators with the same source and target
    3. Make sure the same replication id is returned
    4. Check that only one replication exists in 'active_tasks'
    5. Stop the replication with POST /_replicate cancel=true
    6. Start 50 concurrent continuous pull replicators with the same source and target
    7. Make sure the same replication id is returned
    8. Check that only one replication exists in 'active_tasks'
    9. Stop the replication with POST /_replicate cancel=true
    """
    sg_db = "db"     # Sync Gateway database name
    ls_db = "ls_db"  # LiteServ (client-side) database name

    # Endpoints and cluster config are provided by the test fixture.
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_not_created_with_same_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    repl_id_num = 0           # baseline: replication ids should number above 0
    response_one_id_num = 0   # numeric id parsed from the push responses
    response_two_id_num = 0   # numeric id parsed from the pull responses

    # launch 50 concurrent push replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=ls_db,
            to_url=sg_one_admin,
            to_db=sg_db
        ) for _ in range(50)]
        for future in as_completed(futures):
            response_one_id = future.result()
            # Convert session_id from string "repl001" -> int 1
            response_one_id_num = int(response_one_id.replace("repl", ""))
            log_info(response_one_id_num)

    # Assert that concurrent replications have a greater session id than 0
    assert response_one_id_num > repl_id_num, "'response_one_id_num': {} should be greater than 'repl_id_num': {}".format(
        response_one_id_num,
        repl_id_num
    )

    # Check there is only one replication running
    replications = client.get_replications(ls_url)
    assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
        1,
        len(replications)
    )

    # Stop replication
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Check that no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
        0,
        len(replications)
    )

    # launch 50 concurrent pull replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=sg_db,
            from_url=sg_one_admin,
            to_db=ls_db
        ) for _ in range(50)]
        for future in as_completed(futures):
            response_two_id = future.result()
#.........这里部分代码省略.........
示例5: test_initial_push_replication
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """Seed LiteServ with 10000 docs, push them into a sync_gateway database
    via a one-shot or continuous push replication, and verify every doc
    arrives on the gateway.
    """
    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]
    num_docs = 10000

    fixture = setup_client_syncgateway_test
    cluster_config = fixture["cluster_config"]
    ls_url = fixture["ls_url"]
    sg_one_admin = fixture["sg_admin_url"]
    sg_one_public = fixture["sg_url"]

    gateway = SyncGateway()
    gateway.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    rest_client = MobileRestClient()
    rest_client.create_user(sg_one_admin, sg_db, "seth", password="password", channels=seth_channels)
    seth_session = rest_client.create_session(sg_one_admin, sg_db, "seth")
    rest_client.create_database(url=ls_url, name=ls_db)

    # Seed the LiteServ database before the replication starts.
    seeded_docs = rest_client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(seeded_docs) == num_docs

    # Kick off the push replication (one-shot or continuous per the test param).
    push_repl = rest_client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(push_repl))
        rest_client.wait_for_replication_status_idle(ls_url, push_repl)
    else:
        log_info("Waiting for no replications: {}".format(push_repl))
        rest_client.wait_for_no_replications(ls_url)

    # Every seeded doc must be present on sync_gateway ...
    rest_client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=seeded_docs, auth=seth_session)
    # ... and must show up in sync_gateway's changes feed.
    rest_client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=seeded_docs, auth=seth_session)

    active_replications = rest_client.get_replications(url=ls_url)
    if continuous:
        assert len(active_replications) == 1, "There should only be one replication running"
        running = active_replications[0]
        assert running["status"] == "Idle", "Replication Status should be 'Idle'"
        assert running["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in running:
            assert len(running["error"]) == 0
    else:
        assert len(active_replications) == 0, "No replications should be running"
示例6: test_peer_2_peer_sanity
# 需要导入模块: from keywords.MobileRestClient import MobileRestClient [as 别名]
# 或者: from keywords.MobileRestClient.MobileRestClient import get_replications [as 别名]
def test_peer_2_peer_sanity(setup_p2p_test):
    """P2P sanity scenario: two LiteServ instances replicate continuously in
    both directions; docs added on either side must appear in both databases
    and in both changes feeds.
    """
    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]
    num_docs_per_db = 1000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    rest_client = MobileRestClient()

    log_info("Creating databases")
    db_one = rest_client.create_database(url=ls_url_one, name="ls_db1")
    db_two = rest_client.create_database(url=ls_url_two, name="ls_db2")

    # LiteServ 1 drives a push and a pull against LiteServ 2 ...
    push_from_one = rest_client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=db_one,
        to_url=ls_url_two, to_db=db_two
    )
    pull_into_one = rest_client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=db_two,
        to_db=db_one
    )
    # ... and LiteServ 2 drives its own push and pull against LiteServ 1.
    push_from_two = rest_client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_db=db_two,
        to_url=ls_url_one, to_db=db_one
    )
    pull_into_two = rest_client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_url=ls_url_one, from_db=db_one,
        to_db=db_two
    )

    rest_client.wait_for_replication_status_idle(url=ls_url_one, replication_id=push_from_one)
    rest_client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_into_one)
    rest_client.wait_for_replication_status_idle(url=ls_url_two, replication_id=push_from_two)
    rest_client.wait_for_replication_status_idle(url=ls_url_two, replication_id=pull_into_two)

    # Each LiteServ should report exactly its own push + pull pair.
    replications_on_one = rest_client.get_replications(ls_url_one)
    assert len(replications_on_one) == 2
    replications_on_two = rest_client.get_replications(ls_url_two)
    assert len(replications_on_two) == 2

    # Seed each side with its own batch of docs.
    docs_on_one = rest_client.add_docs(url=ls_url_one, db=db_one, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(docs_on_one) == num_docs_per_db
    docs_on_two = rest_client.add_docs(url=ls_url_two, db=db_two, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(docs_on_two) == num_docs_per_db

    all_docs = rest_client.merge(docs_on_one, docs_on_two)
    assert len(all_docs) == 2000

    # Both databases converge to the full merged doc set ...
    rest_client.verify_docs_present(url=ls_url_one, db=db_one, expected_docs=all_docs)
    rest_client.verify_docs_present(url=ls_url_two, db=db_two, expected_docs=all_docs)
    # ... and both changes feeds report every doc.
    rest_client.verify_docs_in_changes(url=ls_url_one, db=db_one, expected_docs=all_docs)
    rest_client.verify_docs_in_changes(url=ls_url_two, db=db_two, expected_docs=all_docs)