本文整理汇总了Python中libraries.testkit.cluster.Cluster.reset方法的典型用法代码示例。如果您正苦于以下问题:Python Cluster.reset方法的具体用法?Python Cluster.reset怎么用?Python Cluster.reset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类libraries.testkit.cluster.Cluster
的用法示例。
在下文中一共展示了Cluster.reset方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_offline_false_config_rest
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):
    """Sanity-check a db configured with offline=false: every REST endpoint works
    and each running sync_gateway reports the db as Online."""
    cluster_config = params_from_base_test_setup["cluster_config"]
    run_mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, run_mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)

    # With the db online, a full REST scan should come back clean.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not scan_errors

    # Scenario 4: the db must report an Online state on every running sync_gateway.
    for gateway in cluster.sync_gateways:
        db_state = Admin(gateway).get_db_info("db")
        assert db_state["state"] == "Online"
示例2: test_offline_true_config_bring_online
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """Start a db configured offline, confirm every endpoint 503s, then bring it
    online via the admin API and confirm a clean scan."""
    cluster_config = params_from_base_test_setup["cluster_config"]
    run_mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, run_mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)
    admin = Admin(cluster.sync_gateways[0])

    # While offline, every REST endpoint plus each doc PUT should fail with 503.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=False,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(scan_errors) == NUM_ENDPOINTS + (num_docs * 2)
    for endpoint, status_code in scan_errors:
        log_info("({},{})".format(endpoint, status_code))
        assert status_code == 503

    # Scenario 9: POST /db/_online must succeed.
    assert admin.bring_db_online(db="db") == 200

    # Once online, the same scan should produce no errors.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not scan_errors
示例3: test_online_to_offline_check_503
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):
    """Take an online db offline via the admin API and verify every db endpoint
    (and each doc PUT) comes back 503."""
    cluster_config = params_from_base_test_setup["cluster_config"]
    run_mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, run_mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)
    admin = Admin(cluster.sync_gateways[0])

    # Online: a full REST scan should be error free.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not scan_errors

    # Take the db offline through the admin API.
    assert admin.take_db_offline(db="db") == 200

    # Offline: NUM_ENDPOINTS unique REST endpoints plus two failures per doc PUT.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=False,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(scan_errors) == NUM_ENDPOINTS + (num_docs * 2)
    for endpoint, status_code in scan_errors:
        log_info("({},{})".format(endpoint, status_code))
        assert status_code == 503
示例4: test_db_offline_tap_loss_sanity
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_db_offline_tap_loss_sanity(params_from_base_test_setup, sg_conf_name, num_docs):
    """Delete the backing server bucket to sever the TAP feed and verify the db
    transitions to an offline state (all endpoints return 503)."""
    cluster_config = params_from_base_test_setup["cluster_config"]
    run_mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, run_mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)

    # Healthy cluster: the REST scan should produce no errors.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not scan_errors

    # Deleting the bucket severs the TAP feed behind the db.
    assert cluster.servers[0].delete_bucket("data-bucket") == 0

    # The db should now behave as offline: every endpoint and doc PUT 503s.
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=False,
                            num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(scan_errors) == NUM_ENDPOINTS + (num_docs * 2)
    for endpoint, status_code in scan_errors:
        log_info("({},{})".format(endpoint, status_code))
        assert status_code == 503
示例5: test_seq
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """
    Verify that _changes can be issued with a compound 'since' value taken from
    the middle of a previous changes response, and that the 'seq' entries match
    the running mode: hashed 'x::y' values in distributed-index (di) mode,
    plain positive sequences in channel-cache (cc) mode.

    Fix: use floor division for the result index — 'num_docs / 2' is a float
    in Python 3 and raises TypeError when used as a list index.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="password", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    # BUG FIX: '//' (floor division) — '/' yields a float in Python 3, which
    # is not a valid list index.
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify the last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    # Every user shares channel ABC, so each should converge on the full doc set.
    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
示例6: create_sync_gateways
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def create_sync_gateways(cluster_config, sg_config_path):
    """Reset the cluster with the given sync_gateway config and return its
    first two sync_gateway instances as a (sg1, sg2) pair."""
    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config_path)
    return cluster.sync_gateways[0], cluster.sync_gateways[1]
示例7: test_db_online_offline_webhooks_offline_two
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_db_online_offline_webhooks_offline_two(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """
    Verify that deleting the server bucket pushes the db offline and that the
    last webhook event received by the listening test web server reports the
    'offline' state.

    Fix: stop the web server in a finally block — previously any assertion or
    exception between start() and stop() leaked the listener/port.
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline_two'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    # Keyword argument for consistency with the other tests in this module.
    cluster.reset(sg_config_path=sg_conf)
    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    ws = WebServer()
    ws.start()
    try:
        sgs = cluster.sync_gateways
        admin = Admin(sgs[0])

        # Register User
        log_info("Register User")
        user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                                 number=num_users, password=password, channels=channels)

        # Add docs
        log_info("Add docs")
        in_parallel(user_objects, 'add_docs', num_docs)

        # Update docs
        log_info("Update docs")
        in_parallel(user_objects, 'update_docs', num_revisions)
        time.sleep(10)

        # Deleting the backing bucket severs the feed and should force the db offline.
        status = cluster.servers[0].delete_bucket("data-bucket")
        assert status == 0

        # Give sync_gateway time to notice the lost feed and emit the state webhook.
        log_info("Sleeping for 120 seconds...")
        time.sleep(120)

        webhook_events = ws.get_data()
        time.sleep(5)
        log_info("webhook event {}".format(webhook_events))

        # The final webhook event must report the db's 'offline' state.
        last_event = webhook_events[-1]
        assert last_event['state'] == 'offline'
    finally:
        # Always release the listener, even if an assertion above fails.
        ws.stop()
示例8: test_dcp_reshard_single_sg_accel_goes_down_and_up
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):
    """
    DCP reshard test: stop one sg_accel, push docs concurrently, take down the
    remaining accel mid-write (so no writers are online), restart it, and verify
    all docs are eventually visible via _changes.

    NOTE(review): the interleaving of submit() calls with accel stop/start is
    deliberate — do not reorder these statements.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = dict()
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"
        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # take down a sync_gateway (the only remaining sg_accel writer)
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this — fixed sleep waits for the restarted writer to catch up
    time.sleep(120)

    # Seth wrote 8000 + 2000 docs; Traun wrote 10000 — both should be fully replicated.
    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start second writer again
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
示例9: test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users(params_from_base_test_setup, sg_conf_name, num_docs, num_users):
    """
    Open blocking longpoll _changes feeds for multiple users, then take the db
    offline and verify every feed is closed cleanly (returns rather than raises)
    by the offline transition.

    NOTE(review): the sleep between submitting the longpoll trackers and the
    offline task is deliberate — the feeds must be established first.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_users: {}".format(num_users))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", password="password", number=num_users, channels=["ABC"])

    feed_close_results = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # start longpoll tracking with no timeout, will block until longpoll is closed by db going offline
        futures = {executor.submit(user.start_longpoll_changes_tracking, termination_doc_id=None, timeout=0, loop=False): user.name for user in users}
        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name.startswith("user"):
                # Long poll will exit with 503, return docs in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                    feed_close_results.append((docs_in_changes, last_seq_num))
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # long poll should be closed so this exception should never happen
                    assert 0

    # Assert that the feed close results length is num_users
    assert len(feed_close_results) == num_users

    # Account for _user doc
    # last_seq may be of the form '1' for channel cache or '1-0' for distributed index
    for feed_result in feed_close_results:
        docs_in_changes = feed_result[0]
        seq_num_component = feed_result[1].split("-")
        assert len(docs_in_changes) == 0
        assert int(seq_num_component[0]) > 0
示例10: test_sync_channel_sanity
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):
    """
    Push docs into ABC/NBC/CBS; the sync function reroutes them all to
    'tv_station_channel', so only the subscriber sees them. An update made by
    the subscriber routes them back to their original channels, after which the
    subscriber's changes feed should flag every doc as _removed.
    """
    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # One pusher per station channel, each adding a bulk batch of docs.
    for channel in channels:
        pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="password", channels=[channel])
        pusher.add_docs(num_docs_per_channel, bulk=True)
        doc_pushers.append(pusher)
        doc_pusher_caches.append(pusher.cache)

    # The sync function redirects every doc, so no pusher should see any docs.
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="password", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # The subscriber should receive every doc that was pushed.
    all_docs = {doc_id: doc for cache in doc_pusher_caches for doc_id, doc in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # Seed the subscriber's cache so it knows which docs to update.
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Docs should be routed back to their respective ABC / NBC / CBS channels.
    # HACK: rev_id verification is skipped because the update was performed by
    # the subscriber user, not by the original doc_pusher.
    for pusher in doc_pushers:
        verify_changes(pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=pusher.cache, ignore_rev_ids=True)

    # All docs should now be flagged _removed=true in the subscriber's changes feed.
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
示例11: test_multiple_users_multiple_channels
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):
    """Three users with overlapping channel grants should each see exactly the
    union of docs from the channels they can read."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways
    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="password", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="password", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)    # ABC
    adam.add_docs(num_docs_adam)    # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    for user, expected_count in ((seth, num_docs_seth), (adam, num_docs_adam), (traun, num_docs_traun)):
        assert len(user.cache) == expected_count

    # discuss appropriate time with team
    time.sleep(10)

    def merged(caches):
        # Flatten a list of per-user doc caches into one {doc_id: doc} dict.
        return {doc_id: doc for cache in caches for doc_id, doc in cache.items()}

    # Seth (ABC) should get docs from seth + traun.
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=merged([seth.cache, traun.cache]))

    # Adam (NBC, CBS) should get docs from adam + traun.
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=merged([adam.cache, traun.cache]))

    # Traun (all three channels) should get docs from seth + adam + traun.
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=merged([seth.cache, adam.cache, traun.cache]))
示例12: test_pindex_distribution
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_pindex_distribution(params_from_base_test_setup, sg_conf):
    """
    The validation of the cbgt pindex distribution lives inside the
    cluster.reset() method itself, so this test only needs to perform a reset
    with the right configuration — a clean return is a pass.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_pindex_distribution'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    Cluster(config=cluster_conf).reset(sg_config_path=sg_conf)
示例13: test_webhooks
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_webhooks(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """
    Exercise the webhook handler: every doc add and every revision update should
    deliver exactly one event to the listening test web server.

    Fix: stop the web server in a finally block — previously an exception from
    user registration or the parallel doc operations leaked the listener/port.
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_webhooks'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    # Keyword argument for consistency with the other tests in this module.
    cluster.reset(sg_config_path=sg_conf)
    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    ws = WebServer()
    ws.start()
    try:
        sgs = cluster.sync_gateways
        admin = Admin(sgs[0])

        # Register User
        log_info("Register User")
        user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                                 number=num_users, password=password, channels=channels)

        # Add docs
        log_info("Add docs")
        in_parallel(user_objects, 'add_docs', num_docs)

        # Update docs
        log_info("Update docs")
        in_parallel(user_objects, 'update_docs', num_revisions)

        # Give the webhook handler time to deliver all events before stopping.
        time.sleep(30)
    finally:
        # Always release the listener, even if the doc operations raise.
        ws.stop()

    # One event per initial add plus one per revision update.
    expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs)
    received_events = len(ws.get_data())
    log_info("expected_events: {} received_events {}".format(expected_events, received_events))
    assert expected_events == received_events
示例14: reset_cluster
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def reset_cluster(self, cluster_config, sync_gateway_config):
    """
    Reset the cluster described by 'cluster_config' to a clean state:

    1. Stop sync_gateways
    2. Stop sg_accels
    3. Delete sync_gateway artifacts (logs, conf)
    4. Delete sg_accel artifacts (logs, conf)
    5. Delete all server buckets
    6. Create buckets from 'sync_gateway_config'
    7. Wait for server to be in 'healthy' state
    8. Deploy sync_gateway config and start
    9. Deploy sg_accel config and start (distributed index mode only)
    """
    # Delegate the whole sequence to Cluster.reset().
    Cluster(config=cluster_config).reset(sync_gateway_config)
示例15: test_continuous_changes_parametrized
# 需要导入模块: from libraries.testkit.cluster import Cluster [as 别名]
# 或者: from libraries.testkit.cluster.Cluster import reset [as 别名]
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """
    Many users follow a continuous _changes feed while a pusher adds and updates
    docs; a doc written to the TERMINATE channel closes each feed, after which
    every user must have received all of the channel's docs.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    # Followers share "ABC" (doc channel) and "TERMINATE" (feed-shutdown channel).
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="password", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="password", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="password", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        # Each user blocks on its continuous feed until the termination doc arrives.
        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user has continuous _changes feed closed, return the docs and verify the user got all the channel docs
                docs_in_changes = future.result()
                # Expect number of docs + the termination doc + _user doc
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)