本文整理汇总了Python中libraries.testkit.admin.Admin类的典型用法代码示例。如果您正苦于以下问题:Python Admin类的具体用法?Python Admin怎么用?Python Admin使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Admin类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_sg_users_channels
def create_sg_users_channels(sg1, sg2, db1, db2):
    """Register three users across two Sync Gateways.

    sg1/db1 gets one user on channel "A" and one on channel "B";
    sg2/db2 gets a single user subscribed to all channels ("*").
    Returns the three user objects in that order.
    """
    sg1_admin = Admin(sg1)
    sg2_admin = Admin(sg2)

    user_a = sg1_admin.register_user(
        target=sg1, db=db1, name="sg1A_user", password="sg1A_user", channels=["A"],
    )
    user_b = sg1_admin.register_user(
        target=sg1, db=db1, name="sg1B_user", password="sg1B_user", channels=["B"],
    )
    user_all = sg2_admin.register_user(
        target=sg2, db=db2, name="sg2_user", password="sg2_user", channels=["*"],
    )

    return user_a, user_b, user_all
示例2: test_offline_false_config_rest
def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):
    """With an offline=false config, every db REST endpoint should work and
    each running sync_gateway should report the db state as "Online"."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # All db endpoints should function as expected
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                       num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not errors

    # Scenario 4: the db must show an Online state on every sync_gateway
    for gateway in cluster.sync_gateways:
        assert Admin(gateway).get_db_info("db")["state"] == "Online"
示例3: test_online_to_offline_check_503
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):
    """Take the db offline via the admin API and verify that every db endpoint
    subsequently answers with HTTP 503."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # While online, all db endpoints should behave normally
    assert not rest_scan(cluster.sync_gateways[0], db="db", online=True,
                         num_docs=num_docs, user_name="seth", channels=["ABC"])

    # Take the bucket offline
    assert admin.take_db_offline(db="db") == 200

    # Every db endpoint should now return 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False,
                       num_docs=num_docs, user_name="seth", channels=["ABC"])
    # NUM_ENDPOINTS unique REST endpoints + two failures per attempted doc PUT
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for endpoint, status_code in errors:
        log_info("({},{})".format(endpoint, status_code))
        assert status_code == 503
示例4: test_offline_true_config_bring_online
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """Start from an offline=true config (all db endpoints 503), bring the db
    online via POST /db/_online, and verify all endpoints start succeeding."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # While offline, every db endpoint should fail with 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False,
                       num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for endpoint, status_code in errors:
        log_info("({},{})".format(endpoint, status_code))
        assert status_code == 503

    # Scenario 9: POST /db/_online brings the db back
    assert admin.bring_db_online(db="db") == 200

    # Once online, every db endpoint should succeed
    assert not rest_scan(cluster.sync_gateways[0], db="db", online=True,
                         num_docs=num_docs, user_name="seth", channels=["ABC"])
示例5: test_seq
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Verify a '_changes' request can be issued with a 'since' value taken from
    the middle of another user's feed, and that the trailing entries use the
    sequence format expected for the sync_gateway mode ("di" vs "cc").

    See https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # All users share the same channel, so every user sees every doc
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user",
                                      number=num_users, password="password", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)
    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    # BUGFIX: use floor division so the index stays an int on both Python 2 and 3
    # ('/' produces a float on Python 3, which is not a valid list index).
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify the last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    # Expect every user's docs (with all revisions) in everyone's feed
    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs,
                   expected_num_revisions=num_revisions, expected_docs=all_docs)
示例6: test_db_online_offline_webhooks_offline_two
def test_db_online_offline_webhooks_offline_two(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Delete the backing bucket while webhooks are configured and verify the
    last received webhook event reports the db state as 'offline'."""
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline_two'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)
    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    webhook_server = WebServer()
    webhook_server.start()

    admin = Admin(cluster.sync_gateways[0])

    # Register users
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db",
                                             name_prefix="User", number=num_users,
                                             password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Deleting the bucket should push the db offline
    assert cluster.servers[0].delete_bucket("data-bucket") == 0

    log_info("Sleeping for 120 seconds...")
    time.sleep(120)

    webhook_events = webhook_server.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(webhook_events))

    # The final webhook event should record the offline transition
    assert webhook_events[-1]['state'] == 'offline'

    webhook_server.stop()
示例7: test_dcp_reshard_single_sg_accel_goes_down_and_up
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):
    """Bounce an sg_accel while docs are being written and verify no docs are
    lost once the accel is back and DCP resharding has settled."""
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel up front
    assert cluster.sg_accels[1].stop() == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])
    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun",
                                password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth",
                               password="password", channels=["FOX"])
    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        tasks = dict()

        log_info(">>> Adding Seth docs")  # FOX
        tasks[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        tasks[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take the remaining writer down mid-flight
        assert cluster.sg_accels[0].stop() == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        tasks[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Bring a single writer back
        assert cluster.sg_accels[0].start(sg_conf) == 0

        for completed in concurrent.futures.as_completed(tasks):
            log_info("{} Completed:".format(tasks[completed]))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start the second writer again
    assert cluster.sg_accels[1].start(sg_conf) == 0
示例8: test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users
def test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users(params_from_base_test_setup, sg_conf_name, num_docs, num_users):
    """Open blocking longpoll _changes feeds for several users, take the db
    offline, and verify the offline call returns 200 and every feed closes
    cleanly (no exception, no docs, a positive last_seq)."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_users: {}".format(num_users))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user",
                                      password="password", number=num_users, channels=["ABC"])

    feed_close_results = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # Start longpoll tracking with no timeout; each call blocks until the
        # feed is closed by the db going offline
        tasks = {
            executor.submit(user.start_longpoll_changes_tracking,
                            termination_doc_id=None, timeout=0, loop=False): user.name
            for user in users
        }

        # Give the longpolls time to connect before taking the db offline
        time.sleep(5)
        tasks[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(tasks):
            task_name = tasks[future]
            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # The offline call must succeed
                assert future.result() == 200
            if task_name.startswith("user"):
                # Longpoll exits with a 503; docs seen so far come back in the result
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                    feed_close_results.append((docs_in_changes, last_seq_num))
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # The feed should close cleanly, so this must never happen
                    assert 0

    # Every user's feed must have closed and reported back
    assert len(feed_close_results) == num_users

    # Account for the _user doc; last_seq may look like '1' (channel cache)
    # or '1-0' (distributed index)
    for docs_in_changes, last_seq in feed_close_results:
        assert len(docs_in_changes) == 0
        assert int(last_seq.split("-")[0]) > 0
示例9: test_sync_channel_sanity
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):
    """Sanity-check a sync function that redirects pushed docs into a
    'tv_station_channel': pushers see nothing, a subscriber sees everything,
    and the subscriber's updates flow back to the original channels."""
    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []

    # Push docs into each source channel
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db",
                                         name="{}_doc_pusher".format(channel),
                                         password="password", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)
        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # None of the pushers should see any docs: the sync function redirects them all
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber",
                                     password="password", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # The subscriber should receive every pushed doc
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel,
                   expected_num_revisions=0, expected_docs=all_docs)

    # Update the subscriber's cache so the user knows which docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Docs should be back in their respective ABC / NBC / CBS channels.
    # HACK: skip rev_id verification because the update came from the
    # subscriber user, not the original doc_pusher.
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1,
                       expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Every doc should be flagged _removed=true in the subscriber's changes feed
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
示例10: test_sg_replicate_push_async
def test_sg_replicate_push_async(params_from_base_test_setup, num_docs):
    # Kick off a large async push replication from sg1 to sg2 and verify the
    # data eventually syncs: the last doc added to sg1 must be missing on sg2
    # immediately after the replication starts, and present once it completes.
    assert num_docs > 0
    # if the async stuff works, we should be able to kick off a large
    # push replication and get a missing doc before the replication has
    # a chance to finish. And then we should later see that doc.
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    log_info("Running 'test_sg_replicate_push_async'")
    log_info("Using cluster_config: {}".format(cluster_config))
    config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode)
    sg1, sg2 = create_sync_gateways(
        cluster_config=cluster_config,
        sg_config_path=config
    )
    admin = Admin(sg1)
    admin.admin_url = sg1.url
    sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2)
    # Add docs to sg1, remembering the last doc id for the sync checks below
    # NOTE(review): 'xrange' and the 'async=True' keyword argument further down
    # are Python 2 only ('async' became a reserved word in Python 3.7); this
    # block would need porting to run under Python 3.
    doc_ids_added = []
    last_doc_id_added = None
    for i in xrange(num_docs):
        doc_id = sg1_user.add_doc()
        doc_ids_added.append(doc_id)
        last_doc_id_added = doc_id
    # Wait until the doc shows up on sg1's changes feed
    wait_until_doc_in_changes_feed(sg1, DB1, last_doc_id_added)
    # Try to get the last doc added from the target -- assert that we get an exception
    assert_does_not_have_doc(sg2_user, last_doc_id_added)
    # Kick off a one-off push replication with async=true
    sg1.start_push_replication(
        sg2.admin.admin_url,
        DB1,
        DB2,
        continuous=False,
        use_remote_source=True,
        async=True,
        use_admin_url=True
    )
    # Wait until that doc shows up on the target
    wait_until_doc_sync(sg2_user, last_doc_id_added)
    # At this point, the active tasks should be empty
    wait_until_active_tasks_empty(sg1)
示例11: save_cbgt_diagnostics
def save_cbgt_diagnostics(self):
    """Collect and log CBGT diagnostics from each sg_accel's REST admin API."""
    for accel_writer in self.sg_accels:
        admin_api = Admin(accel_writer)
        diagnostics = admin_api.get_cbgt_diagnostics()
        admin_api.get_cbgt_cfg()

        # Dump the raw diagnostics as pretty-printed JSON
        formatted = json.dumps(diagnostics, sort_keys=True, indent=4, separators=(',', ': '))
        log_info("SG {} CBGT diagnostic output: {}".format(accel_writer, formatted))
示例12: test_multiple_users_multiple_channels
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):
    """Three users on overlapping channels each push docs; verify each user's
    changes feed contains exactly the docs from the channels they subscribe to."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sg = cluster.sync_gateways[0]
    admin = Admin(sg)
    seth = admin.register_user(target=sg, db="db", name="seth", password="password", channels=["ABC"])
    adam = admin.register_user(target=sg, db="db", name="adam", password="password", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sg, db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)    # ABC
    adam.add_docs(num_docs_adam)    # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    def merged(caches):
        # Flatten a list of per-user doc caches into one dict
        return {k: v for cache in caches for k, v in cache.items()}

    # Seth sees docs from seth + traun (shared channel ABC)
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun,
                   expected_num_revisions=0, expected_docs=merged([seth.cache, traun.cache]))

    # Adam sees docs from adam + traun (shared channels NBC, CBS)
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun,
                   expected_num_revisions=0, expected_docs=merged([adam.cache, traun.cache]))

    # Traun sees docs from all three users
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun,
                   expected_num_revisions=0, expected_docs=merged([seth.cache, adam.cache, traun.cache]))
示例13: test_dcp_reshard_sync_gateway_goes_down
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup, sg_conf):
    """Stop an sg_accel while docs are being written, verify the docs still
    land, and confirm the cluster reports exactly that node ('ac1') as down."""
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun",
                                password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth",
                               password="password", channels=["FOX"])
    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        tasks = dict()

        log_info(">>> Adding Seth docs")  # FOX
        tasks[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        tasks[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # Stop sg_accel while the writes are in flight
        assert cluster.sg_accels[0].stop() == 0

        for completed in concurrent.futures.as_completed(tasks):
            log_info("{} Completed:".format(tasks[completed]))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=2000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=8000, expected_num_revisions=0, expected_docs=seth.cache)

    # Exactly the stopped node ('ac1') should be down; everything else alive
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failed node so cluster verification in test teardown passes
    assert cluster.sg_accels[0].start(sg_conf) == 0
示例14: test_webhooks
def test_webhooks(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Add and update docs with a webhook handler registered and verify one
    webhook event arrives per doc add and per doc update."""
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_webhooks'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)
    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    webhook_server = WebServer()
    webhook_server.start()

    admin = Admin(cluster.sync_gateways[0])

    # Register users
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db",
                                             name_prefix="User", number=num_users,
                                             password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    # Give the webhook handler time to receive every event
    time.sleep(30)
    webhook_server.stop()

    # One event per add plus one per update
    expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs)
    received_events = len(webhook_server.get_data())
    log_info("expected_events: {} received_events {}".format(expected_events, received_events))
    assert expected_events == received_events
示例15: test_longpoll_changes_sanity
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """One user longpolls _changes while another pushes and updates docs; a
    'killpolling' doc on a shared channel terminates the poller. Verify the
    poller saw exactly the docs the pusher wrote."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth",
                               password="password", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher",
                                         password="password", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator",
                                         password="password", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        tasks = dict()
        tasks[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        tasks[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(tasks):
            task_name = tasks[future]
            if task_name == "doc_pusher":
                # Adds are done; push the updates, then terminate seth's longpoll
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)
                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # abc_doc_pusher must see exactly its own docs in its changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions,
                   expected_docs=abc_doc_pusher.cache)

    # Seth's longpoll must have seen the same docs abc_doc_pusher wrote
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)