本文整理汇总了Python中libraries.testkit.admin.Admin.register_user方法的典型用法代码示例。如果您正苦于以下问题:Python Admin.register_user方法的具体用法?Python Admin.register_user怎么用?Python Admin.register_user使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类libraries.testkit.admin.Admin
的用法示例。
在下文中一共展示了Admin.register_user方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_sg_users_channels
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def create_sg_users_channels(sg1, sg2, db1, db2):
    """Register three users across two Sync Gateways.

    sg1/db1 gets one user on channel "A" and one on channel "B";
    sg2/db2 gets a single user subscribed to all channels ("*").

    Returns:
        (sg1a_user, sg1b_user, sg2_user) tuple of registered users.
    """
    sg1_admin = Admin(sg1)
    sg2_admin = Admin(sg2)

    user_a = sg1_admin.register_user(
        target=sg1,
        db=db1,
        name="sg1A_user",
        password="sg1A_user",
        channels=["A"],
    )
    user_b = sg1_admin.register_user(
        target=sg1,
        db=db1,
        name="sg1B_user",
        password="sg1B_user",
        channels=["B"],
    )
    user_all = sg2_admin.register_user(
        target=sg2,
        db=db2,
        name="sg2_user",
        password="sg2_user",
        channels=["*"],
    )

    return user_a, user_b, user_all
示例2: test_dcp_reshard_single_sg_accel_goes_down_and_up
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):
    """Exercise DCP resharding while a single sg_accel goes down and comes back up.

    Starts with the second sg_accel stopped, pushes docs concurrently from two
    users, stops the remaining sg_accel mid-load, pushes more docs while no
    writer is online, restarts the writer, and finally verifies both users see
    their complete doc sets.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel so only one writer is online to start with
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = dict()
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"
        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take down the remaining writer while the loads are in flight.
        # NOTE(review): original comment said "take down a sync_gateway",
        # but this stops sg_accels[0] — confirm intended wording.
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: better way to wait for docs to propagate than a fixed sleep
    time.sleep(120)

    # seth pushed 8000 + 2000 = 10000 docs; traun pushed 10000 docs
    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start the second writer again so cluster verification in teardown passes
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
示例3: test_sync_channel_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):
    """Sanity-check sync-function channel redirection and backfill.

    Docs pushed to the ABC/NBC/CBS channels are redirected by the sync
    function (the pushers themselves see no docs); a subscriber on
    "tv_station_channel" receives them all. After the subscriber updates the
    docs they move back to their original channels and are flagged as
    removed in the subscriber's changes feed.
    """
    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # Push docs into each of the ABC / NBC / CBS channels
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="password", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)
        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs. They should all be redirected by the sync function
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="password", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # Update the subscriber's cache so the user knows which docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in the respective ABC, NBC, CBS channels.
    # HACK: ignoring rev_id verification because the docs were updated by the
    # subscriber user and not the original doc_pusher.
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in the subscriber's changes feed
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
示例4: test_multiple_users_multiple_channels
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):
    """Three users on overlapping channels each see the union of the docs
    from every channel they subscribe to.

    seth -> ABC, adam -> NBC+CBS, traun -> ABC+NBC+CBS; traun therefore
    overlaps with everyone.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth, num_docs_adam, num_docs_traun = 1000, 2000, 3000

    gateway = cluster.sync_gateways[0]
    admin = Admin(gateway)

    seth = admin.register_user(target=gateway, db="db", name="seth", password="password", channels=["ABC"])
    adam = admin.register_user(target=gateway, db="db", name="adam", password="password", channels=["NBC", "CBS"])
    traun = admin.register_user(target=gateway, db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])

    # TODO: use bulk docs
    seth.add_docs(num_docs_seth)    # ABC
    adam.add_docs(num_docs_adam)    # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    for user, count in ((seth, num_docs_seth), (adam, num_docs_adam), (traun, num_docs_traun)):
        assert len(user.cache) == count

    # discuss appropriate time with team
    time.sleep(10)

    def merged(*caches):
        # Combine several doc_id -> rev caches into one dict
        combined = {}
        for cache in caches:
            combined.update(cache)
        return combined

    # Seth should get docs from seth + traun (shared channel ABC)
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=merged(seth.cache, traun.cache))

    # Adam should get docs from adam + traun (shared channels NBC, CBS)
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=merged(adam.cache, traun.cache))

    # Traun overlaps with everyone, so sees all three doc sets
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=merged(seth.cache, adam.cache, traun.cache))
示例5: test_dcp_reshard_sync_gateway_goes_down
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup, sg_conf):
    """DCP reshard behavior when an accel node goes down during a doc load.

    Two users push docs concurrently; sg_accels[0] is stopped while the loads
    are in flight. All docs must still arrive, and cluster verification should
    report exactly one dead node ("ac1"). The node is restarted at the end so
    teardown verification passes.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = dict()
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"
        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # Stop sg_accel while the doc pushes are still running
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: better way to wait for doc propagation than a fixed sleep
    time.sleep(120)

    verify_changes(traun, expected_num_docs=2000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=8000, expected_num_revisions=0, expected_docs=seth.cache)

    # Verify exactly one node is down and the rest are running.
    # NOTE(review): original comment said "sg1 is down", but the assertion
    # checks hostname "ac1" (an accel node) — confirm intended wording.
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failing node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
示例6: test_continuous_changes_parametrized
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Many users follow a continuous _changes feed while docs are pushed.

    Registers num_users subscribers on channels ABC + TERMINATE, pushes
    num_docs docs to ABC, updates each num_revisions times, then posts a
    termination doc ("killcontinuous") to close every continuous feed.
    Each subscriber must have seen exactly the pushed docs.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="password", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="password", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="password", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        # One continuous-changes tracker per user, plus the doc pusher
        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Once the push completes, update the docs and then send the
            # termination doc to the continuous _changes feed subscribers
            if task_name == "doc_pusher":
                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(10)
                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed is closed, it
                # returns the docs seen; verify the user got all channel docs.
                # Feed also contains the termination doc + the _user doc.
                docs_in_changes = future.result()
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
示例7: test_longpoll_changes_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Longpoll _changes sanity: a longpoll subscriber sees every pushed doc.

    "seth" longpolls on ABC + TERMINATE while "abc_doc_pusher" pushes
    num_docs docs and num_revisions updates; "doc_terminator" then posts the
    termination doc ("killpolling") to end the longpoll loop. The docs seth
    saw must match the pusher's cache.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="password", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="password", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:

        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Once the push completes, update docs and then send the
            # termination doc to seth's longpoll subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)

                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # Verify abc_doc_pusher gets the correct docs in its changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Verify the docs seth saw via longpoll match abc_doc_pusher's docs
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
示例8: test_continuous_changes_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Single-subscriber continuous _changes sanity check.

    "seth" tracks a continuous _changes feed on ABC + TERMINATE while
    "abc_doc_pusher" pushes num_docs docs and num_revisions updates;
    "doc_terminator" posts the termination doc ("killcontinuous") to close
    the feed. The docs seth saw must match the pusher's cache.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="password", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="password", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Once the push completes, update docs and then send the
            # termination doc to seth's continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(5)
                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc + the _user doc
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
示例9: init_shadow_cluster
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def init_shadow_cluster(cluster, config_path_shadower, config_path_non_shadower):
    """Bring up a bucket-shadowing cluster and return a populated ShadowCluster.

    Both Sync Gateways are initially reset with the shadower config so that
    both buckets (source and data) get created; sync_gateways[1] is then
    restarted with the non-shadower config, leaving sync_gateways[0] as the
    shadower. Registers "alice" on the shadower and "bob" on the non-shadower.
    """
    # Initial reset with the shadower config creates both buckets
    mode = cluster.reset(sg_config_path=config_path_shadower)

    # Demote the second gateway to a non-shadower via restart
    non_shadower_sg = cluster.sync_gateways[1]
    non_shadower_sg.restart(config_path_non_shadower)

    # The first gateway remains the shadower
    shadower_sg = cluster.sync_gateways[0]

    admin = Admin(non_shadower_sg)

    alice_shadower = admin.register_user(
        target=shadower_sg,
        db="db",
        name="alice",
        password="password",
        channels=["ABC", "NBC", "CBS"],
    )
    bob_non_shadower = admin.register_user(
        target=non_shadower_sg,
        db="db",
        name="bob",
        password="password",
        channels=["ABC", "NBC", "CBS"],
    )

    source_bucket = cluster.servers[0].get_bucket(source_bucket_name)
    data_bucket = cluster.servers[0].get_bucket(data_bucket_name)

    return ShadowCluster(
        bob_non_shadower=bob_non_shadower,
        alice_shadower=alice_shadower,
        admin=admin,
        mode=mode,
        shadower_sg=shadower_sg,
        non_shadower_sg=non_shadower_sg,
        source_bucket=source_bucket,
        data_bucket=data_bucket,
    )
示例10: test_dcp_reshard_sync_gateway_comes_up
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup, sg_conf):
    """DCP reshard behavior when an accel node comes up during a doc load.

    Starts with sg_accels[0] stopped, kicks off concurrent doc pushes from
    two users, brings the accel node back up while the loads are in flight,
    then verifies both users see their complete doc sets.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    # Fixed: previously logged the wrong test name
    # ('test_dcp_reshard_sync_gateway_goes_down') — a copy-paste defect.
    log_info("Running 'test_dcp_reshard_sync_gateway_comes_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Begin with the accel node down
    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="password", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring the accel node up while the doc pushes are in flight
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: better way to wait for doc propagation than a fixed sleep
    time.sleep(60)

    verify_changes(traun, expected_num_docs=6000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=4000, expected_num_revisions=0, expected_docs=seth.cache)
示例11: test_muliple_users_single_channel
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_muliple_users_single_channel(params_from_base_test_setup, sg_conf_name):
    """Several users sharing the single channel "ABC" each end up with the
    union of everyone's docs.

    NOTE: the "muliple" typo is kept in the function name because it is the
    public test id.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth, num_docs_adam, num_docs_traun = 1000, 2000, 3000

    gateway = cluster.sync_gateways[0]
    admin = Admin(gateway)

    seth = admin.register_user(target=gateway, db="db", name="seth", password="password", channels=["ABC"])
    adam = admin.register_user(target=gateway, db="db", name="adam", password="password", channels=["ABC"])
    traun = admin.register_user(target=gateway, db="db", name="traun", password="password", channels=["ABC"])

    seth.add_docs(num_docs_seth)               # ABC, non-bulk
    adam.add_docs(num_docs_adam, bulk=True)    # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    for user, count in ((seth, num_docs_seth), (adam, num_docs_adam), (traun, num_docs_traun)):
        assert len(user.cache) == count

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_docs = {}
    for cache in (seth.cache, adam.cache, traun.cache):
        all_docs.update(cache)

    total_docs = num_docs_seth + num_docs_adam + num_docs_traun
    verify_changes([seth, adam, traun], expected_num_docs=total_docs, expected_num_revisions=0, expected_docs=all_docs)
示例12: test_sync_access_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_sync_access_sanity(params_from_base_test_setup, sg_conf_name):
    """Dynamic channel access granted and revoked via the sync function.

    "seth" starts with no channels; an access doc grants him ABC, so the
    previously pushed ABC docs backfill to him. Revoking the grant then
    empties his changes feed again.
    """
    num_docs = 100

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_access_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    gateway = cluster.sync_gateways[0]
    admin = Admin(gateway)

    # User with no channels; access will be granted dynamically
    seth = admin.register_user(target=gateway, db="db", name="seth", password="password")

    # Push some docs into the ABC channel
    abc_doc_pusher = admin.register_user(target=gateway, db="db", name="abc_doc_pusher", password="password", channels=["ABC"])
    abc_doc_pusher.add_docs(num_docs)

    # Create an access doc pusher and grant seth access to the ABC channel
    access_doc_pusher = admin.register_user(target=gateway, db="db", name="access_doc_pusher", password="password", channels=["access"])
    access_doc_pusher.add_doc(doc_id="access_doc", content={"grant_access": "true"})

    # Allow docs to backfill
    time.sleep(5)

    verify_changes(seth, expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=abc_doc_pusher.cache)

    # Revoke seth's access to ABC
    access_doc_pusher.update_doc(doc_id="access_doc", content={"grant_access": "false"})

    # Push more ABC docs
    abc_doc_pusher.add_docs(num_docs)
    time.sleep(10)

    # seth should no longer see any ABC docs
    verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={})
示例13: test_sync_sanity_backfill
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):
    """A channel grant made after docs exist must backfill the whole channel.

    Docs are pushed to three radio-station channels; dj_0 is then granted
    access to KDWB via an access doc and must receive every KDWB doc.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    gateway = cluster.sync_gateways[0]
    admin = Admin(gateway)

    # User that will later be granted access to the KDWB channel
    dj_0 = admin.register_user(target=gateway, db="db", name="dj_0", password="password")

    kdwb_caches = []
    for station in radio_stations:
        pusher = admin.register_user(target=gateway, db="db", name="{}_doc_pusher".format(station), password="password", channels=[station])
        pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        # Only the KDWB docs are expected to reach dj_0
        if pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(pusher.cache)

    access_doc_pusher = admin.register_user(target=gateway, db="db", name="access_doc_pusher", password="password")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build a global doc_id -> rev dict from all KDWB caches
    kdwb_docs = {}
    for cache in kdwb_caches:
        kdwb_docs.update(cache)

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
示例14: test_single_user_single_channel
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):
    """Users see only their own channels; a user on both channels sees the union.

    seth -> ABC, cbs_user -> CBS, admin_user -> ABC + CBS; admin_user pushes
    nothing but must see everyone else's docs.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_seth_docs = 7000
    num_cbs_docs = 3000

    gateway = cluster.sync_gateways[0]
    admin = Admin(gateway)

    seth = admin.register_user(target=gateway, db="db", name="seth", password="password", channels=["ABC"])
    cbs_user = admin.register_user(target=gateway, db="db", name="cbs_user", password="password", channels=["CBS"])
    admin_user = admin.register_user(target=gateway, db="db", name="admin", password="password", channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)     # ABC
    cbs_user.add_docs(num_cbs_docs)  # CBS

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    # admin_user pushed nothing
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth], expected_num_docs=num_seth_docs, expected_num_revisions=0, expected_docs=seth.cache)
    verify_changes([cbs_user], expected_num_docs=num_cbs_docs, expected_num_revisions=0, expected_docs=cbs_user.cache)

    # admin_user should see seth's and cbs_user's docs combined
    combined_docs = dict(seth.cache)
    combined_docs.update(cbs_user.cache)
    verify_changes([admin_user], expected_num_docs=num_cbs_docs + num_seth_docs, expected_num_revisions=0, expected_docs=combined_docs)
示例15: test_online_to_offline_changes_feed_controlled_close_longpoll_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import register_user [as 别名]
def test_online_to_offline_changes_feed_controlled_close_longpoll_sanity(params_from_base_test_setup, sg_conf_name, num_docs):
    """Taking a db offline must cleanly close a blocked longpoll _changes feed.

    "seth" opens a longpoll _changes request with timeout=0 (blocks until the
    connection is closed), then the db is taken offline via the admin API.
    The feed must return normally (not raise), with no docs and a last_seq
    whose first component is 1 (accounting for the _user doc).
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["ABC"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()

        # Start longpoll tracking with no timeout; it will block until the
        # longpoll request is closed by the db going offline
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id=None, timeout=0, loop=False)] = "polling"

        time.sleep(5)

        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name == "polling":
                # Longpoll should be closed by the db going offline and
                # return the docs it has seen
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # The longpoll should have been closed cleanly, so this
                    # exception should never happen
                    assert 0

                # Account for the _user doc.
                # last_seq may be of the form '1' for channel cache or '1-0' for distributed index
                seq_num_component = last_seq_num.split("-")
                assert 1 == int(seq_num_component[0])

    # The db went offline before any docs were pushed, so the feed saw none
    assert len(docs_in_changes) == 0