本文整理汇总了Python中libraries.testkit.admin.Admin.bring_db_online方法的典型用法代码示例。如果您正苦于以下问题:Python Admin.bring_db_online方法的具体用法?Python Admin.bring_db_online怎么用?Python Admin.bring_db_online使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类libraries.testkit.admin.Admin的用法示例。
在下文中一共展示了Admin.bring_db_online方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_offline_true_config_bring_online
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """Start Sync Gateway with the db configured offline, verify every db
    endpoint is rejected with 503, bring the db online via the admin API
    (Scenario 9: POST /db/_online), then verify the endpoints succeed.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)

    sync_gateway = cluster.sync_gateways[0]
    admin = Admin(sync_gateway)

    # While the db is offline, every db endpoint should fail with a 503
    failures = rest_scan(sync_gateway, db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(failures) == NUM_ENDPOINTS + (num_docs * 2)
    for failure in failures:
        log_info("({},{})".format(failure[0], failure[1]))
        assert failure[1] == 503

    # Scenario 9
    # POST /db/_online
    status = admin.bring_db_online(db="db")
    assert status == 200

    # After coming online, the same scan should report no errors
    failures = rest_scan(sync_gateway, db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(failures) == 0
示例2: test_bucket_online_offline_resync_with_offline
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
#.........这里部分代码省略.........
for doc_id in docs_revision_dict.keys():
rev = docs_revision_dict[doc_id]
log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision
))
if rev != expected_revision:
rev_errors.append(doc_id)
log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision
))
assert len(rev_errors) == 0
# Verify each User created docs are part of changes feed
output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
assert True in output.values()
# Take "db" offline
status = admin.take_db_offline(db="db")
assert status == 200
sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
assert restart_status == 0
log_info("Sleeping....")
time.sleep(10)
pool = ThreadPool(processes=1)
log_info("Restarted SG....")
time.sleep(5)
db_info = admin.get_db_info("db")
log_info("Status of db = {}".format(db_info["state"]))
assert db_info["state"] == "Offline"
try:
async_resync_result = pool.apply_async(admin.db_resync, ("db",))
log_info("resync issued !!!!!!")
except Exception as e:
log_info("Catch resync exception: {}".format(e))
time.sleep(1)
resync_occured = False
for i in range(20):
db_info = admin.get_db_info("db")
log_info("Status of db = {}".format(db_info["state"]))
if db_info["state"] == "Resyncing":
resync_occured = True
log_info("Resync occured")
try:
status = admin.get_db_info(db="db")
log_info("Got db_info request status: {}".format(status))
except HTTPError as e:
log_info("status = {} exception = {}".format(status, e.response.status_code))
assert False
else:
log_info("Got 200 ok for supported operation")
time.sleep(1)
if resync_occured:
break
time.sleep(10)
status = admin.bring_db_online(db="db")
log_info("online request issued !!!!! response status: {}".format(status))
time.sleep(5)
db_info = admin.get_db_info("db")
log_info("Status of db = {}".format(db_info["state"]))
assert db_info["state"] == "Online"
resync_result = async_resync_result.get()
log_info("resync_changes {}".format(resync_result))
log_info("expecting num_changes == num_docs {} * num_users {}".format(num_docs, num_users))
assert resync_result['payload']['changes'] == num_docs * num_users
assert resync_result['status_code'] == 200
time.sleep(5)
global_cache = list()
for user in user_objects:
global_cache.append(user.cache)
all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}
verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
end = time.time()
log_info("Test ended.")
log_info("Main test duration: {}".format(end - init_completed))
log_info("Test setup time: {}".format(init_completed - start))
log_info("Total Time taken: {}s".format(end - start))
示例3: test_bucket_online_offline_resync_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
#.........这里部分代码省略.........
cluster = Cluster(config=cluster_conf)
cluster.reset(sg_conf)
init_completed = time.time()
log_info("Initialization completed. Time taken:{}s".format(init_completed - start))
num_channels = 1
channels = ["channel-" + str(i) for i in range(num_channels)]
password = "password"
sgs = cluster.sync_gateways
admin = Admin(sgs[0])
# Register User
log_info("Register User")
user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
number=num_users, password=password, channels=channels)
user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="password", channels=["channel_x"])
# Add User
log_info("Add docs")
in_parallel(user_objects, 'add_docs', num_docs)
# Update docs
log_info("Update docs")
in_parallel(user_objects, 'update_docs', num_revisions)
time.sleep(10)
# Get changes for all users
in_parallel(user_objects, 'get_changes')
# every user should have same number of docs
# total/expected docs = num_users * num_docs
recieved_docs = in_parallel(user_objects, 'get_num_docs')
expected_docs = num_users * num_docs
for user_obj, docs in recieved_docs.items():
log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
assert docs == expected_docs
# Verify that
# user created doc-ids exist in docs received in changes feed
# expected revision is equal to received revision
expected_revision = str(num_revisions + 1)
docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
rev_errors = []
for user_obj, docs_revision_dict in docs_rev_dict.items():
for doc_id in docs_revision_dict.keys():
rev = docs_revision_dict[doc_id]
log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name,
doc_id, rev, expected_revision))
if rev != expected_revision:
rev_errors.append(doc_id)
log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision)
)
assert len(rev_errors) == 0
# Verify each User created docs are part of changes feed
output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
assert True in output.values()
# Take "db" offline
status = admin.take_db_offline(db="db")
assert status == 200
sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
assert restart_status == 0
time.sleep(10)
num_changes = admin.db_resync(db="db")
log_info("expecting num_changes {} == num_docs {} * num_users {}".format(num_changes, num_docs, num_users))
assert num_changes['payload']['changes'] == num_docs * num_users
status = admin.bring_db_online(db="db")
assert status == 200
time.sleep(5)
global_cache = list()
for user in user_objects:
global_cache.append(user.cache)
all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}
verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
end = time.time()
log_info("Test ended.")
log_info("Main test duration: {}".format(end - init_completed))
log_info("Test setup time: {}".format(init_completed - start))
log_info("Total Time taken: {}s".format(end - start))
示例4: test_online_to_offline_changes_feed_controlled_close_longpoll
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
def test_online_to_offline_changes_feed_controlled_close_longpoll(params_from_base_test_setup, sg_conf_name, num_docs):
    """Open a longpoll _changes feed while pushing docs, take the db offline
    mid-run, and verify: the feed terminates (503) carrying the docs seen so
    far, some doc adds fail, and after bringing the db back online all
    successfully pushed docs appear in _changes.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["ABC"])
    doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_pusher", password="password", channels=["ABC"])

    docs_in_changes = {}
    doc_add_errors = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # Kick off the longpoll tracker and the doc pusher, then take the db
        # offline a few seconds into the run.
        tasks = {
            executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id=None): "polling",
            executor.submit(doc_pusher.add_docs, num_docs): "docs_push",
        }
        time.sleep(5)
        tasks[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for done in concurrent.futures.as_completed(tasks):
            task_name = tasks[done]
            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert done.result() == 200
            if task_name == "docs_push":
                log_info("DONE PUSHING DOCS")
                doc_add_errors = done.result()
            if task_name == "polling":
                # Long poll will exit with 503, return docs in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes = done.result()
                except Exception as e:
                    log_info(e)
                    log_info("POLLING DONE EXCEPTION")
                    log_info("ARGS: {}".format(e.args))
                    docs_in_changes = e.args[0]["docs"]
                    last_seq_num = e.args[0]["last_seq_num"]
                    log_info("DOCS FROM longpoll")
                    for k, v in docs_in_changes.items():
                        log_info("DFC -> {}:{}".format(k, v))
                    log_info("LAST_SEQ_NUM FROM longpoll {}".format(last_seq_num))

    log_info("Number of docs from _changes ({})".format(len(docs_in_changes)))
    log_info("last_seq_num _changes ({})".format(last_seq_num))
    log_info("Number of docs add errors ({})".format(len(doc_add_errors)))

    # Some docs should have made it to _changes
    assert len(docs_in_changes) > 0
    # Make sure some docs failed due to db being taken offline
    assert len(doc_add_errors) > 0

    seq_num_component = last_seq_num.split("-")
    if mode == "cc":
        # assert the last_seq_number == number _changes + 2 (_user doc starts and one and docs start at _user doc seq + 2)
        assert len(docs_in_changes) + 2 == int(seq_num_component[0])
    else:
        # assert the value is not an empty string
        assert last_seq_num != ""

    # Bring db back online
    status = admin.bring_db_online("db")
    assert status == 200

    # Get all docs that have been pushed
    # Verify that changes returns all of them
    all_docs = doc_pusher.get_all_docs()
    num_docs_pushed = len(all_docs["rows"])
    verify_changes(doc_pusher, expected_num_docs=num_docs_pushed, expected_num_revisions=0, expected_docs=doc_pusher.cache)

    # Check that the number of errors return when trying to push while db is offline + num of docs in db
    # should equal the number of docs
    assert num_docs_pushed + len(doc_add_errors) == num_docs
示例5: test_online_to_offline_changes_feed_controlled_close_continuous
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
def test_online_to_offline_changes_feed_controlled_close_continuous(params_from_base_test_setup, sg_conf_name, num_docs):
    """Open a continuous _changes feed while pushing docs, take the db offline
    mid-run, and verify the feed returns some docs, some doc adds fail, and
    after bringing the db back online all pushed docs appear in _changes.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="password", channels=["ABC"])
    doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_pusher", password="password", channels=["ABC"])

    docs_in_changes = {}
    doc_add_errors = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # Start the continuous tracker and the doc pusher, then take the db
        # offline a few seconds into the run.
        tasks = {
            executor.submit(seth.start_continuous_changes_tracking, termination_doc_id=None): "continuous",
            executor.submit(doc_pusher.add_docs, num_docs): "docs_push",
        }
        time.sleep(5)
        tasks[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for done in concurrent.futures.as_completed(tasks):
            task_name = tasks[done]
            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert done.result() == 200
            elif task_name == "docs_push":
                log_info("DONE PUSHING DOCS")
                doc_add_errors = done.result()
            elif task_name == "continuous":
                docs_in_changes = done.result()
                log_info("DOCS FROM CHANGES")
                for k, v in docs_in_changes.items():
                    log_info("DFC -> {}:{}".format(k, v))

    log_info("Number of docs from _changes ({})".format(len(docs_in_changes)))
    log_info("Number of docs add errors ({})".format(len(doc_add_errors)))

    # Some docs should have made it to _changes
    assert len(docs_in_changes) > 0

    # Bring db back online
    status = admin.bring_db_online("db")
    assert status == 200

    # Get all docs that have been pushed
    # Verify that changes returns all of them
    all_docs = doc_pusher.get_all_docs()
    num_docs_pushed = len(all_docs["rows"])
    verify_changes(doc_pusher, expected_num_docs=num_docs_pushed, expected_num_revisions=0, expected_docs=doc_pusher.cache)

    # Check that the number of errors return when trying to push while db is offline + num of docs in db
    # should equal the number of docs
    assert num_docs_pushed + len(doc_add_errors) == num_docs
示例6: test_db_online_offline_webhooks_offline
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import bring_db_online [as 别名]
def test_db_online_offline_webhooks_offline(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Verify that webhook events are emitted when the db is taken offline and
    brought back online: the last webhook event after each transition must
    report the matching 'offline' / 'online' state.

    Fixes over the original:
    - assert the HTTP status of take_db_offline / bring_db_online (== 200),
      matching every sibling online/offline test in this module
    - stop the webhook WebServer in a finally block so it is not leaked when
      an assertion fails mid-test
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    # Local web server that receives Sync Gateway's webhook POSTs
    ws = WebServer()
    ws.start()
    try:
        sgs = cluster.sync_gateways
        admin = Admin(sgs[0])

        # Register User
        log_info("Register User")
        user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                                 number=num_users, password=password, channels=channels)

        # Add User
        log_info("Add docs")
        in_parallel(user_objects, 'add_docs', num_docs)

        # Update docs
        log_info("Update docs")
        in_parallel(user_objects, 'update_docs', num_revisions)
        time.sleep(10)

        # Take the db offline and confirm the admin API accepted the request
        status = admin.take_db_offline("db")
        assert status == 200
        time.sleep(5)

        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Offline", db_info['state']))
        assert db_info["state"] == "Offline"

        # The most recent webhook event should reflect the offline transition
        webhook_events = ws.get_data()
        time.sleep(5)
        log_info("webhook event {}".format(webhook_events))
        last_event = webhook_events[-1]
        assert last_event['state'] == 'offline'

        # Bring the db back online and confirm the admin API accepted the request
        status = admin.bring_db_online("db")
        assert status == 200
        time.sleep(5)

        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Online", db_info['state']))
        assert db_info["state"] == "Online"

        # The most recent webhook event should reflect the online transition
        webhook_events = ws.get_data()
        last_event = webhook_events[-1]
        assert last_event['state'] == 'online'

        time.sleep(10)
        log_info("webhook event {}".format(webhook_events))
    finally:
        # Always release the webhook listener, even if an assertion failed
        ws.stop()