本文整理汇总了Python中libraries.testkit.admin.Admin.take_db_offline方法的典型用法代码示例。如果您正苦于以下问题:Python Admin.take_db_offline方法的具体用法?Python Admin.take_db_offline怎么用?Python Admin.take_db_offline使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类libraries.testkit.admin.Admin
的用法示例。
在下文中一共展示了Admin.take_db_offline方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_online_to_offline_check_503
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import take_db_offline [as 别名]
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):
    """Verify that taking a db offline makes all of its REST endpoints return 503.

    Steps:
    1. Reset the cluster and confirm all db endpoints work while online.
    2. Take the db offline via the admin API (expects HTTP 200).
    3. Confirm every db endpoint now responds with 503 Service Unavailable.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # All db endpoints should function as expected while the db is online.
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0

    # Take bucket offline; the admin endpoint should return HTTP 200.
    status = admin.take_db_offline(db="db")
    assert status == 200

    # All db endpoints should return 503 now that the db is offline.
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    # We hit NUM_ENDPOINTS unique REST endpoints + num of doc PUT failures.
    # NOTE(review): the factor of 2 suggests each doc PUT yields two error
    # entries — confirm against rest_scan's implementation.
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503
示例2: test_db_delayed_online
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import take_db_offline [as 别名]
def test_db_delayed_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """Verify a db can be brought back online with a server-side delay.

    Takes the db offline, then requests it be brought online with a 15s
    delay (issued from a worker thread), and finally confirms the db
    reports "Online" and all REST endpoints succeed again.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    time.sleep(2)
    status = admin.take_db_offline("db")
    log_info("offline request response status: {}".format(status))
    time.sleep(10)

    pool = ThreadPool(processes=1)

    # The db must report Offline before we schedule the delayed online request.
    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Offline"

    # Request the db be brought online after a 15 second delay; run the
    # request on a worker thread so this test thread can await the result.
    async_result = pool.apply_async(admin.bring_db_online, ("db", 15,))
    status = async_result.get(timeout=15)
    log_info("offline request response status: {}".format(status))

    # Allow the delayed online transition to complete before checking state.
    time.sleep(20)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Online"

    # All db REST endpoints should succeed once the db is back online.
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
示例3: test_bucket_online_offline_resync_with_offline
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import take_db_offline [as 别名]
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
start = time.time()
cluster_conf = params_from_base_test_setup["cluster_config"]
test_mode = params_from_base_test_setup["mode"]
if test_mode == "di":
pytest.skip("Unsupported feature in distributed index")
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)
log_info("Running 'test_bucket_online_offline_resync_with_online'")
log_info("Using cluster_conf: {}".format(cluster_conf))
log_info("Using num_users: {}".format(num_users))
log_info("Using num_docs: {}".format(num_docs))
log_info("Using num_revisions: {}".format(num_revisions))
cluster = Cluster(config=cluster_conf)
cluster.reset(sg_conf)
init_completed = time.time()
log_info("Initialization completed. Time taken:{}s".format(init_completed - start))
num_channels = 1
channels = ["channel-" + str(i) for i in range(num_channels)]
password = "password"
sgs = cluster.sync_gateways
admin = Admin(sgs[0])
# Register User
log_info("Register User")
user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
number=num_users, password=password, channels=channels)
user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="password", channels=["channel_x"])
# Add User
log_info("Add docs")
in_parallel(user_objects, 'add_docs', num_docs)
# Update docs
log_info("Update docs")
in_parallel(user_objects, 'update_docs', num_revisions)
time.sleep(10)
# Get changes for all users
in_parallel(user_objects, 'get_changes')
# every user should have same number of docs
# total/expected docs = num_users * num_docs
recieved_docs = in_parallel(user_objects, 'get_num_docs')
expected_docs = num_users * num_docs
for user_obj, docs in recieved_docs.items():
log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
assert docs == expected_docs
# Verify that
# user created doc-ids exist in docs received in changes feed
# expected revision is equal to received revision
expected_revision = str(num_revisions + 1)
docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
rev_errors = []
for user_obj, docs_revision_dict in docs_rev_dict.items():
for doc_id in docs_revision_dict.keys():
rev = docs_revision_dict[doc_id]
log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision
))
if rev != expected_revision:
rev_errors.append(doc_id)
log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision
))
assert len(rev_errors) == 0
# Verify each User created docs are part of changes feed
output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
assert True in output.values()
# Take "db" offline
status = admin.take_db_offline(db="db")
assert status == 200
sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
assert restart_status == 0
log_info("Sleeping....")
time.sleep(10)
pool = ThreadPool(processes=1)
# ......... remainder of this example omitted .........
示例4: test_bucket_online_offline_resync_sanity
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import take_db_offline [as 别名]
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
cluster_conf = params_from_base_test_setup["cluster_config"]
test_mode = params_from_base_test_setup["mode"]
if test_mode == "di":
pytest.skip("Unsupported feature in distributed index")
sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)
log_info("Running 'test_bucket_online_offline_resync_sanity'")
log_info("Using cluster_conf: {}".format(cluster_conf))
log_info("Using num_users: {}".format(num_users))
log_info("Using num_docs: {}".format(num_docs))
log_info("Using num_revisions: {}".format(num_revisions))
start = time.time()
cluster = Cluster(config=cluster_conf)
cluster.reset(sg_conf)
init_completed = time.time()
log_info("Initialization completed. Time taken:{}s".format(init_completed - start))
num_channels = 1
channels = ["channel-" + str(i) for i in range(num_channels)]
password = "password"
sgs = cluster.sync_gateways
admin = Admin(sgs[0])
# Register User
log_info("Register User")
user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
number=num_users, password=password, channels=channels)
user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="password", channels=["channel_x"])
# Add User
log_info("Add docs")
in_parallel(user_objects, 'add_docs', num_docs)
# Update docs
log_info("Update docs")
in_parallel(user_objects, 'update_docs', num_revisions)
time.sleep(10)
# Get changes for all users
in_parallel(user_objects, 'get_changes')
# every user should have same number of docs
# total/expected docs = num_users * num_docs
recieved_docs = in_parallel(user_objects, 'get_num_docs')
expected_docs = num_users * num_docs
for user_obj, docs in recieved_docs.items():
log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
assert docs == expected_docs
# Verify that
# user created doc-ids exist in docs received in changes feed
# expected revision is equal to received revision
expected_revision = str(num_revisions + 1)
docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
rev_errors = []
for user_obj, docs_revision_dict in docs_rev_dict.items():
for doc_id in docs_revision_dict.keys():
rev = docs_revision_dict[doc_id]
log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name,
doc_id, rev, expected_revision))
if rev != expected_revision:
rev_errors.append(doc_id)
log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
user_obj.name,
doc_id,
rev,
expected_revision)
)
assert len(rev_errors) == 0
# Verify each User created docs are part of changes feed
output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
assert True in output.values()
# Take "db" offline
status = admin.take_db_offline(db="db")
assert status == 200
sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
assert restart_status == 0
time.sleep(10)
num_changes = admin.db_resync(db="db")
log_info("expecting num_changes {} == num_docs {} * num_users {}".format(num_changes, num_docs, num_users))
assert num_changes['payload']['changes'] == num_docs * num_users
# ......... remainder of this example omitted .........
示例5: test_db_online_offline_webhooks_offline
# 需要导入模块: from libraries.testkit.admin import Admin [as 别名]
# 或者: from libraries.testkit.admin.Admin import take_db_offline [as 别名]
def test_db_online_offline_webhooks_offline(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Verify webhook 'state' events fire when a db goes offline and back online.

    Registers users and writes/updates docs, then toggles the db
    offline/online and asserts the last webhook event received by a local
    web server matches the db state reported by the admin API.
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "password"

    # Local web server that receives the sync gateway's webhook POSTs.
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways
    admin = Admin(sgs[0])

    # Register users
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Take the db offline and give the webhook time to fire.
    admin.take_db_offline("db")
    time.sleep(5)

    db_info = admin.get_db_info("db")
    log_info("Expecting db state {} found db state {}".format("Offline", db_info['state']))
    assert db_info["state"] == "Offline"

    # The most recent webhook event should report the 'offline' state.
    webhook_events = ws.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(webhook_events))
    last_event = webhook_events[-1]
    assert last_event['state'] == 'offline'

    # Bring the db back online and verify the matching 'online' webhook event.
    admin.bring_db_online("db")
    time.sleep(5)

    db_info = admin.get_db_info("db")
    log_info("Expecting db state {} found db state {}".format("Online", db_info['state']))
    assert db_info["state"] == "Online"

    webhook_events = ws.get_data()
    last_event = webhook_events[-1]
    assert last_event['state'] == 'online'
    time.sleep(10)
    log_info("webhook event {}".format(webhook_events))

    ws.stop()