This page collects typical usage examples of the Python method libraries.testkit.cluster.Cluster.validate_cbgt_pindex_distribution. If you are unsure what Cluster.validate_cbgt_pindex_distribution does or how to call it, the curated example below should help. You can also look at the containing class, libraries.testkit.cluster.Cluster, for further context.
One code example of Cluster.validate_cbgt_pindex_distribution is shown below.
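For orientation, here is a minimal sketch of calling the method on its own. It assumes only what the example below demonstrates: that Cluster takes a config path and that validate_cbgt_pindex_distribution accepts a num_running_sg_accels keyword and returns a truthy value when the cbgt pindexes are evenly distributed across the running sg_accel nodes. The config path itself is hypothetical.

from libraries.testkit.cluster import Cluster

# Hypothetical cluster config path -- substitute your own
cluster = Cluster(config="path/to/cluster_config")

# Truthy when the cbgt pindexes are spread across the given number of
# running sg_accel nodes (keyword taken from the example below)
assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=3)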
Example 1: test_take_down_bring_up_sg_accel_validate_cbgt
# Required import: from libraries.testkit.cluster import Cluster [as alias]
# Or: from libraries.testkit.cluster.Cluster import validate_cbgt_pindex_distribution [as alias]
# The example below also needs the following imports; the module paths are assumed
# from the mobile-testkit repository layout and may differ in your checkout:
#   import concurrent.futures
#   from keywords import userinfo
#   from keywords.utils import log_info
#   from keywords.ClusterKeywords import ClusterKeywords
#   from keywords.MobileRestClient import MobileRestClient
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf):
    """
    Scenario 1:
    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll the port if needed)
    Add docs with uuids (~30 sec for cbgt to reshard the pindexes)
    Polling loop: wait for all docs to come back over the changes feed
    Call validate pindex with the correct number of running accels

    Scenario 2 (continuation):
    When bringing nodes back up, poll the cbgt cfg until the expected number of
    nodes is seen, then validate the pindex distribution with 2 accels
    (a polling sketch follows this example)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_take_down_bring_up_sg_accel_validate_cbgt'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()
    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )
log_info("Shutting down sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))
# Shutdown two accel nodes in parallel
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
assert sg_accel_down_task_1.result() == 0
assert sg_accel_down_task_2.result() == 0
log_info("Finished taking nodes down!")
    # It should take ~30 seconds for cbgt to pick up the failed nodes and reshard the
    # pindexes. During this window, add 1000 docs and start a longpoll changes loop to
    # see whether those docs make it to the changes feed. If the reshard succeeds they
    # will show up at some point after; if not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=1000,
        id_prefix=None,
        auth=doc_pusher_auth,
        channels=doc_pusher_user_info.channels
    )
    assert len(doc_pusher_docs) == 1000

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5)
    # The pindexes should be resharded at this point, since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)
log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))
# Start two accel nodes in parallel
status = cluster.sg_accels[1].start(sg_conf)
assert status == 0
# Poll on pIndex reshard after bring 2 accel nodes back
assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=2)
status = cluster.sg_accels[2].start(sg_conf)
assert status == 0
# Poll on pIndex reshard after bring 2 accel nodes back
assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=3)
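The example also calls validate_cbgt_pindex_distribution_retry after each node comes back; its source is not shown here. As a rough illustration of the "poll the cbgt cfg until you get the expected number of nodes" idea from the docstring, a retry wrapper around validate_cbgt_pindex_distribution could look like the sketch below. The helper name poll_pindex_distribution, the timeout, and the interval are assumptions for illustration, not part of the mobile-testkit API.

import time

def poll_pindex_distribution(cluster, num_running_sg_accels, timeout=120, interval=5):
    # Re-check the pindex distribution until it balances out or the timeout
    # expires. cbgt needs ~30 seconds to notice membership changes and reshard,
    # so a one-shot check right after a node joins would race the resharding.
    start = time.time()
    while time.time() - start < timeout:
        if cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=num_running_sg_accels):
            return True
        time.sleep(interval)
    return False

# Usage mirroring the example above (hypothetical helper):
# assert poll_pindex_distribution(cluster, num_running_sg_accels=2)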