This article collects typical usage examples of the Python method libraries.testkit.admin.Admin.get_cbgt_cfg. If you are unsure what Admin.get_cbgt_cfg does or how to use it, the hand-picked code examples below may help. You can also read more about the containing class, libraries.testkit.admin.Admin.
Below are 2 code examples of Admin.get_cbgt_cfg, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: save_cbgt_diagnostics
# Required import: from libraries.testkit.admin import Admin [as alias]
# Or: from libraries.testkit.admin.Admin import get_cbgt_cfg [as alias]
def save_cbgt_diagnostics(self):
    # json and log_info are assumed to be imported by the enclosing module
    # CBGT REST Admin API endpoint
    for sync_gateway_writer in self.sg_accels:
        adminApi = Admin(sync_gateway_writer)
        cbgt_diagnostics = adminApi.get_cbgt_diagnostics()
        adminApi.get_cbgt_cfg()

        # dump raw diagnostics
        pretty_print_json = json.dumps(cbgt_diagnostics, sort_keys=True, indent=4, separators=(',', ': '))
        log_info("SG {} CBGT diagnostic output: {}".format(sync_gateway_writer, pretty_print_json))
Example 2: validate_cbgt_pindex_distribution
# Required import: from libraries.testkit.admin import Admin [as alias]
# Or: from libraries.testkit.admin.Admin import get_cbgt_cfg [as alias]
def validate_cbgt_pindex_distribution(self, num_running_sg_accels):

    if num_running_sg_accels < 1:
        raise keywords.exceptions.ClusterError("Need at least one sg_accel running to verify pindexes")

    # build a map of node -> num_pindexes
    node_defs_pindex_counts = {}

    # CBGT REST Admin API endpoint
    adminApi = Admin(self.sg_accels[0])
    cbgt_cfg = adminApi.get_cbgt_cfg()

    # loop over the planPIndexes and update the count for the node where each pindex lives.
    # this will end up with a dictionary like:
    #   {'74c818f04b99b169': 32, '11886131c807a30e': 32}  (each node uuid has 32 pindexes)
    plan_pindexes = cbgt_cfg["planPIndexes"]["planPIndexes"]
    for data_bucket_key, data_bucket_val in plan_pindexes.items():  # .iteritems() under Python 2

        # get the nodes where this pindex lives
        nodes = data_bucket_val["nodes"]

        # it should only live on one node.  if not, abort.
        if len(nodes) > 1:
            raise Exception("Unexpected: a CBGT Pindex was assigned to more than one node")

        # loop over the nodes where this pindex lives and increment the count
        for node in nodes:

            # add a key for this node if we don't already have one
            if node not in node_defs_pindex_counts:
                node_defs_pindex_counts[node] = 0

            node_defs_pindex_counts[node] += 1

    log_info("CBGT node to pindex counts: {}".format(node_defs_pindex_counts))

    # make sure the number of unique node uuids equals the number of sync gateway writers
    if len(node_defs_pindex_counts) != num_running_sg_accels:
        log_info("CBGT len(unique_node_uuids) != len(self.sync_gateway_writers) ({} != {})".format(
            len(node_defs_pindex_counts),
            num_running_sg_accels
        ))
        return False

    # make sure that all of the nodes have approximately the same number of pindexes assigned to them
    i = 0
    num_pindex_first_node = 0
    for node_def_uuid, num_pindexes in node_defs_pindex_counts.items():  # .iteritems() under Python 2
        if i == 0:
            # it's the first node we've looked at, just record its number of pindexes and continue
            num_pindex_first_node = num_pindexes
            i += 1
            continue

        # ok, it's the 2nd+ node; make sure the delta with the first node is at most 1
        # (we can't compare for equality because the pindexes can't always be
        # divided evenly across the cluster)
        delta = abs(num_pindex_first_node - num_pindexes)
        if delta > 1:
            log_info("CBGT Sync Gateway node {} has {} pindexes, but another node has {} pindexes.".format(
                node_def_uuid,
                num_pindexes,
                num_pindex_first_node
            ))
            return False

    return True
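The even-distribution check in Example 2 tolerates a difference of at most one pindex between nodes, because the total number of pindexes may not divide evenly across the running sg_accel nodes. A minimal standalone sketch of that tolerance check, using made-up node UUIDs and counts; it compares the max and min counts, which matches the loop above for two nodes and is slightly stricter for three or more:

# Hypothetical node-uuid -> pindex-count maps (stand-ins for real CBGT node UUIDs)
balanced = {'74c818f04b99b169': 32, '11886131c807a30e': 32}
acceptable = {'74c818f04b99b169': 17, '11886131c807a30e': 16}  # 33 pindexes cannot split evenly
skewed = {'74c818f04b99b169': 40, '11886131c807a30e': 24}

def pindexes_balanced(counts):
    # all per-node counts must be within 1 of each other
    return max(counts.values()) - min(counts.values()) <= 1

assert pindexes_balanced(balanced)
assert pindexes_balanced(acceptable)
assert not pindexes_balanced(skewed)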