本文整理汇总了Python中daos_api.DaosPool.pool_query方法的典型用法代码示例。如果您正苦于以下问题:Python DaosPool.pool_query方法的具体用法?Python DaosPool.pool_query怎么用?Python DaosPool.pool_query使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类daos_api.DaosPool
的用法示例。
在下文中一共展示了DaosPool.pool_query方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_handle
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
def check_handle(buf_len, iov_len, buf, uuidstr, rank):
    """
    Run in a child process: verify that a global pool handle can be
    converted into a working local handle in a different process.
    """
    try:
        # locate the DAOS install via the build-generated variables file
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # bootstrap the DAOS python API inside this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        pool = DaosPool(context)
        pool.set_uuid_str(uuidstr)
        pool.set_svc(rank)
        pool.group = "daos_server"

        # global2local also stores the converted handle inside the pool
        dummy_local_handle = pool.global2local(context, iov_len, buf_len, buf)

        # exercise the fresh handle: query the pool and create a container
        pool.pool_query()
        container = DaosContainer(context)
        container.create(pool.handle)
    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise
    return
示例2: test_rebuild_with_io
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
def test_rebuild_with_io(self):
"""
Test ID: Rebuild-003
Test Description: Trigger a rebuild while I/O is ongoing.
Use Cases:
-- single pool, single client performing continous read/write/verify
sequence while failure/rebuild is triggered in another process
:avocado: tags=pool,rebuild,rebuildwithio
"""
# the rebuild tests need to redo this stuff each time so not in setup
# as it usually would be
server_group = self.params.get("name", '/server_config/',
'daos_server')
basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")
self.hostlist = self.params.get("test_machines", '/run/hosts/')
hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)
try:
self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
server_utils.run_server(hostfile, server_group, basepath)
# use the uid/gid of the user running the test, these should
# be perfectly valid
createuid = os.geteuid()
creategid = os.getegid()
# parameters used in pool create that are in yaml
createmode = self.params.get("mode", '/run/testparams/createmode/')
createsetid = self.params.get("setname",
'/run/testparams/createset/')
createsize = self.params.get("size", '/run/testparams/createsize/')
# initialize a python pool object then create the underlying
# daos storage
pool = DaosPool(self.context)
pool.create(createmode, createuid, creategid,
createsize, createsetid, None)
pool.connect(1 << 1)
container = DaosContainer(self.context)
container.create(pool.handle)
container.open()
# get pool status and make sure it all looks good before we start
pool.pool_query()
if pool.pool_info.pi_ndisabled != 0:
self.fail("Number of disabled targets reporting incorrectly.\n")
if pool.pool_info.pi_rebuild_st.rs_errno != 0:
self.fail("Rebuild error but rebuild hasn't run.\n")
if pool.pool_info.pi_rebuild_st.rs_done != 1:
self.fail("Rebuild is running but device hasn't failed yet.\n")
if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
self.fail("Rebuilt objs not zero.\n")
if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
self.fail("Rebuilt recs not zero.\n")
dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version
# do I/O for 30 seconds
dummy_bw = io_utilities.continuous_io(container, 30)
# trigger the rebuild
rank = self.params.get("rank", '/run/testparams/ranks/*')
server = DaosServer(self.context, server_group, rank)
server.kill(1)
pool.exclude([rank])
# do another 30 seconds of I/O,
# waiting for some improvements in server bootstrap
# at which point we can move the I/O to a separate client and
# really pound it with I/O
dummy_bw = io_utilities.continuous_io(container, 30)
# wait for the rebuild to finish
while True:
pool.pool_query()
if pool.pool_info.pi_rebuild_st.rs_done == 1:
break
else:
time.sleep(2)
# check rebuild statistics
if pool.pool_info.pi_ndisabled != 1:
self.fail("Number of disabled targets reporting incorrectly: {}"
.format(pool.pool_info.pi_ndisabled))
if pool.pool_info.pi_rebuild_st.rs_errno != 0:
self.fail("Rebuild error reported: {}".format(
pool.pool_info.pi_rebuild_st.rs_errno))
if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0:
self.fail("No objects have been rebuilt.")
if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0:
self.fail("No records have been rebuilt.")
except (ValueError, DaosApiError) as excep:
print(excep)
print(traceback.format_exc())
#.........这里部分代码省略.........
示例3: FullPoolContainerCreate
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
class FullPoolContainerCreate(Test):
"""
Class for test to create a container in a pool with no remaining free space.
"""
def setUp(self):
    """Stand up agents and servers and prepare the pool/log objects."""
    self.agent_sessions = None
    # resolve install paths from the build-generated variables file that
    # lives four directories above this test file
    vars_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "../../../../.build_vars.json")
    with open(vars_file) as build_file:
        build_paths = json.load(build_file)
    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

    self.server_group = self.params.get("name", '/server_config/',
                                        'daos_default_oops')
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

    self.cont = None
    self.cont2 = None
    self.pool = DaosPool(self.context)
    self.d_log = DaosLog(self.context)

    # write out a hostfile for the configured test machine(s)
    self.hostlist = self.params.get("test_machines1", '/hosts/')
    self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                    self.workdir)

    self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
    server_utils.run_server(self.hostfile, self.server_group, self.basepath)
def tearDown(self):
    """
    Destroy the pool inside a try so that, even when pool create did not
    succeed, the agents and servers are still shut down afterwards.
    """
    try:
        self.pool.destroy(1)
    finally:
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)
def test_no_space_cont_create(self):
"""
:avocado: tags=pool,cont,fullpoolcontcreate,small,vm
"""
# full storage rc
err = "-1007"
# probably should be -1007, revisit later
err2 = "-1009"
# create pool
mode = self.params.get("mode", '/conttests/createmode/')
self.d_log.debug("mode is {0}".format(mode))
uid = os.geteuid()
gid = os.getegid()
# 16 mb pool, minimum size currently possible
size = 16777216
self.d_log.debug("creating pool")
self.pool.create(mode, uid, gid, size, self.server_group, None)
self.d_log.debug("created pool")
# connect to the pool
self.d_log.debug("connecting to pool")
self.pool.connect(1 << 1)
self.d_log.debug("connected to pool")
# query the pool
self.d_log.debug("querying pool info")
dummy_pool_info = self.pool.pool_query()
self.d_log.debug("queried pool info")
# create a container
try:
self.d_log.debug("creating container")
self.cont = DaosContainer(self.context)
self.cont.create(self.pool.handle)
self.d_log.debug("created container")
except DaosApiError as excep:
self.d_log.error("caught exception creating container: "
"{0}".format(excep))
self.fail("caught exception creating container: {0}".format(excep))
self.d_log.debug("opening container")
self.cont.open()
self.d_log.debug("opened container")
# generate random dkey, akey each time
# write 1mb until no space, then 1kb, etc. to fill pool quickly
for obj_sz in [1048576, 1024, 1]:
write_count = 0
while True:
self.d_log.debug("writing obj {0}, sz {1} to "
"container".format(write_count, obj_sz))
my_str = "a" * obj_sz
my_str_sz = obj_sz
dkey = (
''.join(random.choice(string.lowercase) for i in range(5)))
akey = (
''.join(random.choice(string.lowercase) for i in range(5)))
#.........这里部分代码省略.........
示例4: RebuildNoCap
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
class RebuildNoCap(Test):
"""
Test Class Description:
This class contains tests for pool rebuild.
:avocado: tags=pool,rebuild,nocap
"""
def setUp(self):
    """Start servers, create a pool, and fill it with data for the test."""
    self.agent_sessions = None
    # get paths from the build_vars generated by build
    with open('../../../.build_vars.json') as build_file:
        build_paths = json.load(build_file)
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

    # generate a hostfile
    self.hostlist = self.params.get("test_machines", '/run/hosts/')
    self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                    self.workdir)

    # fire up the DAOS servers
    self.server_group = self.params.get("name", '/run/server_config/',
                                        'daos_server')
    self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
    server_utils.run_server(self.hostfile, self.server_group,
                            build_paths['PREFIX'] + '/../')

    # create a pool to test with
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createuid = self.params.get("uid", '/run/pool/createuid/')
    creategid = self.params.get("gid", '/run/pool/creategid/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    self.pool = DaosPool(self.context)
    self.pool.create(createmode, createuid, creategid, createsize,
                     createsetid)
    uuid = self.pool.get_uuid_str()
    time.sleep(2)

    # stuff some bogus data into the pool
    # int() replaces the Python-2-only long(); it is sufficient here since
    # the value is only interpolated into the command line below
    how_many_bytes = int(self.params.get("datasize",
                                         '/run/testparams/datatowrite/'))
    # BUG FIX: os.path.join discards all earlier components when a later
    # component is absolute, so the original "/../src/..." argument threw
    # away PREFIX entirely; use a relative component and normalize.
    exepath = os.path.normpath(
        os.path.join(build_paths['PREFIX'],
                     "../src/tests/ftest/util/write_some_data.py"))
    cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
          " --np 1 --host {1} {2} {3} testfile".format(
              uuid, self.hostlist[0], exepath, how_many_bytes)
    # NOTE(review): shell=True with interpolated values — the inputs come
    # from the test yaml, but watch the quoting if they ever become external
    subprocess.call(cmd, shell=True)
def tearDown(self):
    """Remove the hostfile and destroy the pool; always stop the servers."""
    try:
        os.remove(self.hostfile)
        if self.pool:
            self.pool.destroy(1)
    finally:
        # agents/servers must come down even if the destroy above raised
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)
def test_rebuild_no_capacity(self):
"""
:avocado: tags=pool,rebuild,nocap
"""
try:
print("\nsetup complete, starting test\n")
# create a server object that references on of our pool target hosts
# and then kill it
svr_to_kill = int(self.params.get("rank_to_kill",
'/run/testparams/ranks/'))
d_server = DaosServer(self.context, bytes(self.server_group),
svr_to_kill)
time.sleep(1)
d_server.kill(1)
# exclude the target from the dead server
self.pool.exclude([svr_to_kill])
# exclude should trigger rebuild, check
self.pool.connect(1 << 1)
status = self.pool.pool_query()
if not status.pi_ntargets == len(self.hostlist):
self.fail("target count wrong.\n")
if not status.pi_ndisabled == 1:
self.fail("disabled target count wrong.\n")
# the pool should be too full to start a rebuild so
# expecting an error
# not sure yet specifically what error
if status.pi_rebuild_st.rs_errno == 0:
self.fail("expecting rebuild to fail but it didn't.\n")
except DaosApiError as excep:
#.........这里部分代码省略.........
示例5: test_simple_rebuild
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
def test_simple_rebuild(self):
"""
Test ID: Rebuild-001
Test Description: The most basic rebuild test.
Use Cases:
-- single pool rebuild, single client, various reord/object
counts
:avocado: tags=pool,rebuild,rebuildsimple
"""
try:
# initialize a python pool object then create the underlying
# daos storage
pool = DaosPool(self.context)
pool.create(self.createmode, self.createuid, self.creategid,
self.createsize, self.createsetid)
# want an open connection during rebuild
pool.connect(1 << 1)
# get pool status we want to test later
pool.pool_query()
if pool.pool_info.pi_ndisabled != 0:
self.fail("Number of disabled targets reporting incorrectly.\n")
if pool.pool_info.pi_rebuild_st.rs_errno != 0:
self.fail("Rebuild error but rebuild hasn't run.\n")
if pool.pool_info.pi_rebuild_st.rs_done != 1:
self.fail("Rebuild is running but device hasn't failed yet.\n")
if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
self.fail("Rebuilt objs not zero.\n")
if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
self.fail("Rebuilt recs not zero.\n")
# create a container
container = DaosContainer(self.context)
container.create(pool.handle)
# now open it
container.open()
saved_data = []
for _objc in range(self.objcount):
obj = None
for _recc in range(self.reccount):
# make some stuff up and write
dkey = (
''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in range(5)))
akey = (
''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in range(5)))
data = (''.join(random.choice(string.ascii_uppercase +
string.digits)
for _ in range(self.size)))
obj, txn = container.write_an_obj(data, len(data), dkey,
akey, obj, self.rank,
obj_cls=16)
saved_data.append((obj, dkey, akey, data, txn))
# read the data back and make sure its correct
data2 = container.read_an_obj(self.size, dkey, akey, obj,
txn)
if data != data2.value:
self.fail("Write data 1, read it back, didn't match\n")
# kill a server that has
server = DaosServer(self.context, self.server_group, self.rank)
server.kill(1)
# temporarily, the exclude of a failed target must be done manually
pool.exclude([self.rank])
while True:
# get the pool/rebuild status again
pool.pool_query()
if pool.pool_info.pi_rebuild_st.rs_done == 1:
break
else:
time.sleep(2)
if pool.pool_info.pi_ndisabled != 1:
self.fail("Number of disabled targets reporting incorrectly: {}"
.format(pool.pool_info.pi_ndisabled))
if pool.pool_info.pi_rebuild_st.rs_errno != 0:
self.fail("Rebuild error reported: {}"
.format(pool.pool_info.pi_rebuild_st.rs_errno))
if pool.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
self.fail("Rebuilt objs not as expected: {0} {1}"
.format(pool.pool_info.pi_rebuild_st.rs_obj_nr,
self.objcount))
if (pool.pool_info.pi_rebuild_st.rs_rec_nr !=
(self.reccount*self.objcount)):
self.fail("Rebuilt recs not as expected: {0} {1}"
.format(pool.pool_info.pi_rebuild_st.rs_rec_nr,
self.reccount*self.objcount))
#.........这里部分代码省略.........
示例6: test_multipool_rebuild
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
def test_multipool_rebuild(self):
"""
Test ID: Rebuild-002
Test Description: Expand on the basic test by rebuilding 2
pools at once.
Use Cases:
-- multipool rebuild, single client, various object and record counds
:avocado: tags=pool,rebuild,rebuildmulti
"""
try:
# initialize python pool object then create the underlying
# daos storage, the way the code is now the pools should be
# on the same storage and have the same service leader
pool1 = DaosPool(self.context)
pool2 = DaosPool(self.context)
pool1.create(self.createmode, self.createuid, self.creategid,
self.createsize, self.createsetid)
pool2.create(self.createmode, self.createuid, self.creategid,
self.createsize, self.createsetid)
# want an open connection during rebuild
pool1.connect(1 << 1)
pool2.connect(1 << 1)
# create containers
container1 = DaosContainer(self.context)
container1.create(pool1.handle)
container2 = DaosContainer(self.context)
container2.create(pool2.handle)
# now open them
container1.open()
container2.open()
# Putting the same data in both pools, at least for now to simplify
# checking its correct
saved_data = []
for _objc in range(self.objcount):
obj = None
for _recc in range(self.reccount):
# make some stuff up and write
dkey = (
''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in range(5)))
akey = (
''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in range(5)))
data = (
''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in
range(self.size)))
# Used DAOS_OC_R1S_SPEC_RANK
# 1 replica with specified rank
obj, txn = container1.write_an_obj(data, len(data), dkey,
akey, obj, self.rank,
obj_cls=15)
obj, txn = container2.write_an_obj(data, len(data), dkey,
akey, obj, self.rank,
obj_cls=15)
saved_data.append((obj, dkey, akey, data, txn))
# read the data back and make sure its correct containers
data2 = container1.read_an_obj(self.size, dkey, akey, obj,
txn)
if data != data2.value:
self.fail("Wrote data P1, read it back, didn't match\n")
data2 = container2.read_an_obj(self.size, dkey, akey, obj,
txn)
if data != data2.value:
self.fail("Wrote data P2, read it back, didn't match\n")
# kill a server
server = DaosServer(self.context, self.server_group, self.rank)
server.kill(1)
# temporarily, the exclude of a failed target must be done
# manually
pool1.exclude([self.rank])
pool2.exclude([self.rank])
# check that rebuild finishes, no errors, progress data as
# know it to be. Check pool 1 first then we'll check 2 below.
while True:
pool1.pool_query()
if pool1.pool_info.pi_rebuild_st.rs_done == 1:
break
else:
time.sleep(2)
# check there are no errors and other data matches what we
# apriori know to be true,
if pool1.pool_info.pi_ndisabled != 1:
self.fail("P1 number disabled targets reporting incorrectly: {}"
.format(pool1.pool_info.pi_ndisabled))
if pool1.pool_info.pi_rebuild_st.rs_errno != 0:
self.fail("P1 rebuild error reported: {}"
#.........这里部分代码省略.........
示例7: NvmeIo
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
class NvmeIo(avocado.Test):
"""
Test Class Description:
Test the general Metadata operations and boundary conditions.
"""
def setUp(self):
    """Initialise per-test state and start the DAOS servers."""
    self.pool = None
    self.hostlist = None
    self.hostfile_clients = None
    self.hostfile = None
    self.out_queue = None
    self.pool_connect = False

    # locate the DAOS install via the build-generated variables file
    with open('../../../.build_vars.json') as json_f:
        build_paths = json.load(json_f)
    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

    self.server_group = self.params.get("name", '/server_config/',
                                        'daos_server')
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
    self.d_log = DaosLog(self.context)

    # write a hostfile for the configured server machines
    self.hostlist = self.params.get("servers", '/run/hosts/*')
    self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                    self.workdir)
    # Start Server
    server_utils.run_server(self.hostfile, self.server_group, self.basepath)
def tearDown(self):
    """Disconnect and destroy the pool, then always stop the servers."""
    try:
        # NOTE(review): the original indentation was lost in this source;
        # destroy is kept under the connect guard so a pool that was never
        # created/connected is not touched — confirm against upstream.
        if self.pool_connect:
            self.pool.disconnect()
            self.pool.destroy(1)
    finally:
        server_utils.stop_server(hosts=self.hostlist)
def verify_pool_size(self, original_pool_info, ior_args):
    """
    Validate that the pool shrank by at least the amount IOR wrote.

    original_pool_info: pool info captured before the IOR run
    ior_args: dict of IOR parameters used to compute the expected size

    Raises DaosTestError when less space was consumed than expected.
    """
    # Get the current pool size for comparison
    current_pool_info = self.pool.pool_query()

    # if Transfer size is < 4K, Pool size will verified against NVMe, else
    # it will be checked against SCM
    if ior_args['stripe_size'] >= 4096:
        print("Size is > 4K,Size verification will be done with NVMe size")
        storage_index = 1
    else:
        print("Size is < 4K,Size verification will be done with SCM size")
        storage_index = 0

    free_pool_size = (
        original_pool_info.pi_space.ps_space.s_free[storage_index]
        - current_pool_info.pi_space.ps_space.s_free[storage_index])

    # BUG FIX: the regex was run against the literal string
    # "ior_args['object_class']" (note the quotes), so the replica count
    # was never found and obj_multiplier was always 1; search the actual
    # object-class value instead.
    obj_multiplier = 1
    replica_number = re.findall(r'\d+', ior_args['object_class'])
    if replica_number:
        obj_multiplier = int(replica_number[0])

    expected_pool_size = (ior_args['slots'] * ior_args['block_size'] *
                          obj_multiplier)
    if free_pool_size < expected_pool_size:
        raise DaosTestError(
            'Pool Free Size did not match Actual = {} Expected = {}'
            .format(free_pool_size, expected_pool_size))
@avocado.fail_on(DaosApiError)
def test_nvme_io(self):
"""
Test ID: DAOS-2082
Test Description: Test will run IOR with standard and non standard
sizes.IOR will be run for all Object type supported. Purpose is to
verify pool size (SCM and NVMe) for IOR file.
This test is running multiple IOR on same server start instance.
:avocado: tags=nvme,nvme_io,large
"""
ior_args = {}
hostlist_clients = self.params.get("clients", '/run/hosts/*')
tests = self.params.get("ior_sequence", '/run/ior/*')
object_type = self.params.get("object_type", '/run/ior/*')
#Loop for every IOR object type
for obj_type in object_type:
for ior_param in tests:
self.hostfile_clients = write_host_file.write_host_file(
hostlist_clients,
self.workdir,
ior_param[4])
#There is an issue with NVMe if Transfer size>64M, Skipped this
#sizes for now
if ior_param[2] > 67108864:
print ("Xfersize > 64M getting failed, DAOS-1264")
continue
self.pool = DaosPool(self.context)
self.pool.create(self.params.get("mode",
'/run/pool/createmode/*'),
#.........这里部分代码省略.........
示例8: DestroyRebuild
# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import pool_query [as 别名]
class DestroyRebuild(Test):
"""
Test Class Description:
This test verifies destruction of a pool that is rebuilding.
:avocado: tags=pool,pooldestroy,rebuild,desreb
"""
build_paths = []
server_group = ""
context = None
pool = None
hostfile = ""
def setUp(self):
    """Bring up agents/servers and create the pool used by this test."""
    self.agent_sessions = None
    # read install locations from the build-generated variables file
    with open('../../../.build_vars.json') as build_file:
        build_paths = json.load(build_file)
    prefix = build_paths['PREFIX']
    self.context = DaosContext(prefix + '/lib/')
    self.basepath = os.path.normpath(prefix + "/../")

    # write out a hostfile for the configured test machines
    self.hostlist = self.params.get("test_machines", '/run/hosts/')
    self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                    self.workdir)

    # fire up the DAOS servers
    self.server_group = self.params.get("name", '/run/server_config/',
                                        'daos_server')
    self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
    server_utils.run_server(self.hostfile, self.server_group,
                            prefix + '/../')

    # create the pool under test from the yaml parameters
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createuid = self.params.get("uid", '/run/pool/createuid/')
    creategid = self.params.get("gid", '/run/pool/creategid/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    self.pool = DaosPool(self.context)
    self.pool.create(createmode, createuid, creategid, createsize,
                     createsetid)
    self.pool.get_uuid_str()
    time.sleep(2)
def tearDown(self):
    """Clean up the hostfile and pool; always stop agents and servers."""
    try:
        os.remove(self.hostfile)
        if self.pool:
            self.pool.destroy(1)
    finally:
        # shutdown must happen regardless of cleanup failures above
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)
def test_destroy_while_rebuilding(self):
"""
:avocado: tags=pool,pooldestroy,rebuild,desreb
"""
try:
print("\nsetup complete, starting test\n")
# create a server object that references on of our pool target hosts
# and then kill it
svr_to_kill = int(self.params.get("rank_to_kill",
'/run/testparams/ranks/'))
server = DaosServer(self.context, bytes(self.server_group),
svr_to_kill)
print("created server ")
# BUG if you don't connect the rebuild doesn't start correctly
self.pool.connect(1 << 1)
status = self.pool.pool_query()
if not status.pi_ntargets == len(self.hostlist):
self.fail("target count wrong.\n")
if not status.pi_ndisabled == 0:
self.fail("disabled target count wrong.\n")
print("connect ")
time.sleep(1)
server.kill(1)
print("killed server ")
# exclude the target from the dead server
self.pool.exclude([svr_to_kill])
print("exclude target ")
#self.pool.disconnect()
#print "disconnect "
#.........这里部分代码省略.........