This article collects typical usage examples of the daos_api.DaosPool.exclude method in Python. If you have been wondering how DaosPool.exclude is used in practice, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look further into usage examples of the containing class, daos_api.DaosPool.
The following shows 7 code examples of the DaosPool.exclude method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
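Before the full test listings, here is a minimal sketch of the pattern all seven examples share: create and connect a pool, kill the server owning a rank, exclude that rank, then poll until rebuild completes. It assumes the daos_api Python bindings from the DAOS source tree and an already-running DAOS server; the .build_vars.json path, pool mode, size, set name, server-group name, and rank used here are illustrative placeholders, not required values.

import json
import os
import time

from daos_api import DaosContext, DaosPool, DaosServer

# locate the installed DAOS libraries; the examples below read this path
# from the .build_vars.json file generated by the build (path is an assumption)
with open('../../../.build_vars.json') as build_file:
    build_paths = json.load(build_file)
context = DaosContext(build_paths['PREFIX'] + '/lib/')

# create and connect a pool, mirroring the examples below; mode, size and
# set name are placeholder values
pool = DaosPool(context)
pool.create(511, os.geteuid(), os.getegid(), 1 << 30, "daos_server", None)
pool.connect(1 << 1)

# kill the server that owns a rank, then exclude that rank from the pool,
# which is what triggers rebuild in these tests (rank is a placeholder)
rank_to_fail = 1
server = DaosServer(context, "daos_server", rank_to_fail)
server.kill(1)
pool.exclude([rank_to_fail])

# poll the pool until the rebuild status reports done
while True:
    pool.pool_query()
    if pool.pool_info.pi_rebuild_st.rs_done == 1:
        break
    time.sleep(2)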
Example 1: test_rebuild_with_io
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
def test_rebuild_with_io(self):
    """
    Test ID: Rebuild-003
    Test Description: Trigger a rebuild while I/O is ongoing.

    Use Cases:
    -- single pool, single client performing continuous read/write/verify
       sequence while failure/rebuild is triggered in another process

    :avocado: tags=pool,rebuild,rebuildwithio
    """
    # the rebuild tests need to redo this stuff each time so not in setup
    # as it usually would be
    server_group = self.params.get("name", '/server_config/',
                                   'daos_server')
    basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")
    self.hostlist = self.params.get("test_machines", '/run/hosts/')
    hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

    try:
        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(hostfile, server_group, basepath)

        # use the uid/gid of the user running the test, these should
        # be perfectly valid
        createuid = os.geteuid()
        creategid = os.getegid()

        # parameters used in pool create that are in yaml
        createmode = self.params.get("mode", '/run/testparams/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/testparams/createset/')
        createsize = self.params.get("size", '/run/testparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        pool.connect(1 << 1)
        container = DaosContainer(self.context)
        container.create(pool.handle)
        container.open()

        # get pool status and make sure it all looks good before we start
        pool.pool_query()
        if pool.pool_info.pi_ndisabled != 0:
            self.fail("Number of disabled targets reporting incorrectly.\n")
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error but rebuild hasn't run.\n")
        if pool.pool_info.pi_rebuild_st.rs_done != 1:
            self.fail("Rebuild is running but device hasn't failed yet.\n")
        if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
            self.fail("Rebuilt objs not zero.\n")
        if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
            self.fail("Rebuilt recs not zero.\n")
        dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version

        # do I/O for 30 seconds
        dummy_bw = io_utilities.continuous_io(container, 30)

        # trigger the rebuild
        rank = self.params.get("rank", '/run/testparams/ranks/*')
        server = DaosServer(self.context, server_group, rank)
        server.kill(1)
        pool.exclude([rank])

        # do another 30 seconds of I/O,
        # waiting for some improvements in server bootstrap
        # at which point we can move the I/O to a separate client and
        # really pound it with I/O
        dummy_bw = io_utilities.continuous_io(container, 30)

        # wait for the rebuild to finish
        while True:
            pool.pool_query()
            if pool.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        # check rebuild statistics
        if pool.pool_info.pi_ndisabled != 1:
            self.fail("Number of disabled targets reporting incorrectly: {}"
                      .format(pool.pool_info.pi_ndisabled))
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error reported: {}".format(
                pool.pool_info.pi_rebuild_st.rs_errno))
        if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0:
            self.fail("No objects have been rebuilt.")
        if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0:
            self.fail("No records have been rebuilt.")

    except (ValueError, DaosApiError) as excep:
        print(excep)
        print(traceback.format_exc())
#.........some code omitted here.........
Example 2: test_exclude
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
def test_exclude(self):
    """
    Pass bad parameters to pool exclude.

    :avocado: tags=pool,poolexclude,badparam,badexclude
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    createuid = os.geteuid()
    creategid = os.getegid()

    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    expected_for_param = []

    tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
    targets = []

    if tgtlist[0] == "NULLPTR":
        targets = None
        self.cancel("skipping null pointer test until DAOS-1929 is fixed")
    else:
        targets.append(tgtlist[0])
    expected_for_param.append(tgtlist[1])

    svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
    svc = svclist[0]
    expected_for_param.append(svclist[1])

    setlist = self.params.get("setname",
                              '/run/testparams/connectsetnames/*/')
    connectset = setlist[0]
    expected_for_param.append(setlist[1])

    uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
    excludeuuid = uuidlist[0]
    expected_for_param.append(uuidlist[1])

    # if any parameter is FAIL then the test should FAIL, in this test
    # virtually everyone should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    saved_svc = None
    saved_grp = None
    saved_uuid = None
    pool = None

    try:
        # setup the DAOS python API
        with open('../../../.build_vars.json') as build_file:
            data = json.load(build_file)
        context = DaosContext(data['PREFIX'] + '/lib/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)

        # trash the pool service rank list
        if not svc == 'VALID':
            self.cancel("skipping this test until DAOS-1931 is fixed")
            saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
            pool.svc = None

        # trash the pool group value
        if connectset == 'NULLPTR':
            saved_grp = pool.group
            pool.group = None

        # trash the UUID value in various ways
        if excludeuuid == 'NULLPTR':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid = 0
        if excludeuuid == 'CRAP':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid[4] = 244

        pool.exclude(targets)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result in ['PASS']:
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        if pool is not None:
            if saved_svc is not None:
#.........some code omitted here.........
Example 3: RebuildNoCap
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
class RebuildNoCap(Test):
    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = os.path.join(build_paths['PREFIX'],
                               "/../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """
        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)
            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error; not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
#.........some code omitted here.........
Example 4: test_simple_rebuild
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
def test_simple_rebuild(self):
    """
    Test ID: Rebuild-001
    Test Description: The most basic rebuild test.

    Use Cases:
    -- single pool rebuild, single client, various record/object
       counts

    :avocado: tags=pool,rebuild,rebuildsimple
    """
    try:
        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(self.createmode, self.createuid, self.creategid,
                    self.createsize, self.createsetid)

        # want an open connection during rebuild
        pool.connect(1 << 1)

        # get pool status we want to test later
        pool.pool_query()
        if pool.pool_info.pi_ndisabled != 0:
            self.fail("Number of disabled targets reporting incorrectly.\n")
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error but rebuild hasn't run.\n")
        if pool.pool_info.pi_rebuild_st.rs_done != 1:
            self.fail("Rebuild is running but device hasn't failed yet.\n")
        if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
            self.fail("Rebuilt objs not zero.\n")
        if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
            self.fail("Rebuilt recs not zero.\n")

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)

        # now open it
        container.open()

        saved_data = []
        for _objc in range(self.objcount):
            obj = None
            for _recc in range(self.reccount):
                # make some stuff up and write
                dkey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                akey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                data = (''.join(random.choice(string.ascii_uppercase +
                                              string.digits)
                                for _ in range(self.size)))

                obj, txn = container.write_an_obj(data, len(data), dkey,
                                                  akey, obj, self.rank,
                                                  obj_cls=16)
                saved_data.append((obj, dkey, akey, data, txn))

                # read the data back and make sure its correct
                data2 = container.read_an_obj(self.size, dkey, akey, obj,
                                              txn)
                if data != data2.value:
                    self.fail("Write data 1, read it back, didn't match\n")

        # kill a server that has
        server = DaosServer(self.context, self.server_group, self.rank)
        server.kill(1)

        # temporarily, the exclude of a failed target must be done manually
        pool.exclude([self.rank])

        while True:
            # get the pool/rebuild status again
            pool.pool_query()
            if pool.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        if pool.pool_info.pi_ndisabled != 1:
            self.fail("Number of disabled targets reporting incorrectly: {}"
                      .format(pool.pool_info.pi_ndisabled))
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error reported: {}"
                      .format(pool.pool_info.pi_rebuild_st.rs_errno))
        if pool.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
            self.fail("Rebuilt objs not as expected: {0} {1}"
                      .format(pool.pool_info.pi_rebuild_st.rs_obj_nr,
                              self.objcount))
        if (pool.pool_info.pi_rebuild_st.rs_rec_nr !=
                (self.reccount*self.objcount)):
            self.fail("Rebuilt recs not as expected: {0} {1}"
                      .format(pool.pool_info.pi_rebuild_st.rs_rec_nr,
                              self.reccount*self.objcount))
#.........some code omitted here.........
Example 5: test_multipool_rebuild
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
def test_multipool_rebuild(self):
    """
    Test ID: Rebuild-002
    Test Description: Expand on the basic test by rebuilding 2
    pools at once.

    Use Cases:
    -- multipool rebuild, single client, various object and record counts

    :avocado: tags=pool,rebuild,rebuildmulti
    """
    try:
        # initialize python pool object then create the underlying
        # daos storage, the way the code is now the pools should be
        # on the same storage and have the same service leader
        pool1 = DaosPool(self.context)
        pool2 = DaosPool(self.context)
        pool1.create(self.createmode, self.createuid, self.creategid,
                     self.createsize, self.createsetid)
        pool2.create(self.createmode, self.createuid, self.creategid,
                     self.createsize, self.createsetid)

        # want an open connection during rebuild
        pool1.connect(1 << 1)
        pool2.connect(1 << 1)

        # create containers
        container1 = DaosContainer(self.context)
        container1.create(pool1.handle)
        container2 = DaosContainer(self.context)
        container2.create(pool2.handle)

        # now open them
        container1.open()
        container2.open()

        # Putting the same data in both pools, at least for now to simplify
        # checking its correct
        saved_data = []
        for _objc in range(self.objcount):
            obj = None
            for _recc in range(self.reccount):
                # make some stuff up and write
                dkey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                akey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                data = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in
                            range(self.size)))

                # Used DAOS_OC_R1S_SPEC_RANK
                # 1 replica with specified rank
                obj, txn = container1.write_an_obj(data, len(data), dkey,
                                                   akey, obj, self.rank,
                                                   obj_cls=15)
                obj, txn = container2.write_an_obj(data, len(data), dkey,
                                                   akey, obj, self.rank,
                                                   obj_cls=15)
                saved_data.append((obj, dkey, akey, data, txn))

                # read the data back and make sure it's correct in both
                # containers
                data2 = container1.read_an_obj(self.size, dkey, akey, obj,
                                               txn)
                if data != data2.value:
                    self.fail("Wrote data P1, read it back, didn't match\n")
                data2 = container2.read_an_obj(self.size, dkey, akey, obj,
                                               txn)
                if data != data2.value:
                    self.fail("Wrote data P2, read it back, didn't match\n")

        # kill a server
        server = DaosServer(self.context, self.server_group, self.rank)
        server.kill(1)

        # temporarily, the exclude of a failed target must be done
        # manually
        pool1.exclude([self.rank])
        pool2.exclude([self.rank])

        # check that rebuild finishes, no errors, progress data as we
        # know it to be. Check pool 1 first then we'll check 2 below.
        while True:
            pool1.pool_query()
            if pool1.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        # check there are no errors and other data matches what we
        # a priori know to be true
        if pool1.pool_info.pi_ndisabled != 1:
            self.fail("P1 number disabled targets reporting incorrectly: {}"
                      .format(pool1.pool_info.pi_ndisabled))
        if pool1.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("P1 rebuild error reported: {}"
#.........some code omitted here.........
Example 6: DestroyRebuild
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
class DestroyRebuild(Test):
    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: tags=pool,pooldestroy,rebuild,desreb
    """

    build_paths = []
    server_group = ""
    context = None
    pool = None
    hostfile = ""

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        self.pool.get_uuid_str()

        time.sleep(2)

    def tearDown(self):
        """ cleanup after the test """
        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            server = DaosServer(self.context, bytes(self.server_group),
                                svr_to_kill)
            print("created server ")

            # BUG if you don't connect the rebuild doesn't start correctly
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 0:
                self.fail("disabled target count wrong.\n")
            print("connect ")

            time.sleep(1)
            server.kill(1)
            print("killed server ")

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])
            print("exclude target ")

            #self.pool.disconnect()
            #print "disconnect "
#.........some code omitted here.........
Example 7: PoolSvc
# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import exclude [as alias]
class PoolSvc(Test):
    """
    Tests svc argument while pool create.
    """

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')
        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # checking returned rank list for server more than 1
            i = 0
            while (
                    int(self.pool.svc.rl_ranks[i]) > 0 and
                    int(self.pool.svc.rl_ranks[i]) <= createsvc[0] and
                    int(self.pool.svc.rl_ranks[i]) != 999999
            ):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")

            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[j]))
                if len(rank_list) != len(set(rank_list)):
                    self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.pool.disconnect()
                cmd = ('{0} kill-leader --uuid={1}'
                       .format(self.daosctl, self.pool.get_uuid_str()))
                process.system(cmd)
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                server = DaosServer(self.context, self.server_group, 2)
                server.kill(1)
                self.pool.exclude([2])
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")