

Python DaosPool.destroy Method Code Examples


This article collects typical usage examples of the Python method daos_api.DaosPool.destroy. If you are wondering what DaosPool.destroy does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the containing class, daos_api.DaosPool.


The following shows 15 code examples of the DaosPool.destroy method, sorted by popularity by default.
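
Before the examples, here is a minimal sketch of the create/connect/destroy flow that most of them follow. It assumes that DaosContext and DaosApiError are importable from daos_api alongside DaosPool (as the examples' imports suggest) and that a .build_vars.json file like the one these tests read is available; the mode, size, and server group name are illustrative placeholders rather than values taken from any particular test.

import json
import os

from daos_api import DaosContext, DaosPool, DaosApiError

# locate the DAOS client library via the build metadata, as the tests below do
with open('../../../.build_vars.json') as build_file:
    build_paths = json.load(build_file)
context = DaosContext(build_paths['PREFIX'] + '/lib/')

pool = DaosPool(context)
try:
    # create the underlying daos storage; the arguments are
    # (mode, uid, gid, size in bytes, server group/set name, target list),
    # given here as placeholders (the tests read them from avocado params)
    pool.create(511, os.geteuid(), os.getegid(),
                1024 * 1024 * 1024, "daos_server", None)

    # open a read/write connection (the 1 << 1 flag used throughout)
    pool.connect(1 << 1)

    # ... create containers, write objects, and so on ...

    pool.disconnect()
except DaosApiError as excep:
    print(excep)
finally:
    # force == 1 destroys the pool even if clients are still connected;
    # destroy(0) on a connected pool fails with a BUSY error (see Example 1)
    if pool.attached:
        pool.destroy(1)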

Example 1: test_destroy_connect

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_destroy_connect(self):
        """
        Test destroying a pool that has a connected client with force == false.
        Should fail.

        :avocado: tags=pool,pooldestroy,x
        """
        host = self.hostlist[0]
        try:

            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # destroy pool with connection open
            pool.destroy(0)

            # should throw an exception and not hit this
            self.fail("Shouldn't hit this line.\n")

        except DaosApiError as excep:
            print("got exception which is expected so long as it is BUSY")
            print(excep)
            print(traceback.format_exc())
            # pool should still be there
            exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
            if exists != 0:
                self.fail("Pool gone, but destroy should have failed.\n")

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 58, Source: destroy_tests.py

Example 2: test_many_servers

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_many_servers(self):
        """
        Test destroy on a large (relative) number of servers.

        :avocado: tags=pool,pooldestroy,destroybig
        """
        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines6", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            time.sleep(1)

            # okay, get rid of it
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("6 server test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 50, Source: destroy_tests.py

Example 3: test_destroy_recreate

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_destroy_recreate(self):
        """
        Test destroy and recreate one pool right after the other, multiple
        times. Should succeed.

        :avocado: tags=pool,pooldestroy,destroyredo
        """

        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # blow it away immediately
            pool.destroy(1)

            # now recreate
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # blow it away immediately
            pool.destroy(1)

            # now recreate
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # blow it away immediately
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("create/destroy/create/destroy test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 64, Source: destroy_tests.py

Example 4: test_connect

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]

#......... part of the code omitted here .........
        :avocado: tags=pool,poolconnect,badparam,badconnect
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if not svc == 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
                ctypes.memmove(pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    pool.get_uuid_str()))
                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines: 104, Source: bad_connect.py

Example 5: test_create

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_create(self):
        """
        Pass bad parameters to pool create.

        :avocado: tags=pool,poolcreate,badparam,badcreate
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test

        pool = None
        expected_for_param = []

        modelist = self.params.get("mode", '/run/createtests/modes/*')
        mode = modelist[0]
        expected_for_param.append(modelist[1])

        uidlist = self.params.get("uid", '/run/createtests/uids/*')
        uid = uidlist[0]
        expected_for_param.append(uidlist[1])

        gidlist = self.params.get("gid", '/run/createtests/gids/*')
        gid = gidlist[0]
        expected_for_param.append(gidlist[1])

        setidlist = self.params.get("setname", '/run/createtests/setnames/*')
        if setidlist[0] == 'NULLPTR':
            group = None
            self.cancel("skipping this test until DAOS-1991 is fixed")
        else:
            group = setidlist[0]
        expected_for_param.append(setidlist[1])

        targetlist = self.params.get("rankptr", '/run/createtests/target/*')
        if targetlist[0] == 'NULL':
            targetptr = None
        else:
            targetptr = [0]
        expected_for_param.append(targetlist[1])

        # not ready for this yet
        #devicelist = self.params.get("devptr", '/run/createtests/device/*')
        #if devicelist[0] == 'NULL':
        #    devptr = None
        #else:
        #    devptr = devicelist[0]
        #expected_for_param.append(devicelist[1])

        sizelist = self.params.get("size", '/run/createtests/psize/*')
        size = sizelist[0]
        expected_for_param.append(sizelist[1])

        # parameter not presently supported
        #svclist = self.params.get("rankptr", '/run/createtests/svc/*')
        #if svclist[0] == 'NULL':
        #    svc = None
        #else:
        #    svc = None
        #expected_for_param.append(devicelist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(mode, uid, gid, size, group, targetptr)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None and pool.attached:
                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines: 92, Source: bad_create.py

Example 6: test_bad_handle

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=object,objupdate,objbadhand,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, None, None, 2)

            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, obj, None, 2)

            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Developer ID: daos-stack, Project: daos, Lines: 69, Source: obj_update_bad_param.py

Example 7: test_exclude

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]

#......... part of the code omitted here .........
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')

        createuid = os.geteuid()
        creategid = os.getegid()

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
        targets = []

        if tgtlist[0] == "NULLPTR":
            targets = None
            self.cancel("skipping null pointer test until DAOS-1929 is fixed")
        else:
            targets.append(tgtlist[0])
        expected_for_param.append(tgtlist[1])

        svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/testparams/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
        excludeuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        saved_svc = None
        saved_grp = None
        saved_uuid = None
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # trash the pool service rank list
            if not svc == 'VALID':
                self.cancel("skipping this test until DAOS-1931 is fixed")
                saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
                pool.svc = None

            # trash the pool group value
            if connectset == 'NULLPTR':
                saved_grp = pool.group
                pool.group = None

            # trash the UUID value in various ways
            if excludeuuid == 'NULLPTR':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid = 0
            if excludeuuid == 'CRAP':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid[4] = 244

            pool.exclude(targets)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                if saved_svc is not None:
                    pool.svc = saved_svc
                if saved_grp is not None:
                    pool.group = saved_grp
                if saved_uuid is not None:
                    ctypes.memmove(pool.uuid, saved_uuid, 16)

                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines: 104, Source: bad_exclude.py

Example 8: test_container_create

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        expected_results = []

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines: 74, Source: create.py

Example 9: ObjectMetadata

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        # This should fail with a 'no metadata space' error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print (exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify metadata release the space
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: metadata.py

Example 10: CreateManyDkeys

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    """
    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))

            if key > last_key:
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: create_many_dkeys.py

Example 11: NvmeIo

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class NvmeIo(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = False

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        #Start Server
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist)

    def verify_pool_size(self, original_pool_info, ior_args):
        """
        Function is to validate the pool size
        original_pool_info: Pool info prior to IOR
        ior_args: IOR args to calculate the file size
        """
        #Get the current pool size for comparison
        current_pool_info = self.pool.pool_query()
        # if the transfer size is >= 4K, the pool size is verified against the
        # NVMe free space, otherwise against the SCM free space
        if ior_args['stripe_size'] >= 4096:
            print("Size is >= 4K, size verification will be done with NVMe size")
            storage_index = 1
        else:
            print("Size is < 4K, size verification will be done with SCM size")
            storage_index = 0

        free_pool_size = (
            original_pool_info.pi_space.ps_space.s_free[storage_index]
            - current_pool_info.pi_space.ps_space.s_free[storage_index])

        obj_multiplier = 1
        replica_number = re.findall(r'\d+', ior_args['object_class'])
        if replica_number:
            obj_multiplier = int(replica_number[0])
        expected_pool_size = (ior_args['slots'] * ior_args['block_size'] *
                              obj_multiplier)

        if free_pool_size < expected_pool_size:
            raise DaosTestError(
                'Pool Free Size did not match Actual = {} Expected = {}'
                .format(free_pool_size, expected_pool_size))

    @avocado.fail_on(DaosApiError)
    def test_nvme_io(self):
        """
        Test ID: DAOS-2082
        Test Description: Test will run IOR with standard and non standard
        sizes.IOR will be run for all Object type supported. Purpose is to
        verify pool size (SCM and NVMe) for IOR file.
        This test is running multiple IOR on same server start instance.
        :avocado: tags=nvme,nvme_io,large
        """
        ior_args = {}

        hostlist_clients = self.params.get("clients", '/run/hosts/*')
        tests = self.params.get("ior_sequence", '/run/ior/*')
        object_type = self.params.get("object_type", '/run/ior/*')
        #Loop for every IOR object type
        for obj_type in object_type:
            for ior_param in tests:
                self.hostfile_clients = write_host_file.write_host_file(
                    hostlist_clients,
                    self.workdir,
                    ior_param[4])
                # there is an issue with NVMe if the transfer size is > 64M;
                # skip these sizes for now
                if ior_param[2] > 67108864:
                    print ("Xfersize > 64M getting failed, DAOS-1264")
                    continue

                self.pool = DaosPool(self.context)
                self.pool.create(self.params.get("mode",
                                                 '/run/pool/createmode/*'),
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: nvme_io.py

Example 12: test_destroy_withdata

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
    def test_destroy_withdata(self):
        """
        Test destroying a pool that still contains data, using force == 1.
        Should succeed.

        :avocado: tags=pool,pooldestroy,destroydata
        """
        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            pool.disconnect()

            daosctl = self.basepath + '/install/bin/daosctl'

            write_cmd = ('{0} write-pattern -i {1} -l 0 -c {2} -p sequential'.
                         format(daosctl, c_uuid_to_str(pool.uuid),
                                c_uuid_to_str(container.uuid)))

            process.system_output(write_cmd)

            # blow it away
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("create/destroy/create/destroy test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 66, Source: destroy_tests.py

Example 13: ObjectDataValidation

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create Different length records,
        Disconnect the pool/container and reconnect,
        validate the data after reconnect.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        #Close the Obj/Container, Disconnect the Pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        #Connect Pool, Open Container and Object
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=single_object,data_verification,medium,vm
        """
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: object_integrity.py

Example 14: BasicSnapshot

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class BasicSnapshot(Test):
    """
    DAOS-1370 Basic snapshot test

    Test Class Description:
    Test that a snapshot taken of a container remains unchanged even after an
    object in the container has been updated 500 times.
    Create the container.
    Write an object to the container.
    Take a snapshot.
    Write 500 changes to the KV pair of the object.
    Check that the snapshot is still there.
    Confirm that the data in the snapshot is unchanged.
    Destroy the snapshot
    """

    def __init__(self, *args, **kwargs):
        super(BasicSnapshot, self).__init__(*args, **kwargs)
        self.snapshot = None

    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server()

    def test_basic_snapshot(self):
        """
        Test ID: DAOS-1370

        Test Description:
        Create a pool, container in the pool, object in the container, add
        one key:value to the object.
        Commit the transaction. Perform a snapshot create on the container.
        Create 500 additional transactions with a small change to the object
        in each and commit each after the object update is done.
        Verify the snapshot is still available and the contents remain in
        their original state.

        :avocado: tags=snapshot,basicsnap
        """

        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = "Now is the winter of our discontent made glorious"
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: basic_snapshot.py

Example 15: PunchTest

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import destroy [as alias]
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.
    """
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

                self.server_group = self.params.get("name", '/server_config/',
                                                    'daos_server')

                # setup the DAOS python API
                self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                           self.hostlist)
                server_utils.run_server(self.hostfile, self.server_group,
                                        self.basepath)

                # parameters used in pool create
                createmode = self.params.get("mode", '/run/pool/createmode/')
                createsetid = self.params.get("setname", '/run/pool/createset/')
                createsize = self.params.get("size", '/run/pool/createsize/')

                createuid = os.geteuid()
                creategid = os.getegid()

                # initialize a python pool object then create the underlying
                # daos storage
                self.pool = DaosPool(self.context)
                self.pool.create(createmode, createuid, creategid,
                                 createsize, createsetid, None)
                self.pool.connect(1 << 1)

                # create a container
                self.container = DaosContainer(self.context)
                self.container.create(self.pool.handle)

                # now open it
                self.container.open()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):

        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """

        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
#......... part of the code omitted here .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source: punch_test.py


Note: The daos_api.DaosPool.destroy method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.