

Python DaosPool.create method code examples

This article collects typical usage code examples of the Python daos_api.DaosPool.create method. If you are wondering how to use DaosPool.create, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples of the class it belongs to, daos_api.DaosPool.


The sections below present 15 code examples of the DaosPool.create method, ordered by popularity by default.
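
All of the examples share the same basic call pattern: build a DaosContext that points at the DAOS client library, construct a DaosPool from it, call create() with a mode, uid, gid, size, and server group name, then connect() before doing any container work and destroy() during cleanup. The snippet below is only a minimal sketch of that pattern, assuming the daos_api imports used by the tests; the library path, mode, and size values are placeholders to adjust for your environment.

import os

# assumed imports: the tests in this article take these names from daos_api
from daos_api import DaosContext, DaosPool, DaosApiError

# placeholder path: point this at the lib directory of your DAOS install
context = DaosContext('/path/to/daos/install/lib/')

pool = DaosPool(context)
try:
    # create(mode, uid, gid, size_in_bytes, server_group, target_list)
    # the mode and size values here are illustrative only
    pool.create(511, os.geteuid(), os.getegid(),
                1024 * 1024 * 1024, "daos_server", None)

    # a connection is required before containers can be created in the pool
    pool.connect(1 << 1)
except DaosApiError as excep:
    print(excep)
finally:
    # force-destroy the pool during cleanup, mirroring the tests' tearDown
    if pool.attached:
        pool.destroy(1)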

Example 1: test_destroy_connect

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_destroy_connect(self):
        """
        Test destroying a pool that has a connected client with force == false.
        Should fail.

        :avocado: tags=pool,pooldestroy,x
        """
        host = self.hostlist[0]
        try:

            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # destroy pool with connection open
            pool.destroy(0)

            # should throw an exception and not hit this
            self.fail("Shouldn't hit this line.\n")

        except DaosApiError as excep:
            print("got exception which is expected so long as it is BUSY")
            print(excep)
            print(traceback.format_exc())
            # pool should still be there
            exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
            if exists != 0:
                self.fail("Pool gone, but destroy should have failed.\n")

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 58, Source file: destroy_tests.py

Example 2: test_global_handle

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Developer ID: daos-stack, Project: daos, Lines: 51, Source file: global_handle.py

Example 3: test_many_servers

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_many_servers(self):
        """
        Test destroy on a large (relative) number of servers.

        :avocado: tags=pool,pooldestroy,destroybig
        """
        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines6", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            time.sleep(1)

            # okay, get rid of it
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("6 server test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 50, Source file: destroy_tests.py

Example 4: test_destroy_async

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_destroy_async(self):
        """
        Perform destroy asynchronously, covering both successful and failed cases.

        :avocado: tags=pool,pooldestroy,destroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # allow the callback to tell us when it's been called
            GLOB_SIGNAL = threading.Event()

            # blow it away but this time get return code via callback function
            pool.destroy(1, cb_func)

            # wait for callback
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")

            # recreate the pool, reset the signal, shutdown the
            # servers so call will fail and then check rc in the callback
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            server_utils.stop_server(hosts=self.hostlist)
            pool.destroy(1, cb_func)

            # wait for callback, expecting a timeout since servers are down
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1011:
                self.fail("RC not as expected in async test")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("destroy async test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely. Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines: 74, Source file: destroy_tests.py

Example 5: ObjectMetadata

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        #This should fail with no Metadata space Error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print (exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify metadata release the space
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: metadata.py

Example 6: test_container_create

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        expected_results = []

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines: 74, Source file: create.py

Example 7: test_connect

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=pool,poolconnect,badparam,badconnect
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if not svc == 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: bad_connect.py

Example 8: MultipleClients

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class MultipleClients(Test):
    """
    Test class Description: Runs IOR with multiple clients.

    """
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/test_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        print("Host file clientsis: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_multipleclients(self):
        """
        Test ID: DAOS-1263
        Test Description: Test IOR with 16 and 32 clients config.
        Use Cases: Different combinations of 16/32 Clients, 8b/1k/4k
                   record size, 1m/8m stripesize and 16 async io.
        :avocado: tags=ior,twoservers,multipleclients
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        slots = self.params.get("slots", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_size = self.params.get("s", '/run/ior/stripesize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            with open(self.hostfile_clients) as client_file:
                new_text = client_file.read().replace('slots=1',
                                                      'slots={0}').format(slots)

            with open(self.hostfile_clients, "w") as client_file:
                client_file.write(new_text)

            pool_uuid = self.pool.get_uuid_str()
            tmp_rank_list = []
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: multiple_clients.py

Example 9: test_exclude

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
    def test_exclude(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=pool,poolexclude,badparam,badexclude
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')

        createuid = os.geteuid()
        creategid = os.getegid()

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
        targets = []

        if tgtlist[0] == "NULLPTR":
            targets = None
            self.cancel("skipping null pointer test until DAOS-1929 is fixed")
        else:
            targets.append(tgtlist[0])
        expected_for_param.append(tgtlist[1])

        svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/testparams/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
        excludeuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        saved_svc = None
        saved_grp = None
        saved_uuid = None
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # trash the pool service rank list
            if not svc == 'VALID':
                self.cancel("skipping this test until DAOS-1931 is fixed")
                saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
                pool.svc = None

            # trash the pool group value
            if connectset == 'NULLPTR':
                saved_grp = pool.group
                pool.group = None

            # trash the UUID value in various ways
            if excludeuuid == 'NULLPTR':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid = 0
            if excludeuuid == 'CRAP':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid[4] = 244

            pool.exclude(targets)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                if saved_svc is not None:
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: bad_exclude.py

Example 10: PoolAttributeTest

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class PoolAttributeTest(Test):
    """
    Test class Description: Tests DAOS pool attribute get/set/list.
    """
    def setUp(self):
        try:
            self.pool = None
            self.hostlist = None
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
                server_group = self.params.get("name",
                                               '/server_config/',
                                               'daos_server')
                context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                server_utils.run_server(self.hostfile, server_group, basepath)

                createmode = self.params.get("mode",
                                             '/run/attrtests/createmode/')
                createuid = os.geteuid()
                creategid = os.getgid()
                createsetid = self.params.get("setname",
                                              '/run/attrtests/createset/')
                createsize = self.params.get("size",
                                             '/run/attrtests/createsize/')

                self.pool = DaosPool(context)
                self.pool.create(createmode, createuid, creategid, createsize,
                                 createsetid)
                self.pool.connect(1 << 1)
                self.large_data_set = {}

        except DaosApiError as excep:
            print("In the setup exception handler\n")
            print(excep)
            print(traceback.format_exc())

    def tearDown(self):
        try:
            if self.pool is not None:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist)

    def create_data_set(self):
        """
        To create the large attribute dictionary
        """
        allchar = string.ascii_letters + string.digits
        for i in range(1024):
            self.large_data_set[str(i)] = (
                "".join(random.choice(allchar)
                        for x in range(random.randint(1, 100))))

    def test_pool_attributes(self):
        """
        Test ID: DAOS-1359

        Test description: Test basic pool attribute tests (sync).

        :avocado: tags=regression,pool,pool_attr,attribute,sync_poolattribute
        """
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        if value[0] is None:
            self.cancel("skipping these tests until DAOS-2170 is fixed")
        expected_for_param.append(value[1])

        attr_dict = {name[0]:value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.pool.set_attr(data=attr_dict)
            size, buf = self.pool.list_attr()

            verify_list_attr(attr_dict, size.value, buf)

            if name[0] is not None:
                # Request something that doesn't exist
                if "Negative" in name[0]:
                    name[0] = "rubbish"
                # large attr test messes with the dictionary so skip
                # the get test
                if "largenumberofattr" not in name[0]:
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: attribute.py

Example 11: CreateManyDkeys

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    """
    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))

            if key > last_key:
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: create_many_dkeys.py

Example 12: RebuildNoCap

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class RebuildNoCap(Test):

    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """
    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = os.path.join(build_paths['PREFIX'],
                               "../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: rebuild_no_cap.py

Example 13: OpenContainerTest

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class OpenContainerTest(Test):
    """
    Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    :avocado: tags=container,containeropen
    """
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool1 = None
        self.pool2 = None
        self.container1 = None
        self.container2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # common parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # pool 1 UID GID
        self.createuid1 = self.params.get("uid", '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid", '/run/createtests/creategid1/')

        # pool 2 UID GID
        self.createuid2 = self.params.get("uid", '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid", '/run/createtests/creategid2/')

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.container1 is not None:
                self.container1.destroy()
            if self.container2 is not None:
                self.container2.destroy()
            if self.pool1 is not None and self.pool1.attached:
                self.pool1.destroy(1)
            if self.pool2 is not None and self.pool2.attached:
                self.pool2.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """
        container_uuid = None
        expected_for_param = []
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        container_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.pool1 = DaosPool(self.context)
            self.pool1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.pool2 = DaosPool(self.context)
            self.pool2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.pool1.connect(1 << 1)
            self.pool2.connect(1 << 1)

            # defines pool handle for container open
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: open.py

Example 14: EightServers

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class EightServers(Test):
    """
    Test class Description: Runs IOR with 8 servers.

    """

    def __init__(self, *args, **kwargs):

        super(EightServers, self).__init__(*args, **kwargs)

        self.basepath = None
        self.server_group = None
        self.context = None
        self.pool = None
        self.num_procs = None
        self.hostlist_servers = None
        self.hostfile_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None
        self.mpio = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        print("<<{}>>".format(self.basepath))
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.num_procs = self.params.get("np", '/run/ior/client_processes/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients, self.workdir,
                                            None))
        print("Host file clients is: {}".format(self.hostfile_clients))

        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist_servers)

    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if self.mpio.mpich_installed(self.hostlist_clients) is False:
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createscm_size, createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print ("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: eight_servers_mpiio.py

Example 15: ContainerAsync

# Required module import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import create [as alias]
class ContainerAsync(Test):
    """
    Tests DAOS pool connect permissions (non existing pool handle, bad uuid)
    and close.
    """

    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None
        self.pool = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)
        time.sleep(10)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            time.sleep(5)
            server_utils.stop_server(hosts=self.hostlist)

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)
#......... remaining code omitted .........
Developer ID: daos-stack, Project: daos, Lines: 103, Source file: container_async.py


Note: The daos_api.DaosPool.create examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their original developers, and copyright remains with the original authors. Please consult the corresponding project's License before distributing or reusing the code; do not reproduce without permission.