当前位置: 首页>>代码示例>>Python>>正文


Python DaosPool.get_uuid_str方法代码示例

本文整理汇总了Python中daos_api.DaosPool.get_uuid_str方法的典型用法代码示例。如果您正苦于以下问题:Python DaosPool.get_uuid_str方法的具体用法?Python DaosPool.get_uuid_str怎么用?Python DaosPool.get_uuid_str使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在daos_api.DaosPool的用法示例。


在下文中一共展示了DaosPool.get_uuid_str方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_global_handle

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # run pool create as the effective user/group of this test
            # process; those ids are guaranteed to be valid
            uid = os.geteuid()
            gid = os.getegid()

            # pool-create parameters supplied through the test yaml
            mode = self.params.get("mode", '/run/testparams/createmode/')
            setid = self.params.get("setname",
                                    '/run/testparams/createset/')
            size = self.params.get("size", '/run/testparams/createsize/')

            # build the python pool object, create the backing daos
            # storage, and connect to it
            pool = DaosPool(self.context)
            pool.create(mode, uid, gid, size, setid, None)
            pool.connect(1 << 1)

            # sanity-check the handle by creating a container through it
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # convert the local pool handle into a global one
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
开发者ID:daos-stack,项目名称:daos,代码行数:51,代码来源:global_handle.py

示例2: test_array_obj

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # pool-create parameters from the yaml
            mode = self.params.get("mode", '/run/pool_params/createmode/')
            setid = self.params.get("setname",
                                    '/run/pool_params/createset/')
            size = self.params.get("size", '/run/pool_params/createsize/')
            uid = os.geteuid()
            gid = os.getegid()

            # build the python pool object and the daos storage behind it
            pool = DaosPool(self.context)
            pool.create(mode, uid, gid, size, setid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # a connection is required before a container can be created
            pool.connect(1 << 1)

            # create a container in the pool and open it
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())
            container.open()

            # query and confirm the UUID matches the one create returned
            container.query()
            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # write a small array of strings under one dkey/akey pair
            thedata = ["data string one",
                       "data string two",
                       "data string tre"]
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the array back and verify the first and last entries
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            for idx in (0, 2):
                if thedata[idx][0:length-1] != thedata2[idx][0:length-1]:
                    self.plog.error("Data mismatch")
                    self.plog.error("Wrote: >%s<", thedata[idx])
                    self.plog.error("Read: >%s<", thedata2[idx])
                    self.fail("Write data, read it back, didn't match\n")

            container.close()

            # give the server a moment before tearing everything down
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
开发者ID:daos-stack,项目名称:daos,代码行数:90,代码来源:array_obj_test.py

示例3: MultipleClients

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class MultipleClients(Test):
    """
    Test class Description: Runs IOR with multiple clients.

    """
    def setUp(self):
        """Start agents and servers and write the host files for the test.

        Populates basepath/context/pool attributes, writes server and
        client host files, launches the DAOS agents and servers, and
        builds IOR on the first test variant only.
        """
        # initialized first so tearDown can always reference it safely
        self.agent_sessions = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/test_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        # fixed message typo: was "Host file clientsis"
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        # build IOR only once, on the first test variant
        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        """Remove host files and the pool, then stop agents and servers."""
        try:
            # remove client hostfile first, then server hostfile
            for hostfile in (self.hostfile_clients, self.hostfile_servers):
                if hostfile is not None:
                    os.remove(hostfile)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            # daemons must come down even if the cleanup above failed
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_multipleclients(self):
        """
        Test ID: DAOS-1263
        Test Description: Test IOR with 16 and 32 clients config.
        Use Cases: Different combinations of 16/32 Clients, 8b/1k/4k
                   record size, 1m/8m stripesize and 16 async io.
        :avocado: tags=ior,twoservers,multipleclients
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        slots = self.params.get("slots", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_size = self.params.get("s", '/run/ior/stripesize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            with open(self.hostfile_clients) as client_file:
                new_text = client_file.read().replace('slots=1',
                                                      'slots={0}').format(slots)

            with open(self.hostfile_clients, "w") as client_file:
                client_file.write(new_text)

            pool_uuid = self.pool.get_uuid_str()
            tmp_rank_list = []
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:multiple_clients.py

示例4: IorSingleServer

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class IorSingleServer(Test):
    """
    Tests IOR with Single Server config.

    """
    def setUp(self):
        """Start agents and servers and write the host files for the test.

        Mirrors MultipleClients.setUp: initializes ``agent_sessions`` up
        front so tearDown's ``finally`` block never sees it undefined if
        setUp fails partway through.
        """
        # initialized first so tearDown can always reference it safely
        # (consistent with the MultipleClients test class)
        self.agent_sessions = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/diff_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        # fixed message typo: was "Host file clientsis"
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        # build IOR only once, on the first test variant
        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        """Remove host files and the pool, then stop agents and servers."""
        try:
            # remove client hostfile first, then server hostfile
            for hostfile in (self.hostfile_clients, self.hostfile_servers):
                if hostfile is not None:
                    os.remove(hostfile)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            # daemons must come down even if the cleanup above failed
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/')
        segment_count = self.params.get("s", '/run/ior/segmentcount/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc)
            pool_uuid = self.pool.get_uuid_str()
            print ("pool_uuid: {}".format(pool_uuid))
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if len(self.hostlist_clients) == 1:
                block_size = '12g'
            elif len(self.hostlist_clients) == 2:
                block_size = '6g'
            elif len(self.hostlist_clients) == 4:
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:ior_single_server.py

示例5: ObjectMetadata

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]

#.........这里部分代码省略.........
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
        """Run every thread in *threads* to completion and report status.

        Workers post their status into ``self.out_queue``; a single
        "FAIL" message fails the whole batch. *operation* is only used
        for log messages. Returns "PASS" or "FAIL".
        """
        self.d_log.debug("IOR {0} Threads Started -----".format(operation))
        for worker in threads:
            worker.start()
        for worker in threads:
            worker.join()

        # drain the result queue; stop at the first failure report
        while not self.out_queue.empty():
            if self.out_queue.get() == "FAIL":
                return "FAIL"
        self.d_log.debug("IOR {0} Threads Finished -----".format(operation))
        return "PASS"

    @avocado.fail_on(DaosApiError)
    def test_metadata_server_restart(self):
        """
        Test ID: DAOS-1512
        Test Description: This test will verify 2000 IOR small size container
                          after server restart. Test will write IOR in 5
                          different threads for faster execution time. Each
                          thread will create 400 (8bytes) containers to the
                          same pool. Restart the servers, read IOR container
                          file written previously and validate data integrity
                          by using IOR option "-R -G 1".
        :avocado: tags=metadata,metadata_ior,nvme,small
        """
        self.pool_connect = False
        files_per_thread = 400
        total_ior_threads = 5
        threads = []
        ior_args = {}

        # build the service rank list string ("rank:rank:...") for IOR
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        svc_list = ""
        for i in range(createsvc):
            svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
        svc_list = svc_list[:-1]

        # common keyword arguments shared by every IOR runner thread
        ior_args['client_hostfile'] = self.hostfile_clients
        ior_args['pool_uuid'] = self.pool.get_uuid_str()
        ior_args['svc_list'] = svc_list
        ior_args['basepath'] = self.basepath
        ior_args['server_group'] = self.server_group
        ior_args['tmp_dir'] = self.workdir
        ior_args['iorwriteflags'] = self.params.get("F",
                                                    '/run/ior/iorwriteflags/')
        ior_args['iorreadflags'] = self.params.get("F",
                                                   '/run/ior/iorreadflags/')
        ior_args['iteration'] = self.params.get("iter", '/run/ior/iteration/')
        ior_args['stripe_size'] = self.params.get("s", '/run/ior/stripesize/*')
        ior_args['stripe_count'] = self.params.get("c", '/run/ior/stripecount/')
        ior_args['async_io'] = self.params.get("a", '/run/ior/asyncio/')
        ior_args['object_class'] = self.params.get("o", '/run/ior/objectclass/')
        ior_args['slots'] = self.params.get("slots", '/run/ior/clientslots/*')

        ior_args['files_per_thread'] = files_per_thread
        self.out_queue = Queue.Queue()

        #IOR write threads
        for i in range(total_ior_threads):
            threads.append(threading.Thread(target=ior_runner_thread,
                                            args=(self.out_queue,
                                                  "Thread-{}".format(i),
                                                  "write"),
                                            kwargs=ior_args))
        if self.thread_control(threads, "write") == "FAIL":
            self.d_log.error(" IOR write Thread FAIL")
            self.fail(" IOR write Thread FAIL")

        #Server Restart
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist_clients, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_clients,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        #Read IOR with verification with same number of threads
        threads = []
        for i in range(total_ior_threads):
            threads.append(threading.Thread(target=ior_runner_thread,
                                            args=(self.out_queue,
                                                  "Thread-{}".format(i),
                                                  "read"),
                                            kwargs=ior_args))
        if self.thread_control(threads, "read") == "FAIL":
            # fixed copy-paste bug: this log line previously said "write"
            # even though it is the read-phase failure path
            self.d_log.error(" IOR read Thread FAIL")
            self.fail(" IOR read Thread FAIL")

示例6: RebuildNoCap

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class RebuildNoCap(Test):

    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """
    def setUp(self):
        """Set up for the test.

        Starts agents and servers, creates the test pool, and seeds it
        with data via an external mpirun'd helper script.
        """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        # BUG FIX: the second component previously started with "/", which
        # makes os.path.join discard PREFIX entirely and yield the absolute
        # path "/../src/..."; a relative component keeps PREFIX in the path.
        exepath = os.path.join(build_paths['PREFIX'],
                               "../src/tests/ftest/util/write_some_data.py")
        # NOTE(review): shell=True with interpolated values; the inputs come
        # from test config, but a list-based call would be safer.
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """Clean up: delete the hostfile, destroy the pool, stop daemons."""
        try:
            os.remove(self.hostfile)
            pool = self.pool
            if pool:
                pool.destroy(1)
        finally:
            # always bring agents and servers down, even on cleanup errors
            sessions = self.agent_sessions
            if sessions:
                AgentUtils.stop_agent(self.hostlist, sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references on of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:rebuild_no_cap.py

示例7: EightServers

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class EightServers(Test):
    """
    Test class Description: Runs IOR with 8 servers.

    """

    def __init__(self, *args, **kwargs):
        """Initialize every per-test attribute to its empty state."""
        super(EightServers, self).__init__(*args, **kwargs)

        # all of these are populated later by setUp()
        for attr in ('basepath', 'server_group', 'context', 'pool',
                     'num_procs', 'hostlist_servers', 'hostfile_servers',
                     'hostlist_clients', 'hostfile_clients', 'mpio'):
            setattr(self, attr, None)

    def setUp(self):
        """Read build paths, write host files, and launch the servers."""
        # paths come from the build_vars file generated at build time
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        print("<<{}>>".format(self.basepath))
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # set up the DAOS python API context
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.num_procs = self.params.get("np", '/run/ior/client_processes/*')
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None)
        print("Host file clients is: {}".format(self.hostfile_clients))

        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        """Destroy any attached pool, then stop the server processes."""
        try:
            pool = self.pool
            if pool is not None and pool.attached:
                pool.destroy(1)
        finally:
            # servers are stopped even if pool destruction raised
            server_utils.stop_server(hosts=self.hostlist_servers)

    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if self.mpio.mpich_installed(self.hostlist_clients) is False:
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createscm_size, createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print ("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:eight_servers_mpiio.py

示例8: test_connect

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]

#.........这里部分代码省略.........
        :avocado: tags=pool,poolconnect,badparam,badconnect
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if not svc == 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
                ctypes.memmove(pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    pool.get_uuid_str()))
                pool.destroy(1)
开发者ID:daos-stack,项目名称:daos,代码行数:104,代码来源:bad_connect.py

示例9: NvmeIo

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]

#.........这里部分代码省略.........
                .format(free_pool_size, expected_pool_size))

    @avocado.fail_on(DaosApiError)
    def test_nvme_io(self):
        """
        Test ID: DAOS-2082
        Test Description: Test will run IOR with standard and non standard
        sizes.IOR will be run for all Object type supported. Purpose is to
        verify pool size (SCM and NVMe) for IOR file.
        This test is running multiple IOR on same server start instance.
        :avocado: tags=nvme,nvme_io,large
        """
        ior_args = {}

        hostlist_clients = self.params.get("clients", '/run/hosts/*')
        tests = self.params.get("ior_sequence", '/run/ior/*')
        object_type = self.params.get("object_type", '/run/ior/*')
        #Loop for every IOR object type
        for obj_type in object_type:
            for ior_param in tests:
                # ior_param is a yaml-defined sequence; indices used below:
                # [0] pool SCM size, [1] pool NVMe size, [2] transfer size,
                # [3] block size, [4] slots per client host
                # NOTE(review): inferred from usage -- confirm against the
                # ior_sequence definition in the yaml.
                self.hostfile_clients = write_host_file.write_host_file(
                    hostlist_clients,
                    self.workdir,
                    ior_param[4])
                #There is an issue with NVMe if Transfer size>64M, Skipped this
                #sizes for now
                if ior_param[2] > 67108864:
                    print ("Xfersize > 64M getting failed, DAOS-1264")
                    continue

                # A fresh pool (with an NVMe component) is created for every
                # (object type, sequence) combination and destroyed at the
                # end of the iteration.
                self.pool = DaosPool(self.context)
                self.pool.create(self.params.get("mode",
                                                 '/run/pool/createmode/*'),
                                 os.geteuid(),
                                 os.getegid(),
                                 ior_param[0],
                                 self.params.get("setname",
                                                 '/run/pool/createset/*'),
                                 nvme_size=ior_param[1])
                self.pool.connect(1 << 1)
                self.pool_connect = True
                createsvc = self.params.get("svcn", '/run/pool/createsvc/')
                # Build the colon-separated pool service rank list the IOR
                # command line expects, e.g. "0:1:2".
                svc_list = ""
                for i in range(createsvc):
                    svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
                svc_list = svc_list[:-1]

                # Collect every argument ior_utils.run_ior needs.
                ior_args['client_hostfile'] = self.hostfile_clients
                ior_args['pool_uuid'] = self.pool.get_uuid_str()
                ior_args['svc_list'] = svc_list
                ior_args['basepath'] = self.basepath
                ior_args['server_group'] = self.server_group
                ior_args['tmp_dir'] = self.workdir
                ior_args['iorflags'] = self.params.get("iorflags",
                                                       '/run/ior/*')
                ior_args['iteration'] = self.params.get("iteration",
                                                        '/run/ior/*')
                ior_args['stripe_size'] = ior_param[2]
                ior_args['block_size'] = ior_param[3]
                ior_args['stripe_count'] = self.params.get("stripecount",
                                                           '/run/ior/*')
                ior_args['async_io'] = self.params.get("asyncio",
                                                       '/run/ior/*')
                ior_args['object_class'] = obj_type
                ior_args['slots'] = ior_param[4]

                #IOR is going to use the same --daos.stripeSize,
                #--daos.recordSize and Transfer size.
                try:
                    # Snapshot pool usage before IOR so verify_pool_size can
                    # compare consumption afterwards.
                    size_before_ior = self.pool.pool_query()
                    ior_utils.run_ior(ior_args['client_hostfile'],
                                      ior_args['iorflags'],
                                      ior_args['iteration'],
                                      ior_args['block_size'],
                                      ior_args['stripe_size'],
                                      ior_args['pool_uuid'],
                                      ior_args['svc_list'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_count'],
                                      ior_args['async_io'],
                                      ior_args['object_class'],
                                      ior_args['basepath'],
                                      ior_args['slots'],
                                      filename=str(uuid.uuid4()),
                                      display_output=True)
                    self.verify_pool_size(size_before_ior, ior_args)
                except ior_utils.IorFailed as exe:
                    print (exe)
                    print (traceback.format_exc())
                    self.fail()
                # Tear the pool down after each iteration; failures here are
                # reported as test failures rather than raised.
                try:
                    if self.pool_connect:
                        self.pool.disconnect()
                        self.pool_connect = False
                    if self.pool:
                        self.pool.destroy(1)
                except DaosApiError as exe:
                    print (exe)
                    self.fail("Failed to Destroy/Disconnect the Pool")
开发者ID:daos-stack,项目名称:daos,代码行数:104,代码来源:nvme_io.py

示例10: SegCount

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]

#.........这里部分代码省略.........
        self.slots = self.params.get("slots", '/run/ior/clientslots/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(hostlist_clients, self.workdir,
                                            self.slots))
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        """Destroy the test pool, then stop agents and servers."""
        try:
            pool = self.pool
            if pool is not None and pool.attached:
                # argument 1 is the value the rest of this file passes to
                # destroy() -- presumably a force flag; TODO confirm
                pool.destroy(1)
        finally:
            # Agent/server shutdown must run even if pool destroy raised.
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_segcount(self):
        """
        Test ID: DAOS-1782
        Test Description: Run IOR with 32,64 and 128 clients with different
                          segment counts.
        Use Cases: Different combinations of 32/64/128 Clients, 8b/1k/4k
                   record size, 1k/4k/1m/8m transfersize and stripesize
                   and 16 async io.
        :avocado: tags=ior,eightservers,ior_segcount,performance
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        # parameters used by IOR
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        block_size = (
            self.params.get("b",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))
        transfer_size = (
            self.params.get("t",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))
        stripe_size = (
            self.params.get("s",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))

        # IOR segment count for each supported (block size, slots-per-client)
        # combination.  A lookup table replaces the original if/elif chain,
        # which left segment_count unbound (NameError at run_ior time) for
        # any combination it did not list; an unsupported combination now
        # fails immediately with a clear message instead.
        segment_count_table = {
            ('4k', 16): 491500,
            ('4k', 32): 245750,
            ('4k', 64): 122875,
            ('1m', 16): 1920,
            ('1m', 32): 960,
            ('1m', 64): 480,
            ('4m', 16): 480,
            ('4m', 32): 240,
            ('4m', 64): 120,
        }
        segment_count = segment_count_table.get((block_size, self.slots))
        if segment_count is None:
            self.fail("Unsupported block_size/slots combination: "
                      "{}/{}".format(block_size, self.slots))

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            # colon-separated pool service rank list, e.g. "0:1:2"
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, self.slots,
                              segment_count)

        except (ior_utils.IorFailed, DaosApiError) as excep:
            self.fail("<SegCount Test FAILED>.{}".format(excep))
开发者ID:daos-stack,项目名称:daos,代码行数:104,代码来源:seg_count.py

示例11: Soak

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class Soak(Test):
    """
    Test class Description: DAOS Soak test cases
    """

    def job_done(self, args):
        """Callback fired when a submitted job completes.

        Args:
            args (dict): carries "handle" (the job ID) and "state"
                (a string describing how the job finished).
        """
        job_id = args["handle"]
        completion_state = args["state"]
        # Record the outcome so the main test loop can inspect it later.
        self.soak_results[job_id] = completion_state


    def create_pool(self):
        """Create the pool that the soak jobs use for storage.

        Pool parameters come from /run/pool1/ in the test yaml; the
        requested service-replica count is kept on self.createsvc because
        build_ior_script needs it later.
        """
        mode = self.params.get("mode", '/run/pool1/createmode/*/')
        uid = os.geteuid()
        gid = os.getegid()
        setid = self.params.get("setname", '/run/pool1/createset/')
        size = self.params.get("size", '/run/pool1/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

        self.pool = DaosPool(self.context)
        self.pool.create(mode, uid, gid, size, setid, None, None,
                         self.createsvc)

    def build_ior_script(self, job):
        """Build an IOR command line and wrap it in a slurm batch script.

        Args:
            job (str): name of the job section to read from the test yaml.

        Returns:
            str: path of the generated slurm script.
        """
        # job-level settings live under /run/<job>/
        job_params = "/run/" + job + "/"
        job_name = self.params.get("name", job_params)
        job_nodes = self.params.get("nodes", job_params)
        job_processes = self.params.get("process_per_node",
                                        job_params)
        job_spec = self.params.get("jobspec", job_params)

        # IOR settings live under /run/<jobspec>/
        spec = "/run/" + job_spec + "/"
        iteration = self.params.get("iter", spec + 'iteration/')
        ior_flags = self.params.get("F", spec + 'iorflags/')
        transfer_size = self.params.get("t", spec + 'transfersize/')
        record_size = self.params.get("r", spec + 'recordsize/*')
        stripe_size = self.params.get("s", spec + 'stripesize/*')
        stripe_count = self.params.get("c", spec + 'stripecount/')
        async_io = self.params.get("a", spec + 'asyncio/')
        object_class = self.params.get("o", spec + 'objectclass/')

        self.partition = self.params.get("partition",
                                         '/run/hosts/test_machines/')

        pool_uuid = self.pool.get_uuid_str()
        # colon-separated list of the pool service replica ranks, e.g. "0:1:2"
        svc_list = ":".join(
            str(int(self.pool.svc.rl_ranks[idx]))
            for idx in range(self.createsvc))

        block_size = '1536m'

        # keep the transfer size in lockstep with an 8m stripe size
        if stripe_size == '8m':
            transfer_size = stripe_size

        hostfile = os.path.join(self.tmpdir, "ior_hosts_" + job_name)

        cmd = ior_utils.get_ior_cmd(ior_flags, iteration, block_size,
                                    transfer_size, pool_uuid, svc_list,
                                    record_size, stripe_size, stripe_count,
                                    async_io, object_class, self.basepath,
                                    hostfile, job_processes)

        output = os.path.join(self.tmpdir, job_name + "_results.out")
        return slurm_utils.write_slurm_script(self.tmpdir, job_name,
                                              output, int(job_nodes), [cmd])

    def setUp(self):

        # intermediate results are stored in this global
        # start off with it empty
        self.soak_results = {}

        self.partition = None

#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:soak.py

示例12: DestroyRebuild

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class DestroyRebuild(Test):

    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: tags=pool,pooldestroy,rebuild,desreb
    """

    build_paths = []
    server_group = ""
    context = None
    pool = None
    hostfile = ""

    def setUp(self):
        """Start DAOS servers/agents and create the pool under test."""
        self.agent_sessions = None

        # locate the install tree via the build-generated json
        with open('../../../.build_vars.json') as build_file:
            paths = json.load(build_file)
        prefix = paths['PREFIX']
        self.context = DaosContext(prefix + '/lib/')
        self.basepath = os.path.normpath(prefix + "/../")

        # write out a hostfile for the configured test machines
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # launch the agents and the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                prefix + '/../')

        # create the pool the rebuild test will operate on
        mode = self.params.get("mode", '/run/pool/createmode/')
        uid = self.params.get("uid", '/run/pool/createuid/')
        gid = self.params.get("gid", '/run/pool/creategid/')
        setid = self.params.get("setname", '/run/pool/createset/')
        size = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(mode, uid, gid, size, setid)
        # return value unused; presumably caches the uuid string on the
        # pool object -- TODO confirm against DaosPool.get_uuid_str
        self.pool.get_uuid_str()

        # brief pause before the test starts, as in the original
        time.sleep(2)

    def tearDown(self):
        """Remove the hostfile, destroy the pool, stop agents and servers."""
        try:
            os.remove(self.hostfile)
            pool = self.pool
            if pool:
                pool.destroy(1)
        finally:
            # always bring down agents and servers, even on cleanup errors
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references on of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            server = DaosServer(self.context, bytes(self.server_group),
                                svr_to_kill)

            print("created server ")

            # BUG if you don't connect the rebuild doesn't start correctly
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 0:
                self.fail("disabled target count wrong.\n")

            print("connect ")

            time.sleep(1)
            server.kill(1)

            print("killed server ")

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            print("exclude target ")

            #self.pool.disconnect()
            #print "disconnect "
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:destroy_rebuild.py

示例13: PoolSvc

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
class PoolSvc(Test):
    """
    Tests svc argument while pool create.

    The "svc" yaml entry used by test_poolsvc is a two-element sequence:
    [0] the number of pool service replicas to request at create time,
    [1] the expected test result ('PASS' or 'FAIL').
    """
    def setUp(self):
        """Start DAOS servers/agents and set up the python API context."""
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        # daosctl is used later to kill the pool service leader
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        """Destroy the pool (if any), then stop agents and servers."""
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            # server/agent shutdown runs regardless of pool destroy outcome
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        # createsvc[0] = requested replica count, createsvc[1] = 'PASS'/'FAIL'
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # checking returned rank list for server more than 1
            # Walk the returned rank list counting leading entries that look
            # valid; 999999 appears to mark an unused slot
            # (NOTE(review): confirm the sentinel value with the API).
            i = 0
            while (
                    int(self.pool.svc.rl_ranks[i]) > 0 and
                    int(self.pool.svc.rl_ranks[i]) <= createsvc[0] and
                    int(self.pool.svc.rl_ranks[i]) != 999999
            ):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            # Accumulate the ranks, checking after each append that no
            # duplicate has appeared so far.
            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[j]))
                if len(rank_list) != len(set(rank_list)):
                    self.fail("Duplicate values in returned rank list")

            # With three replicas, additionally exercise failover: kill the
            # service leader via daosctl, reconnect, then kill a server rank
            # outright and exclude it from the pool.
            if createsvc[0] == 3:
                self.pool.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'
                       .format(self.daosctl, self.pool.get_uuid_str()))
                process.system(cmd)
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                server = DaosServer(self.context, self.server_group, 2)
                server.kill(1)
                self.pool.exclude([2])
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
开发者ID:daos-stack,项目名称:daos,代码行数:100,代码来源:pool_svc.py

示例14: test_bad_handle

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=object,objupdate,objbadhand,regression,vm,small
        """
        # Pre-declare everything the except-branch cleanup touches.  In the
        # original code, a failure during setup (e.g. pool.create) reached
        # the except block with `container`/`saved_oh` unbound, so a
        # NameError masked the real DaosApiError.
        pool = None
        container = None
        saved_oh = None
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, None, None, 2)

            # corrupt the object handle; the next write is expected to fail
            # with DER_NO_HDL (-1002)
            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, obj, None, 2)

            # NOTE(review): the saved handle is restored onto container.oh
            # rather than obj.obj_handle, preserved from the original code;
            # confirm which attribute is intended.
            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            # Guard each cleanup step so a failure during setup does not
            # raise a second, unrelated exception here.
            if container is not None:
                if saved_oh is not None:
                    container.oh = saved_oh
                container.close()
                container.destroy()
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
开发者ID:daos-stack,项目名称:daos,代码行数:69,代码来源:obj_update_bad_param.py

示例15: test_null_values

# 需要导入模块: from daos_api import DaosPool [as 别名]
# 或者: from daos_api.DaosPool import get_uuid_str [as 别名]
    def test_null_values(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a dkey and an akey that is null.

        :avocado: tags=object,objupdate,objupdatenull,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # data used in the test
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup .\n")

        try:
            # try using a null dkey
            dkey = None
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)

            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.error("Didn't get expected return code.")
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                container.close()
                container.destroy()
                pool.disconnect()
                pool.destroy(1)
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # try using a null akey/io descriptor
            dkey = "this is the dkey"
            akey = None
            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # lastly try passing no data
            thedata = None
            thedatasize = 0
            dkey = "this is the dkey"
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.plog.info("Update with no data worked")

        except DaosApiError as excep:
            container.close()
            container.destroy()
            pool.disconnect()
#.........这里部分代码省略.........
开发者ID:daos-stack,项目名称:daos,代码行数:103,代码来源:obj_update_bad_param.py


注:本文中的daos_api.DaosPool.get_uuid_str方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。