

Python DaosPool.connect Method Code Examples

This article collects typical usage examples of the Python method daos_api.DaosPool.connect. If you are wondering what DaosPool.connect does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples for the enclosing class, daos_api.DaosPool.


The following 15 code examples of the DaosPool.connect method are shown, sorted by popularity by default.
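All of the examples below share the same basic life cycle: build a DaosPool from a DaosContext, create the underlying storage, connect (most tests pass the flag 1 << 1, i.e. the value 2), do their container/object work, then disconnect and destroy the pool. As a quick orientation, here is a minimal, hedged sketch of that flow; it only uses calls that appear in the examples, and the create parameters (mode, size, set id) are placeholders standing in for the yaml-driven values the tests read with self.params.get().

import os

from daos_api import DaosPool, DaosApiError


def pool_connect_sketch(context, createmode, createsize, createsetid):
    """Minimal sketch of the DaosPool.connect life cycle used below."""
    pool = None
    try:
        # initialize a python pool object then create the underlying
        # daos storage (uid/gid of the current user, as in the examples)
        pool = DaosPool(context)
        pool.create(createmode, os.geteuid(), os.getegid(),
                    createsize, createsetid, None)

        # a connection is needed before any container can be created;
        # 1 << 1 is the connect flag the examples pass almost everywhere
        pool.connect(1 << 1)

        # ... create containers, write objects, run the actual test ...

    except DaosApiError as excep:
        # the tests typically log the exception and decide pass/fail here
        print(excep)
    finally:
        # no matter what happened, release the handle and reclaim storage
        if pool is not None:
            pool.disconnect()
            pool.destroy(1)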

Example 1: test_destroy_connect

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
    def test_destroy_connect(self):
        """
        Test destroying a pool that has a connected client with force == false.
        Should fail.

        :avocado: tags=pool,pooldestroy,x
        """
        host = self.hostlist[0]
        try:

            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # destroy pool with connection open
            pool.destroy(0)

            # should throw an exception and not hit this
            self.fail("Shouldn't hit this line.\n")

        except DaosApiError as excep:
            print("got exception which is expected so long as it is BUSY")
            print(excep)
            print(traceback.format_exc())
            # pool should still be there
            exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
            if exists != 0:
                self.fail("Pool gone, but destroy should have failed.\n")

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Developer ID: daos-stack, Project: daos, Lines of code: 58, Source: destroy_tests.py

Example 2: test_global_handle

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Developer ID: daos-stack, Project: daos, Lines of code: 51, Source: global_handle.py
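Example 2 verifies the global handle in the same process, but the commented-out lines show the intended cross-process pattern. A hedged sketch of that pattern, assuming the check_handle helper defined in global_handle.py and the standard multiprocessing module, would look roughly like this once on-line server addition makes it possible:

from multiprocessing import Process

# serialize the local pool handle into a global representation
iov_len, buf_len, buf = pool.local2global()

# hand the buffer to a second process; check_handle (assumed to be the
# helper already used in global_handle.py) rebuilds and validates the
# handle on the other side
arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
proc = Process(target=check_handle, args=arg_list)
proc.start()
proc.join()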

Example 3: test_bad_handle

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=object,objupdate,objbadhand,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, None, None, 2)

            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, obj, None, 2)

            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Developer ID: daos-stack, Project: daos, Lines of code: 69, Source: obj_update_bad_param.py

Example 4: test_container_create

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        expected_results = []

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Developer ID: daos-stack, Project: daos, Lines of code: 74, Source: create.py

Example 5: OpenClose

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None
        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '../../../../.build_vars.json')) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
                server_utils.stop_server(hosts=self.hostlist)
            except server_utils.ServerFailed:
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: open_close.py

Example 6: CreateManyDkeys

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    """
    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))

            if key > last_key:
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: create_many_dkeys.py

Example 7: ObjectMetadata

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients, self.workdir))
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        # This should fail with a no-metadata-space error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print (exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify that metadata space is released
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: metadata.py

Example 8: PoolAttributeTest

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class PoolAttributeTest(Test):
    """
    Test class Description: Tests DAOS pool attribute get/set/list.
    """
    def setUp(self):
        try:
            self.pool = None
            self.hostlist = None
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
                server_group = self.params.get("name",
                                               '/server_config/',
                                               'daos_server')
                context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                server_utils.run_server(self.hostfile, server_group, basepath)

                createmode = self.params.get("mode",
                                             '/run/attrtests/createmode/')
                createuid = os.geteuid()
                creategid = os.getgid()
                createsetid = self.params.get("setname",
                                              '/run/attrtests/createset/')
                createsize = self.params.get("size",
                                             '/run/attrtests/createsize/')

                self.pool = DaosPool(context)
                self.pool.create(createmode, createuid, creategid, createsize,
                                 createsetid)
                self.pool.connect(1 << 1)
                self.large_data_set = {}

        except DaosApiError as excep:
            print("In the setup exception handler\n")
            print(excep)
            print(traceback.format_exc())

    def tearDown(self):
        try:
            if self.pool is not None:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist)

    def create_data_set(self):
        """
        To create the large attribute dictionary
        """
        allchar = string.ascii_letters + string.digits
        for i in range(1024):
            self.large_data_set[str(i)] = (
                "".join(random.choice(allchar)
                        for x in range(random.randint(1, 100))))

    def test_pool_attributes(self):
        """
        Test ID: DAOS-1359

        Test description: Test basic pool attribute tests (sync).

        :avocado: tags=regression,pool,pool_attr,attribute,sync_poolattribute
        """
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        if value[0] is None:
            self.cancel("skipping these tests until DAOS-2170 is fixed")
        expected_for_param.append(value[1])

        attr_dict = {name[0]:value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.pool.set_attr(data=attr_dict)
            size, buf = self.pool.list_attr()

            verify_list_attr(attr_dict, size.value, buf)

            if name[0] is not None:
                # Request something that doesn't exist
                if "Negative" in name[0]:
                    name[0] = "rubbish"
                # large attr test messes with the dictionary so skip
                # the get test
                if "largenumberofattr" not in name[0]:
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: attribute.py

Example 9: BasicSnapshot

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class BasicSnapshot(Test):
    """
    DAOS-1370 Basic snapshot test

    Test Class Description:
    Test that a snapshot taken of a container remains unchanged even after an
    object in the container has been updated 500 times.
    Create the container.
    Write an object to the container.
    Take a snapshot.
    Write 500 changes to the KV pair of the object.
    Check that the snapshot is still there.
    Confirm that the data in the snapshot is unchanged.
    Destroy the snapshot
    """

    def __init__(self, *args, **kwargs):
        super(BasicSnapshot, self).__init__(*args, **kwargs)
        self.snapshot = None

    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server()

    def test_basic_snapshot(self):
        """
        Test ID: DAOS-1370

        Test Description:
        Create a pool, container in the pool, object in the container, add
        one key:value to the object.
        Commit the transaction. Perform a snapshot create on the container.
        Create 500 additional transactions with a small change to the object
        in each and commit each after the object update is done.
        Verify the snapshot is still available and the contents remain in
        their original state.

        :avocado: tags=snapshot,basicsnap
        """

        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = "Now is the winter of our discontent made glorious"
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: basic_snapshot.py

Example 10: PunchTest

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.
    """
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

                self.server_group = self.params.get("name", '/server_config/',
                                                    'daos_server')

                # setup the DAOS python API
                self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                           self.hostlist)
                server_utils.run_server(self.hostfile, self.server_group,
                                        self.basepath)

                # parameters used in pool create
                createmode = self.params.get("mode", '/run/pool/createmode/')
                createsetid = self.params.get("setname", '/run/pool/createset/')
                createsize = self.params.get("size", '/run/pool/createsize/')

                createuid = os.geteuid()
                creategid = os.getegid()

                # initialize a python pool object then create the underlying
                # daos storage
                self.pool = DaosPool(self.context)
                self.pool.create(createmode, createuid, creategid,
                                 createsize, createsetid, None)
                self.pool.connect(1 << 1)

                # create a container
                self.container = DaosContainer(self.context)
                self.container.create(self.pool.handle)

                # now open it
                self.container.open()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):

        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """

        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure it's correct
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: punch_test.py

Example 11: DestroyRebuild

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class DestroyRebuild(Test):

    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: tags=pool,pooldestroy,rebuild,desreb
    """

    build_paths = []
    server_group = ""
    context = None
    pool = None
    hostfile = ""

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        self.pool.get_uuid_str()

        time.sleep(2)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            server = DaosServer(self.context, bytes(self.server_group),
                                svr_to_kill)

            print("created server ")

            # BUG if you don't connect the rebuild doesn't start correctly
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 0:
                self.fail("disabled target count wrong.\n")

            print("connect ")

            time.sleep(1)
            server.kill(1)

            print("killed server ")

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            print("exclude target ")

            #self.pool.disconnect()
            #print "disconnect "
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: destroy_rebuild.py

Example 12: ObjOpenBadParam

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class ObjOpenBadParam(Test):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_open function.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize values for variables that are used in tearDown() such that
        if setUp() fails for any reason, tearDown() will avoid throwing
        an AttributeError exception.
        """
        super(ObjOpenBadParam, self).__init__(*args, **kwargs)
        self.container = None
        self.pool = None

    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(thedata,
                                                               self.datasize,
                                                               self.dkey,
                                                               self.akey,
                                                               obj_cls=1)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")

    def tearDown(self):
        try:
            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: obj_open_bad_param.py

Example 13: PoolSvc

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class PoolSvc(Test):
    """
    Tests the svc argument during pool create.
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # check the returned rank list when more than one server is requested
            i = 0
            while (
                    int(self.pool.svc.rl_ranks[i]) > 0 and
                    int(self.pool.svc.rl_ranks[i]) <= createsvc[0] and
                    int(self.pool.svc.rl_ranks[i]) != 999999
            ):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[j]))
                if len(rank_list) != len(set(rank_list)):
                    self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.pool.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'
                       .format(self.daosctl, self.pool.get_uuid_str()))
                process.system(cmd)
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                server = DaosServer(self.context, self.server_group, 2)
                server.kill(1)
                self.pool.exclude([2])
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Developer ID: daos-stack, Project: daos, Lines of code: 100, Source: pool_svc.py

Example 14: RebuildNoCap

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class RebuildNoCap(Test):

    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """
    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = os.path.join(build_paths['PREFIX'],
                               "/../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: rebuild_no_cap.py

Example 15: ObjectDataValidation

# Required import: from daos_api import DaosPool [as alias]
# Or: from daos_api.DaosPool import connect [as alias]
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create different length records,
        disconnect the pool/container and reconnect,
        and validate the data after reconnect.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        #Close the Obj/Container, Disconnect the Pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        #Connect Pool, Open Container and Object
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=single_object,data_verification,medium,vm
        """
#......... portions of this code omitted .........
Developer ID: daos-stack, Project: daos, Lines of code: 103, Source: object_integrity.py


Note: The daos_api.DaosPool.connect examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution and use should follow the corresponding project's License. Do not reproduce without permission.