

Python Manager.once Method Code Examples

This article collects typical usage examples of the Python method swift.common.manager.Manager.once. If you are wondering what exactly Manager.once does, how to call it, or what real-world uses look like, the curated method examples below should help. You can also explore further usage examples of swift.common.manager.Manager, the class this method belongs to.


The sections below present 15 code examples of the Manager.once method, sorted by popularity by default.
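
All fifteen examples reduce to the same pattern: construct a Manager over one or more Swift server names and call once() to run a single pass of each daemon instead of leaving it running continuously. As a quick orientation, here is a minimal sketch of that pattern; it assumes a Swift All-In-One style environment where these daemons are installed and configured. Both calls shown are taken from the examples below, including the number keyword, which limits the pass to a single numbered server config:

from swift.common.manager import Manager

# run one pass of the object-updater on every configured server
Manager(['object-updater']).once()

# run one pass of only the second configured object-replicator
Manager(['object-replicator']).once(number=2)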

Example 1: get_to_final_state

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
def get_to_final_state():
    # stop any long-running daemons so the single passes below are not
    # racing against background cycles
    replicators = Manager(["account-replicator", "container-replicator", "object-replicator"])
    replicators.stop()
    updaters = Manager(["container-updater", "object-updater"])
    updaters.stop()

    # replicate, run the updaters, then replicate once more so that the
    # listing/stat updates produced by the updaters are themselves replicated
    replicators.once()
    updaters.once()
    replicators.once()
Author: portante, Project: swift, Lines of code: 11, Source file: common.py

Example 2: get_to_final_state

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
def get_to_final_state():
    replicators = Manager(['account-replicator', 'container-replicator',
                           'object-replicator'])
    replicators.stop()
    updaters = Manager(['container-updater', 'object-updater'])
    updaters.stop()

    replicators.once()
    updaters.once()
    replicators.once()
Author: 701, Project: swift, Lines of code: 12, Source file: common.py

Example 3: TestObjectExpirer

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class TestObjectExpirer(unittest.TestCase):

    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(Timestamp(metadata['x-backend-timestamp']) >
                             create_timestamp)
Author: 701, Project: swift, Lines of code: 100, Source file: test_object_expirer.py

Example 4: TestObjectExpirer

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertTrue('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue('x-backend-timestamp' in metadata)
                self.assertTrue(Timestamp(metadata['x-backend-timestamp']) >
                                create_timestamp)

    def test_expirer_object_should_not_be_expired(self):
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
#.........part of the code omitted here.........
Author: nelsonmarcos, Project: swift, Lines of code: 103, Source file: test_object_expirer.py

Example 5: TestReconstructorRebuild

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class TestReconstructorRebuild(ECProbeTest):

    def setUp(self):
        super(TestReconstructorRebuild, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        # sanity
        self.assertEqual(self.policy.policy_type, EC_POLICY)
        self.reconstructor = Manager(["object-reconstructor"])

    def proxy_get(self):
        # GET object
        headers, body = client.get_object(self.url, self.token,
                                          self.container_name,
                                          self.object_name,
                                          resp_chunk_size=64 * 2 ** 10)
        resp_checksum = md5()
        for chunk in body:
            resp_checksum.update(chunk)
        return resp_checksum.hexdigest()

    def direct_get(self, node, part):
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers, data = direct_client.direct_get_object(
            node, part, self.account, self.container_name,
            self.object_name, headers=req_headers,
            resp_chunk_size=64 * 2 ** 20)
        hasher = md5()
        for chunk in data:
            hasher.update(chunk)
        return hasher.hexdigest()

    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        fragment_archive_etag = self.direct_get(node, part)

        # remove data from the selected node
        part_dir = self.storage_dir('object', node, part=part)
        shutil.rmtree(part_dir, True)

        # this node can't serve the data any more
        try:
            self.direct_get(node, part)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (node,))

        # make sure we can still GET the object and it is correct; the
        # proxy is doing decode on remaining fragments to get the obj
        self.assertEqual(etag, self.proxy_get())

        # fire up reconstructor
        self.reconstructor.once()

        # fragment is rebuilt exactly as it was before!
        self.assertEqual(fragment_archive_etag,
                         self.direct_get(node, part))

        # check meta
        meta = client.head_object(self.url, self.token,
                                  self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertTrue(key in meta)
            self.assertEqual(meta[key], headers_post[key])

    def _format_node(self, node):
        return '%s#%s' % (node['device'], node['index'])

    def test_main(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

        # PUT object
        contents = Body()
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}

        etag = client.put_object(self.url, self.token,
                                 self.container_name,
                                 self.object_name,
                                 contents=contents, headers=headers)
        client.post_object(self.url, self.token, self.container_name,
                           self.object_name, headers=headers_post)
        del headers_post['X-Auth-Token']  # WTF, where did this come from?

        # build up a list of node lists to kill data from,
        # first try a single node
        # then adjacent nodes and then nodes >1 node apart
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        single_node = [random.choice(onodes)]
        adj_nodes = [onodes[0], onodes[-1]]
        far_nodes = [onodes[0], onodes[-2]]
        test_list = [single_node, adj_nodes, far_nodes]

#.........part of the code omitted here.........
Author: 2015-ucsc-hp, Project: swift, Lines of code: 103, Source file: test_reconstructor_rebuild.py

Example 6: TestReconstructorRebuild

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]

#.........part of the code omitted here.........
                                 for (node, exc) in failures]))
        return frag_headers, frag_etags

    @contextmanager
    def _annotate_failure_with_scenario(self, failed, non_durable):
        try:
            yield
        except (AssertionError, ClientException) as err:
            self.fail(
                'Scenario with failed nodes: %r, non-durable nodes: %r\n'
                ' failed with:\n%s' %
                ([self._format_node(self.onodes[n]) for n in failed],
                 [self._format_node(self.onodes[n]) for n in non_durable], err)
            )

    def _test_rebuild_scenario(self, failed, non_durable,
                               reconstructor_cycles):
        # helper method to test a scenario with some nodes missing their
        # fragment and some nodes having non-durable fragments
        with self._annotate_failure_with_scenario(failed, non_durable):
            self._break_nodes(failed, non_durable)

        # make sure we can still GET the object and it is correct; the
        # proxy is doing decode on remaining fragments to get the obj
        with self._annotate_failure_with_scenario(failed, non_durable):
            headers, etag = self.proxy_get()
            self.assertEqual(self.etag, etag)
            for key in self.headers_post:
                self.assertIn(key, headers)
                self.assertEqual(self.headers_post[key], headers[key])

        # fire up reconstructor
        for i in range(reconstructor_cycles):
            self.reconstructor.once()

        # check GET via proxy returns expected data and metadata
        with self._annotate_failure_with_scenario(failed, non_durable):
            headers, etag = self.proxy_get()
            self.assertEqual(self.etag, etag)
            for key in self.headers_post:
                self.assertIn(key, headers)
                self.assertEqual(self.headers_post[key], headers[key])
        # check all frags are intact, durable and have expected metadata
        with self._annotate_failure_with_scenario(failed, non_durable):
            frag_headers, frag_etags = self._assert_all_nodes_have_frag()
            self.assertEqual(self.frag_etags, frag_etags)
            # self._frag_headers include X-Backend-Durable-Timestamp so this
            # assertion confirms that the rebuilt frags are all durable
            self.assertEqual(self.frag_headers, frag_headers)

    def test_rebuild_missing_frags(self):
        # build up a list of node lists to kill data from,
        # first try a single node
        # then adjacent nodes and then nodes >1 node apart
        single_node = (random.randint(0, 5),)
        adj_nodes = (0, 5)
        far_nodes = (0, 4)

        for failed_nodes in [single_node, adj_nodes, far_nodes]:
            self._test_rebuild_scenario(failed_nodes, [], 1)

    def test_rebuild_non_durable_frags(self):
        # build up a list of node lists to make non-durable,
        # first try a single node
        # then adjacent nodes and then nodes >1 node apart
        single_node = (random.randint(0, 5),)
Author: mahak, Project: swift, Lines of code: 70, Source file: test_reconstructor_rebuild.py

Example 7: ProbeTest

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class ProbeTest(unittest.TestCase):
    """
    Don't instantiate this directly, use a child class instead.
    """

    def setUp(self):
        p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        print stdout
        Manager(['all']).stop()
        self.pids = {}
        try:
            self.account_ring = get_ring(
                'account',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices)
            self.container_ring = get_ring(
                'container',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server='object')
            Manager(['main']).start(wait=False)
            self.port2server = {}
            for server, port in [('account', 6002), ('container', 6001),
                                 ('object', 6000)]:
                for number in xrange(1, 9):
                    self.port2server[port + (number * 10)] = \
                        '%s%d' % (server, number)
            for port in self.port2server:
                check_server(port, self.port2server, self.pids)
            self.port2server[8080] = 'proxy'
            self.url, self.token, self.account = \
                check_server(8080, self.port2server, self.pids)
            self.configs = defaultdict(dict)
            for name in ('account', 'container', 'object'):
                for server_name in (name, '%s-replicator' % name):
                    for server in Manager([server_name]):
                        for i, conf in enumerate(server.conf_files(), 1):
                            self.configs[server.server][i] = conf
            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass

    def tearDown(self):
        Manager(['all']).kill()

    def get_to_final_state(self):
        # these .stop()s are probably not strictly necessary,
        # but may prevent race conditions
        self.replicators.stop()
        self.updaters.stop()

        self.replicators.once()
        self.updaters.once()
        self.replicators.once()
Author: anishnarang, Project: gswift, Lines of code: 71, Source file: common.py

Example 8: ProbeTest

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class ProbeTest(unittest.TestCase):
    """
    Don't instantiate this directly, use a child class instead.
    """

    def setUp(self):
        p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
        stdout, _stderr = p.communicate()
        print stdout
        Manager(['all']).stop()
        self.pids = {}
        try:
            self.account_ring = get_ring(
                'account',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices)
            self.container_ring = get_ring(
                'container',
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices)
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server='object')
            Manager(['main']).start(wait=False)
            self.port2server = {}
            for server, port in [('account', 6002), ('container', 6001),
                                 ('object', 6000)]:
                for number in xrange(1, 9):
                    self.port2server[port + (number * 10)] = \
                        '%s%d' % (server, number)
            for port in self.port2server:
                check_server(port, self.port2server, self.pids)
            self.port2server[8080] = 'proxy'
            self.url, self.token, self.account = \
                check_server(8080, self.port2server, self.pids)
            self.configs = defaultdict(dict)
            for name in ('account', 'container', 'object'):
                for server_name in (name, '%s-replicator' % name):
                    for server in Manager([server_name]):
                        for i, conf in enumerate(server.conf_files(), 1):
                            self.configs[server.server][i] = conf
            self.replicators = Manager(
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
            self.server_port_to_conf = {}
            # get some backend daemon configs loaded up
            for server in ('account', 'container', 'object'):
                self.server_port_to_conf[server] = build_port_to_conf(server)
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass

    def tearDown(self):
        Manager(['all']).kill()

    def device_dir(self, server, node):
        conf = self.server_port_to_conf[server][node['port']]
        return os.path.join(conf['devices'], node['device'])

    def storage_dir(self, server, node, part=None, policy=None):
        policy = policy or self.policy
        device_path = self.device_dir(server, node)
        path_parts = [device_path, get_data_dir(policy)]
        if part is not None:
            path_parts.append(str(part))
        return os.path.join(*path_parts)

    def config_number(self, node):
        _server_type, config_number = get_server_number(
            node['port'], self.port2server)
        return config_number

    def get_to_final_state(self):
        # these .stop()s are probably not strictly necessary,
        # but may prevent race conditions
        self.replicators.stop()
        self.updaters.stop()

        self.replicators.once()
        self.updaters.once()
        self.replicators.once()
Author: 2015-ucsc-hp, Project: swift, Lines of code: 92, Source file: common.py

Example 9: test_reconciler_move_object_twice

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        self.get_to_final_state()

        # verify entry in the queue
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
#.........part of the code omitted here.........
Author: SmartInfrastructures, Project: swift, Lines of code: 103, Source file: test_container_merge_policy_index.py

Example 10: ProbeTest

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class ProbeTest(unittest.TestCase):
    """
    Don't instantiate this directly, use a child class instead.
    """

    def setUp(self):
        resetswift()
        self.pids = {}
        try:
            self.ipport2server = {}
            self.configs = defaultdict(dict)
            self.account_ring = get_ring(
                "account",
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs,
            )
            self.container_ring = get_ring(
                "container",
                self.acct_cont_required_replicas,
                self.acct_cont_required_devices,
                ipport2server=self.ipport2server,
                config_paths=self.configs,
            )
            self.policy = get_policy(**self.policy_requirements)
            self.object_ring = get_ring(
                self.policy.ring_name,
                self.obj_required_replicas,
                self.obj_required_devices,
                server="object",
                ipport2server=self.ipport2server,
                config_paths=self.configs,
            )

            self.servers_per_port = any(
                int(readconf(c, section_name="object-replicator").get("servers_per_port", "0"))
                for c in self.configs["object-replicator"].values()
            )

            Manager(["main"]).start(wait=False)
            for ipport in self.ipport2server:
                check_server(ipport, self.ipport2server, self.pids)
            proxy_ipport = ("127.0.0.1", 8080)
            self.ipport2server[proxy_ipport] = "proxy"
            self.url, self.token, self.account = check_server(proxy_ipport, self.ipport2server, self.pids)
            self.replicators = Manager(["account-replicator", "container-replicator", "object-replicator"])
            self.updaters = Manager(["container-updater", "object-updater"])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(["all"]).kill()
                except Exception:
                    pass

    def tearDown(self):
        Manager(["all"]).kill()

    def device_dir(self, server, node):
        server_type, config_number = get_server_number((node["ip"], node["port"]), self.ipport2server)
        repl_server = "%s-replicator" % server_type
        conf = readconf(self.configs[repl_server][config_number], section_name=repl_server)
        return os.path.join(conf["devices"], node["device"])

    def storage_dir(self, server, node, part=None, policy=None):
        policy = policy or self.policy
        device_path = self.device_dir(server, node)
        path_parts = [device_path, get_data_dir(policy)]
        if part is not None:
            path_parts.append(str(part))
        return os.path.join(*path_parts)

    def config_number(self, node):
        _server_type, config_number = get_server_number((node["ip"], node["port"]), self.ipport2server)
        return config_number

    def is_local_to(self, node1, node2):
        """
        Return True if both ring devices are "local" to each other (on the same
        "server".
        """
        if self.servers_per_port:
            return node1["ip"] == node2["ip"]

        # Without a disambiguating IP, for SAIOs, we have to assume ports
        # uniquely identify "servers".  SAIOs should be configured to *either*
        # have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
        # ports per server (i.e. sdb1 & sdb5 would have same port numbers in
        # the 8-disk EC ring).
        return node1["port"] == node2["port"]

    def get_to_final_state(self):
        # these .stop()s are probably not strictly necessary,
        # but may prevent race conditions
        self.replicators.stop()
        self.updaters.stop()

        self.replicators.once()
#.........part of the code omitted here.........
Author: heemanshu, Project: swift_liberty, Lines of code: 103, Source file: common.py

Example 11: TestReconstructorRevert

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]

#.........part of the code omitted here.........
                self.direct_get(onode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 507)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (onode,))

        # now take out another primary
        p_dev3 = self.device_dir('object', onodes[2])
        self.kill_drive(p_dev3)

        # this node can't serve the data any more
        try:
            self.direct_get(onodes[2], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 507)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onode,))

        # make sure we can still GET the object and it is correct;
        # we're now pulling from handoffs and reconstructing
        etag = self.proxy_get()
        self.assertEqual(etag, contents.etag)

        # rename the dev dirs so they don't 507 anymore
        self.revive_drive(p_dev1)
        self.revive_drive(p_dev2)
        self.revive_drive(p_dev3)

        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) / 10
            self.reconstructor.once(number=hnode_id)

        # first three primaries have data again
        for onode in (onodes[0], onodes[2]):
            self.direct_get(onode, opart)

        # check meta
        meta = client.head_object(self.url, self.token,
                                  self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])

        # handoffs are empty
        for hnode in hnodes:
            try:
                self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (hnode,))

    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
Author: chenzhongtao, Project: swift, Lines of code: 70, Source file: test_reconstructor_revert.py

Example 12: TestObjectExpirer

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest("Need more than one policy")

        self.expirer = Manager(["object-expirer"])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest("Unable to verify object-expirer service")

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, "probe-test", 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = "container-%s" % uuid.uuid4()
        self.object_name = "object-%s" % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={"X-Delete-After": 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        create_timestamp = Timestamp(metadata["x-timestamp"])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(["object-updater"]).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(["container-updater"]).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4,),
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        self.assertTrue("x-backend-timestamp" in metadata)
        self.assertEqual(Timestamp(metadata["x-backend-timestamp"]), create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                break
        else:
            self.fail("Did not find listing for %s" % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                self.fail("Found listing for %s" % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4,),
                headers={"X-Backend-Storage-Policy-Index": int(policy)},
            )
            if "x-backend-timestamp" in metadata:
                if found_in_policy:
                    self.fail("found object in %s and also %s" % (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue("x-backend-timestamp" in metadata)
                self.assertTrue(Timestamp(metadata["x-backend-timestamp"]) > create_timestamp)
Author: iloveyou416068, Project: swift-1, Lines of code: 100, Source file: test_object_expirer.py

Example 13: ProbeTest

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]

#.........part of the code omitted here.........
        policy = policy or self.policy
        device_path = self.device_dir(server, node)
        path_parts = [device_path, get_data_dir(policy)]
        if part is not None:
            path_parts.append(str(part))
        return os.path.join(*path_parts)

    def config_number(self, node):
        _server_type, config_number = get_server_number(
            (node['ip'], node['port']), self.ipport2server)
        return config_number

    def is_local_to(self, node1, node2):
        """
        Return True if both ring devices are "local" to each other (on the same
        "server".
        """
        if self.servers_per_port:
            return node1['ip'] == node2['ip']

        # Without a disambiguating IP, for SAIOs, we have to assume ports
        # uniquely identify "servers".  SAIOs should be configured to *either*
        # have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
        # ports per server (i.e. sdb1 & sdb5 would have same port numbers in
        # the 8-disk EC ring).
        return node1['port'] == node2['port']

    def get_to_final_state(self):
        # these .stop()s are probably not strictly necessary,
        # but may prevent race conditions
        self.replicators.stop()
        self.updaters.stop()

        self.replicators.once()
        self.updaters.once()
        self.replicators.once()

    def kill_drive(self, device):
        if os.path.ismount(device):
            os.system('sudo umount %s' % device)
        else:
            renamer(device, device + "X")

    def revive_drive(self, device):
        disabled_name = device + "X"
        if os.path.isdir(disabled_name):
            renamer(disabled_name, device)
        else:
            os.system('sudo mount %s' % device)

    def make_internal_client(self):
        tempdir = mkdtemp()
        try:
            conf_path = os.path.join(tempdir, 'internal_client.conf')
            conf_body = """
            [DEFAULT]
            swift_dir = /etc/swift

            [pipeline:main]
            pipeline = catch_errors cache copy proxy-server

            [app:proxy-server]
            use = egg:swift#proxy

            [filter:copy]
            use = egg:swift#copy
Author: jgmerritt, Project: swift, Lines of code: 70, Source file: common.py

Example 14: TestReconstructorRebuild

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]

#.........part of the code omitted here.........
                                 for (node, exc) in failures]))
        return frag_headers, frag_etags

    @contextmanager
    def _annotate_failure_with_scenario(self, failed, non_durable):
        try:
            yield
        except (AssertionError, ClientException) as err:
            self.fail(
                'Scenario with failed nodes: %r, non-durable nodes: %r\n'
                ' failed with:\n%s' %
                ([self._format_node(self.onodes[n]) for n in failed],
                 [self._format_node(self.onodes[n]) for n in non_durable], err)
            )

    def _test_rebuild_scenario(self, failed, non_durable,
                               reconstructor_cycles):
        # helper method to test a scenario with some nodes missing their
        # fragment and some nodes having non-durable fragments
        with self._annotate_failure_with_scenario(failed, non_durable):
            self._break_nodes(failed, non_durable)

        # make sure we can still GET the object and it is correct; the
        # proxy is doing decode on remaining fragments to get the obj
        with self._annotate_failure_with_scenario(failed, non_durable):
            headers, etag = self.proxy_get()
            self.assertEqual(self.etag, etag)
            for key in self.headers_post:
                self.assertIn(key, headers)
                self.assertEqual(self.headers_post[key], headers[key])

        # fire up reconstructor
        for i in range(reconstructor_cycles):
            self.reconstructor.once()

        # check GET via proxy returns expected data and metadata
        with self._annotate_failure_with_scenario(failed, non_durable):
            headers, etag = self.proxy_get()
            self.assertEqual(self.etag, etag)
            for key in self.headers_post:
                self.assertIn(key, headers)
                self.assertEqual(self.headers_post[key], headers[key])
        # check all frags are intact, durable and have expected metadata
        with self._annotate_failure_with_scenario(failed, non_durable):
            frag_headers, frag_etags = self._assert_all_nodes_have_frag()
            self.assertEqual(self.frag_etags, frag_etags)
            # self._frag_headers include X-Backend-Durable-Timestamp so this
            # assertion confirms that the rebuilt frags are all durable
            self.assertEqual(self.frag_headers, frag_headers)

    def test_rebuild_missing_frags(self):
        # build up a list of node lists to kill data from,
        # first try a single node
        # then adjacent nodes and then nodes >1 node apart
        single_node = (random.randint(0, 5),)
        adj_nodes = (0, 5)
        far_nodes = (0, 4)

        for failed_nodes in [single_node, adj_nodes, far_nodes]:
            self._test_rebuild_scenario(failed_nodes, [], 1)

    def test_rebuild_non_durable_frags(self):
        # build up a list of node lists to make non-durable,
        # first try a single node
        # then adjacent nodes and then nodes >1 node apart
        single_node = (random.randint(0, 5),)
Author: bebule, Project: swift, Lines of code: 70, Source file: test_reconstructor_rebuild.py

Example 15: ProbeTest

# Required imports: from swift.common.manager import Manager [as alias]
# Or alternatively: from swift.common.manager.Manager import once [as alias]

#.........part of the code omitted here.........
                ['account-replicator', 'container-replicator',
                 'object-replicator'])
            self.updaters = Manager(['container-updater', 'object-updater'])
        except BaseException:
            try:
                raise
            finally:
                try:
                    Manager(['all']).kill()
                except Exception:
                    pass

    def tearDown(self):
        Manager(['all']).kill()

    def device_dir(self, server, node):
        server_type, config_number = get_server_number(
            (node['ip'], node['port']), self.ipport2server)
        repl_server = '%s-replicator' % server_type
        conf = readconf(self.configs[repl_server][config_number],
                        section_name=repl_server)
        return os.path.join(conf['devices'], node['device'])

    def storage_dir(self, server, node, part=None, policy=None):
        policy = policy or self.policy
        device_path = self.device_dir(server, node)
        path_parts = [device_path, get_data_dir(policy)]
        if part is not None:
            path_parts.append(str(part))
        return os.path.join(*path_parts)

    def config_number(self, node):
        _server_type, config_number = get_server_number(
            (node['ip'], node['port']), self.ipport2server)
        return config_number

    def is_local_to(self, node1, node2):
        """
        Return True if both ring devices are "local" to each other (on the same
        "server".
        """
        if self.servers_per_port:
            return node1['ip'] == node2['ip']

        # Without a disambiguating IP, for SAIOs, we have to assume ports
        # uniquely identify "servers".  SAIOs should be configured to *either*
        # have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique
        # ports per server (i.e. sdb1 & sdb5 would have same port numbers in
        # the 8-disk EC ring).
        return node1['port'] == node2['port']

    def get_to_final_state(self):
        # these .stop()s are probably not strictly necessary,
        # but may prevent race conditions
        self.replicators.stop()
        self.updaters.stop()

        self.replicators.once()
        self.updaters.once()
        self.replicators.once()

    def kill_drive(self, device):
        if os.path.ismount(device):
            os.system('sudo umount %s' % device)
        else:
            renamer(device, device + "X")

    def revive_drive(self, device):
        disabled_name = device + "X"
        if os.path.isdir(disabled_name):
            renamer(device + "X", device)
        else:
            os.system('sudo mount %s' % device)

    def make_internal_client(self, object_post_as_copy=True):
        tempdir = mkdtemp()
        try:
            conf_path = os.path.join(tempdir, 'internal_client.conf')
            conf_body = """
            [DEFAULT]
            swift_dir = /etc/swift

            [pipeline:main]
            pipeline = catch_errors cache proxy-server

            [app:proxy-server]
            use = egg:swift#proxy
            object_post_as_copy = %s

            [filter:cache]
            use = egg:swift#memcache

            [filter:catch_errors]
            use = egg:swift#catch_errors
            """ % object_post_as_copy
            with open(conf_path, 'w') as f:
                f.write(dedent(conf_body))
            return internal_client.InternalClient(conf_path, 'test', 1)
        finally:
            shutil.rmtree(tempdir)
Author: Ahiknsr, Project: swift, Lines of code: 104, Source file: common.py


Note: The swift.common.manager.Manager.once method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their developers; copyright in the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.