This article collects typical usage examples of the get_to_final_state function from Python's test.probe.common module. If you are trying to work out what get_to_final_state does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The page shows 15 code examples of the get_to_final_state function, ordered roughly by popularity.
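A quick note on the helper itself before the examples: get_to_final_state forces the cluster to converge by running the replicator and updater daemons, so that pending object, container and account updates are all applied. A minimal sketch of such a helper, assuming the Manager wrapper from swift.common.manager (the real implementation in test/probe/common.py may differ in detail):

from swift.common.manager import Manager

def get_to_final_state():
    # Replicate so DB rows reach every primary, push the pending
    # container/account updates, then replicate again so the updated
    # rows themselves reach every primary.
    replicators = Manager(['account-replicator', 'container-replicator',
                           'object-replicator'])
    updaters = Manager(['container-updater', 'object-updater'])
    replicators.once()
    updaters.once()
    replicators.once()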
Example 1: test_two_nodes_fail
def test_two_nodes_fail(self):
# Create container1
# Kill container1 servers excepting one of the primaries
# Delete container1 directly to the one primary still up
# Restart other container1 servers
# Get to a final state
# Assert all container1 servers indicate container1 is gone (happens
# because the one node that knew about the delete replicated to the
# others.)
# Assert account level also indicates container1 is gone
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
cnp_port = kill_nonprimary_server(cnodes, self.port2server, self.pids)
kill_server(cnodes[0]['port'], self.port2server, self.pids)
kill_server(cnodes[1]['port'], self.port2server, self.pids)
direct_client.direct_delete_container(cnodes[2], cpart, self.account,
container1)
start_server(cnodes[0]['port'], self.port2server, self.pids)
start_server(cnodes[1]['port'], self.port2server, self.pids)
start_server(cnp_port, self.port2server, self.pids)
get_to_final_state()
for cnode in cnodes:
exc = None
try:
direct_client.direct_get_container(cnode, cpart, self.account,
container1)
except client.ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '0')
self.assertEquals(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0')
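Example 1 drives the cluster through the kill_server, start_server and kill_nonprimary_server helpers from test.probe.common, which are not shown on this page. A rough sketch of how such helpers can work, assuming port2server maps a port to a server name such as 'container1' and pids maps server names to process ids (the real helpers also wait for the port to actually stop or start accepting connections):

from os import kill
from signal import SIGTERM
from subprocess import Popen


def kill_server(port, port2server, pids):
    # Stop the server process listening on this port, e.g. 6011 -> 'container1'.
    server = port2server[port]
    kill(pids[server], SIGTERM)


def start_server(port, port2server, pids):
    # Restart the server for this port from its numbered SAIO config file.
    server = port2server[port]                          # e.g. 'container1'
    server_type, number = server[:-1], server[-1]
    pids[server] = Popen([
        'swift-%s-server' % server_type,
        '/etc/swift/%s-server/%s.conf' % (server_type, number)]).pid


def kill_nonprimary_server(nodes, port2server, pids):
    # Kill one server of the same type that is *not* a primary for `nodes`
    # and return its port so the test can restart it later.
    primary_ports = [n['port'] for n in nodes]
    server_type = port2server[primary_ports[0]][:-1]    # e.g. 'container'
    for port, server in port2server.items():
        if server.startswith(server_type) and port not in primary_ports:
            kill_server(port, port2server, pids)
            return port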
Example 2: test_one_node_fails
def test_one_node_fails(self):
# Create container1
# Kill container1 servers excepting two of the primaries
# Delete container1
# Restart other container1 primary server
# Create container1/object1 (allowed because at least one server thinks
# the container exists)
# Get to a final state
# Assert all container1 servers indicate container1 is alive and
# well with object1
# Assert account level also indicates container1 is alive and
# well with object1
container1 = 'container-%s' % uuid4()
cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
client.put_container(self.url, self.token, container1)
kill_nonprimary_server(cnodes, self.port2server, self.pids)
kill_server(cnodes[0]['port'], self.port2server, self.pids)
client.delete_container(self.url, self.token, container1)
start_server(cnodes[0]['port'], self.port2server, self.pids)
client.put_object(self.url, self.token, container1, 'object1', '123')
get_to_final_state()
for cnode in cnodes:
self.assertEquals(
[o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container1)[1]],
['object1'])
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '3')
Example 3: test_sync
def test_sync(self):
all_objects = []
# upload some containers
for policy in ENABLED_POLICIES:
container = 'container-%s-%s' % (policy.name, uuid.uuid4())
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy': policy.name})
obj = 'object-%s' % uuid.uuid4()
body = 'test-body'
client.put_object(self.url, self.token, container, obj, body)
all_objects.append((policy, container, obj))
Manager(['container-updater']).once()
headers = client.head_account(self.url, self.token)
self.assertEqual(int(headers['x-account-container-count']),
len(ENABLED_POLICIES))
self.assertEqual(int(headers['x-account-object-count']),
len(ENABLED_POLICIES))
self.assertEqual(int(headers['x-account-bytes-used']),
len(ENABLED_POLICIES) * len(body))
part, nodes = self.account_ring.get_nodes(self.account)
for node in nodes:
direct_delete_account(node, part, self.account)
Manager(['account-reaper']).once()
get_to_final_state()
for policy, container, obj in all_objects:
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException as err:
self.assertEquals(err.http_status, 404)
else:
self.fail('Found un-reaped /%s/%s on %r' %
(self.account, container, cnode))
object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
part, nodes = object_ring.get_nodes(self.account, container, obj)
for node in nodes:
try:
direct_get_object(node, part, self.account,
container, obj)
except ClientException as err:
self.assertEquals(err.http_status, 404)
else:
self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
(self.account, container, obj, node, policy))
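Example 3 iterates over ENABLED_POLICIES rather than POLICIES; presumably that constant is the configured policy set with deprecated policies filtered out, roughly along these lines (an assumption, not the verbatim definition from the probe tests):

from swift.common.storage_policy import POLICIES

ENABLED_POLICIES = [policy for policy in POLICIES if not policy.is_deprecated]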
Example 4: test_sysmeta_after_replication_with_prior_post
def test_sysmeta_after_replication_with_prior_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put user meta to first server subset
self.brain.stop_handoff_half()
self._post_object(headers=usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
self.brain.start_handoff_half()
# put newer object with sysmeta to second server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# run replicator
get_to_final_state()
# check stale user metadata is not replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
self.brain.start_primary_half()
# check stale user metadata is removed from second server subset
# and sysmeta is replicated
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
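Example 4 and the two examples after it go through small helpers (_put_object, _post_object, _delete_object, _get_object_metadata and friends) defined elsewhere in the test class. A minimal sketch of how such helpers might wrap python-swiftclient, assuming self.container_name and self.object_name are set up by the test and self.url/self.token are the proxy credentials (the real helpers may add explicit timestamps or accept extra status codes):

from swiftclient import client


def _put_object(self, headers=None, body='test-body'):
    client.put_object(self.url, self.token, self.container_name,
                      self.object_name, body, headers=headers)


def _post_object(self, headers):
    client.post_object(self.url, self.token, self.container_name,
                       self.object_name, headers)


def _delete_object(self):
    client.delete_object(self.url, self.token, self.container_name,
                         self.object_name)


def _get_object_metadata(self):
    # head_object returns the response headers as a dict
    return client.head_object(self.url, self.token, self.container_name,
                              self.object_name)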
Example 5: test_sysmeta_after_replication_with_subsequent_post
def test_sysmeta_after_replication_with_subsequent_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# post some user meta to second server subset
self.brain.stop_handoff_half()
self._post_object(usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta:
self.assertFalse(key in metadata)
self.brain.start_handoff_half()
# run replicator
get_to_final_state()
# check user metadata has been replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
expected = dict(sysmeta)
expected.update(usermeta)
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_primary_half()
# check user metadata and sysmeta both on second server subset
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
Example 6: test_object_delete_is_replicated
def test_object_delete_is_replicated(self):
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object()
self.brain.start_primary_half()
# delete object on second server subset
self.brain.stop_handoff_half()
self._delete_object()
self.brain.start_handoff_half()
# run replicator
get_to_final_state()
# check object deletion has been replicated on first server set
self.brain.stop_primary_half()
self._get_object(expect_statuses=(4,))
self.brain.start_primary_half()
# check object deletion persists on second server set
self.brain.stop_handoff_half()
self._get_object(expect_statuses=(4,))
# put newer object to second server set
self._put_object()
self.brain.start_handoff_half()
# run replicator
get_to_final_state()
# check new object has been replicated on first server set
self.brain.stop_primary_half()
self._get_object()
self.brain.start_primary_half()
# check new object persists on second server set
self.brain.stop_handoff_half()
self._get_object()
Example 7: test_first_node_fail
def test_first_node_fail(self):
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container)
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
object1 = 'object1'
client.put_object(self.url, self.token, container, object1, 'test')
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
self.assert_(object1 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
client.delete_object(self.url, self.token, container, object1)
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
self.assert_(object1 not in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
self.pids[self.port2server[cnodes[0]['port']]] = \
Popen(['chase-container-server',
'/etc/chase/container-server/%d.conf' %
((cnodes[0]['port'] - 6001) / 10)]).pid
sleep(2)
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
# This is okay because the first node hasn't got the update that the
# object was deleted yet.
self.assert_(object1 in [o['name'] for o in
direct_client.direct_get_container(cnodes[0], cpart,
self.account, container)[1]])
# Unfortunately, the following might pass or fail, depending on the
# position of the account server associated with the first container
# server we had killed. If the associated server happens to be the first
# account server, this'll pass; otherwise the first account server will
# serve the listing and not have the container.
# self.assert_(container in [c['name'] for c in
# client.get_account(self.url, self.token)[1]])
object2 = 'object2'
# This will work because at least one (in this case, just one) account
# server has to indicate the container exists for the put to continue.
client.put_object(self.url, self.token, container, object2, 'test')
# First node still doesn't know object1 was deleted yet; this is okay.
self.assert_(object1 in [o['name'] for o in
direct_client.direct_get_container(cnodes[0], cpart,
self.account, container)[1]])
# And, of course, our new object2 exists.
self.assert_(object2 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
get_to_final_state()
# Our container delete never "finalized" because we started using it
# before the delete settled.
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
# And, so our object2 should still exist and object1's delete should
# have finalized.
self.assert_(object1 not in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
self.assert_(object2 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
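The config path arithmetic in Example 7 relies on the classic SAIO layout, where the four container servers listen on ports 6011, 6021, 6031 and 6041 and read 1.conf through 4.conf, so (port - 6001) / 10 recovers the config number (assuming that layout):

for port in (6011, 6021, 6031, 6041):
    print((port - 6001) // 10)    # -> 1, 2, 3, 4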
Example 8: one
# serve the listing and not have the container.
# self.assert_(container in [c['name'] for c in
# client.get_account(self.url, self.token)[1]])
object2 = 'object2'
# This will work because at least one (in this case, just one) account
# server has to indicate the container exists for the put to continue.
client.put_object(self.url, self.token, container, object2, 'test')
self.assert_(object1 not in [o['name'] for o in
direct_client.direct_get_container(cnodes[0], cpart,
self.account, container)[1]])
# And, of course, our new object2 exists.
self.assert_(object2 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
get_to_final_state()
# Our container delete never "finalized" because we started using it
# before the delete settled.
self.assert_(container in [c['name'] for c in
client.get_account(self.url, self.token)[1]])
# And, so our object2 should still exist and object1's delete should
# have finalized.
self.assert_(object1 not in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
self.assert_(object2 in [o['name'] for o in
client.get_container(self.url, self.token, container)[1]])
def _get_db_file_path(self, obj_dir):
files = sorted(os.listdir(obj_dir), reverse=True)
for file in files:
if file.endswith('db'):
#......... (rest of this example omitted) .........
Example 9: test_main
def test_main(self):
container1 = 'container1'
client.put_container(self.url, self.token, container1)
container2 = 'container2'
client.put_container(self.url, self.token, container2)
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for c in containers:
if c['name'] == container1:
found1 = True
self.assertEquals(c['count'], 0)
self.assertEquals(c['bytes'], 0)
elif c['name'] == container2:
found2 = True
self.assertEquals(c['count'], 0)
self.assertEquals(c['bytes'], 0)
self.assert_(found1)
self.assert_(found2)
client.put_object(self.url, self.token, container2, 'object1', '1234')
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for c in containers:
if c['name'] == container1:
found1 = True
self.assertEquals(c['count'], 0)
self.assertEquals(c['bytes'], 0)
elif c['name'] == container2:
found2 = True
self.assertEquals(c['count'], 0)
self.assertEquals(c['bytes'], 0)
self.assert_(found1)
self.assert_(found2)
get_to_final_state()
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for c in containers:
if c['name'] == container1:
found1 = True
self.assertEquals(c['count'], 0)
self.assertEquals(c['bytes'], 0)
elif c['name'] == container2:
found2 = True
self.assertEquals(c['count'], 1)
self.assertEquals(c['bytes'], 4)
self.assert_(found1)
self.assert_(found2)
apart, anodes = self.account_ring.get_nodes(self.account)
kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
client.delete_container(self.url, self.token, container1)
client.put_object(self.url, self.token, container2, 'object2', '12345')
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for c in containers:
if c['name'] == container1:
found1 = True
elif c['name'] == container2:
found2 = True
self.assertEquals(c['count'], 1)
self.assertEquals(c['bytes'], 4)
self.assert_(not found1)
self.assert_(found2)
ps = []
for n in xrange(1, 5):
ps.append(Popen(['swift-container-updater',
'/etc/swift/container-server/%d.conf' % n,
'once']))
for p in ps:
p.wait()
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '2')
self.assertEquals(headers['x-account-bytes-used'], '9')
found1 = False
found2 = False
for c in containers:
if c['name'] == container1:
found1 = True
elif c['name'] == container2:
found2 = True
#......... (rest of this example omitted) .........
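The four swift-container-updater processes spawned above predate the Manager helper used in the newer examples on this page; the same step can be written as a single call (a sketch, assuming the standard swift.common.manager wrapper):

from swift.common.manager import Manager

Manager(['container-updater']).once()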
Example 10: test_main
def test_main(self):
# Create container1 and container2
container1 = 'container1'
client.put_container(self.url, self.token, container1)
container2 = 'container2'
client.put_container(self.url, self.token, container2)
# Assert account level sees them
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEquals(container['count'], 0)
self.assertEquals(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEquals(container['count'], 0)
self.assertEquals(container['bytes'], 0)
self.assert_(found1)
self.assert_(found2)
# Create container2/object1
client.put_object(self.url, self.token, container2, 'object1', '1234')
# Assert account level doesn't see it yet
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEquals(container['count'], 0)
self.assertEquals(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEquals(container['count'], 0)
self.assertEquals(container['bytes'], 0)
self.assert_(found1)
self.assert_(found2)
# Get to final state
get_to_final_state()
# Assert account level now sees the container2/object1
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
self.assertEquals(container['count'], 0)
self.assertEquals(container['bytes'], 0)
elif container['name'] == container2:
found2 = True
self.assertEquals(container['count'], 1)
self.assertEquals(container['bytes'], 4)
self.assert_(found1)
self.assert_(found2)
apart, anodes = self.account_ring.get_nodes(self.account)
kill_nonprimary_server(anodes, self.port2server, self.pids)
kill_server(anodes[0]['port'], self.port2server, self.pids)
# Kill account servers excepting two of the primaries
# Delete container1
client.delete_container(self.url, self.token, container1)
# Put container2/object2
client.put_object(self.url, self.token, container2, 'object2', '12345')
# Assert account level knows container1 is gone but doesn't know about
# container2/object2 yet
headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4')
found1 = False
found2 = False
for container in containers:
if container['name'] == container1:
found1 = True
elif container['name'] == container2:
found2 = True
self.assertEquals(container['count'], 1)
self.assertEquals(container['bytes'], 4)
self.assert_(not found1)
self.assert_(found2)
# Run container updaters
#......... (rest of this example omitted) .........
Example 11: test_merge_storage_policy_index
def test_merge_storage_policy_index(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.put_object()
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) > 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
continue
orig_policy_index = policy_index
break
if orig_policy_index is not None:
break
else:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) == 1,
'primary nodes disagree about policy index %r' %
head_responses)
expected_policy_index = found_policy_indexes.pop()
self.assertNotEqual(orig_policy_index, expected_policy_index)
# validate object placement
orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
'/etc/swift')
for node in orig_policy_ring.devs:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': orig_policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
raise
else:
self.fail('Found /%s/%s/%s in %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index))
# use proxy to access object (bad container info might be cached...)
timeout = time.time() + TIMEOUT
while time.time() < timeout:
try:
metadata = client.head_object(self.url, self.token,
self.container_name,
self.object_name)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
time.sleep(1)
else:
break
else:
self.fail('could not HEAD /%s/%s/%s/ from policy %s '
'after %s seconds.' % (
self.account, self.container_name, self.object_name,
expected_policy_index, TIMEOUT))
Example 12: test_reconciler_move_object_twice
def test_reconciler_move_object_twice(self):
# select some policies
old_policy = random.choice(ENABLED_POLICIES)
new_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# setup a split brain
self.brain.stop_handoff_half()
# get old_policy on two primaries
self.brain.put_container(policy_index=int(old_policy))
self.brain.start_handoff_half()
self.brain.stop_primary_half()
# force a recreate on handoffs
self.brain.put_container(policy_index=int(old_policy))
self.brain.delete_container()
self.brain.put_container(policy_index=int(new_policy))
self.brain.put_object() # populate memcache with new_policy
self.brain.start_primary_half()
# at this point two primaries have old policy
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
old_container_node_ids = [
node['id'] for node, metadata in head_responses
if int(old_policy) ==
int(metadata['X-Backend-Storage-Policy-Index'])]
self.assertEqual(2, len(old_container_node_ids))
# hopefully memcache still has the new policy cached
self.brain.put_object()
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
client = InternalClient(conf_file, 'probe-test', 3)
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# shutdown the containers that know about the new policy
self.brain.stop_handoff_half()
# and get rows enqueued from old nodes
for server_type in ('container-replicator', 'container-updater'):
server = Manager([server_type])
tuple(server.once(number=n + 1) for n in old_container_node_ids)
# verify entry in the queue for the "misplaced" new_policy
for container in client.iter_containers('.misplaced_objects'):
for obj in client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (new_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# verify object in old_policy
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# verify object is *not* in new_policy
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
get_to_final_state()
# verify entry in the queue
client = InternalClient(conf_file, 'probe-test', 3)
for container in client.iter_containers('.misplaced_objects'):
for obj in client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (old_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# and now it flops back
client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
#......... (rest of this example omitted) .........
Example 13: test_reconcile_manifest
def test_reconcile_manifest(self):
manifest_data = []
def write_part(i):
body = 'VERIFY%0.2d' % i + '\x00' * 1048576
part_name = 'manifest_part_%0.2d' % i
manifest_entry = {
"path": "/%s/%s" % (self.container_name, part_name),
"etag": md5(body).hexdigest(),
"size_bytes": len(body),
}
client.put_object(self.url, self.token, self.container_name,
part_name, contents=body)
manifest_data.append(manifest_entry)
# get an old container stashed
self.brain.stop_primary_half()
policy = random.choice(ENABLED_POLICIES)
self.brain.put_container(policy.idx)
self.brain.start_primary_half()
# write some parts
for i in range(10):
write_part(i)
self.brain.stop_handoff_half()
wrong_policy = random.choice([p for p in ENABLED_POLICIES
if p is not policy])
self.brain.put_container(wrong_policy.idx)
# write some more parts
for i in range(10, 20):
write_part(i)
# write manifest
try:
client.put_object(self.url, self.token, self.container_name,
self.object_name,
contents=utils.json.dumps(manifest_data),
query_string='multipart-manifest=put')
except ClientException as err:
# so as it works out, you can't really upload a multi-part
# manifest for objects that are currently misplaced - you have to
# wait until they're all available - which is about the same as
# some other failure that causes data to be unavailable to the
# proxy at the time of upload
self.assertEqual(err.http_status, 400)
# but what the heck, we'll sneak one in just to see what happens...
direct_manifest_name = self.object_name + '-direct-test'
object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, direct_manifest_name)
container_part = self.container_ring.get_part(self.account,
self.container_name)
def translate_direct(data):
return {
'hash': data['etag'],
'bytes': data['size_bytes'],
'name': data['path'],
}
direct_manifest_data = map(translate_direct, manifest_data)
headers = {
'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
in self.container_ring.devs),
'x-container-device': ','.join(n['device'] for n in
self.container_ring.devs),
'x-container-partition': container_part,
'X-Backend-Storage-Policy-Index': wrong_policy.idx,
'X-Static-Large-Object': 'True',
}
for node in nodes:
direct_client.direct_put_object(
node, part, self.account, self.container_name,
direct_manifest_name,
contents=utils.json.dumps(direct_manifest_data),
headers=headers)
break # one should do it...
self.brain.start_handoff_half()
get_to_final_state()
Manager(['container-reconciler']).once()
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# let's see how that direct upload worked out...
metadata, body = client.get_object(
self.url, self.token, self.container_name, direct_manifest_name,
query_string='multipart-manifest=get')
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
for i, entry in enumerate(utils.json.loads(body)):
for key in ('hash', 'bytes', 'name'):
self.assertEquals(entry[key], direct_manifest_data[i][key])
metadata, body = client.get_object(
self.url, self.token, self.container_name, direct_manifest_name)
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
for i in range(20)))
#......... (rest of this example omitted) .........
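translate_direct in Example 13 converts the user-facing SLO manifest keys (path, etag, size_bytes) into the internal listing keys (name, hash, bytes) that the directly PUT manifest with X-Static-Large-Object: True carries in that example. For instance (hypothetical values):

entry = {'path': '/c/manifest_part_00', 'etag': 'abc123', 'size_bytes': 1048584}
translate_direct(entry)
# -> {'name': '/c/manifest_part_00', 'hash': 'abc123', 'bytes': 1048584}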
Example 14: test_reconcile_delete
def test_reconcile_delete(self):
# generic split brain
self.brain.stop_primary_half()
self.brain.put_container()
self.brain.put_object()
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.delete_object()
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
self.assert_(len(found_policy_indexes) > 1,
'primary nodes did not disagree about policy index %r' %
head_responses)
# find our object
orig_policy_index = ts_policy_index = None
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if 'x-backend-timestamp' in err.http_headers:
ts_policy_index = policy_index
break
else:
orig_policy_index = policy_index
break
if not orig_policy_index:
self.fail('Unable to find /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
if not ts_policy_index:
self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
self.account, self.container_name, self.object_name,
found_policy_indexes))
get_to_final_state()
Manager(['container-reconciler']).once()
# validate containers
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
new_found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for node,
metadata in head_responses)
self.assert_(len(new_found_policy_indexes) == 1,
'primary nodes disagree about policy index %r' %
dict((node['port'],
metadata['X-Backend-Storage-Policy-Index'])
for node, metadata in head_responses))
expected_policy_index = new_found_policy_indexes.pop()
self.assertEqual(orig_policy_index, expected_policy_index)
# validate object fully deleted
for policy_index in found_policy_indexes:
object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
part, nodes = object_ring.get_nodes(
self.account, self.container_name, self.object_name)
for node in nodes:
try:
direct_client.direct_head_object(
node, part, self.account, self.container_name,
self.object_name,
headers={'X-Backend-Storage-Policy-Index':
policy_index})
except direct_client.ClientException as err:
if err.http_status == HTTP_NOT_FOUND:
continue
else:
self.fail('Found /%s/%s/%s in %s on %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index, node))
Example 15: test_expirer_object_split_brain
def test_expirer_object_split_brain(self):
old_policy = random.choice(list(POLICIES))
wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={'X-Delete-After': 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
create_timestamp = Timestamp(metadata['x-timestamp'])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(['object-updater']).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
self.assert_('x-backend-timestamp' in metadata)
self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
# but it is still in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
break
else:
self.fail('Did not find listing for %s' % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
self.fail('Found listing for %s' % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in POLICIES:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(policy)})
if 'x-backend-timestamp' in metadata:
if found_in_policy:
self.fail('found object in %s and also %s' %
(found_in_policy, policy))
found_in_policy = policy
self.assert_('x-backend-timestamp' in metadata)
self.assert_(Timestamp(metadata['x-backend-timestamp']) >
create_timestamp)
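Example 15 drives self.expirer directly. In the probe tests that attribute is presumably the Manager wrapper around the object-expirer daemon, created once during test setup, roughly like this (a sketch, not the verbatim setUp from the source):

from swift.common.manager import Manager

# presumably created once in the test's setUp():
expirer = Manager(['object-expirer'])
# each self.expirer.once() call in the example then runs one expirer pass
expirer.once()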