This article collects typical code examples of the Python method swift.common.internal_client.InternalClient.iter_objects. If you are wondering what InternalClient.iter_objects does, how to call it, or what real-world usage looks like, the curated examples here should help. You can also browse further usage examples of the containing class, swift.common.internal_client.InternalClient.
The 14 code examples of InternalClient.iter_objects shown below are sorted by popularity by default.
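Before the numbered examples, here is a minimal, self-contained sketch of the call pattern they all share: construct an InternalClient from a proxy pipeline config and iterate a container listing. The config path, user agent, and account/container names below are placeholder assumptions, not values taken from the examples.
from swift.common.internal_client import InternalClient

# Placeholder config path and user agent; adjust to your deployment.
client = InternalClient('/etc/swift/internal-client.conf', 'example-agent', 3)
for obj in client.iter_objects('AUTH_test', 'my-container'):
    # Each entry is a dict from the container listing, e.g. 'name' and 'bytes'.
    print('%s (%s bytes)' % (obj['name'], obj['bytes']))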
Example 1: download
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
def download(self, acc, container, u_agent, delay=0, request_tries=3):
self.logger.info('Prefetching objects with InternalClient with ' + str(delay) + ' seconds of delay.')
time.sleep(delay)
swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
headers = {}
prefetch_list = []
bytes_count = 0
for o in swift.iter_objects(acc, container):
if bytes_count + int(o['bytes']) < self.cache_max_size:
prefetch_list.append(o['name'])
bytes_count += int(o['bytes'])
else:
break
for name in prefetch_list:
object_path = '/v1/' + acc + '/' + container + '/' + name
oid = hashlib.md5(object_path).hexdigest()
status, resp_headers, it = swift.get_object(acc, container, name, headers, ACCEPTABLE_STATUS)
object_size = int(resp_headers.get('Content-Length'))
object_etag = resp_headers.get('Etag')
object_storage_policy_id = '0' # FIXME hardcoded
to_evict = self.cache.access_cache("PUT", oid, object_size, object_etag, object_storage_policy_id)
for ev_object_id in to_evict:
os.remove(os.path.join(self.cache_path, ev_object_id))
self.logger.info('Prefetch Filter - Object ' + name + ' stored in cache with ID: ' + oid)
with open(os.path.join(self.cache_path, oid), 'w') as f:
for el in it:
f.write(el)
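The selection loop above (accumulate names in listing order until the cache budget would be exceeded) can be factored into a small helper. This is only a sketch reusing the iter_objects call and the 'name'/'bytes' listing keys from Example 1; the function name and parameters are illustrative.
def objects_within_budget(swift, account, container, max_bytes):
    """Return object names, in listing order, whose cumulative size fits max_bytes."""
    names, total = [], 0
    for obj in swift.iter_objects(account, container):
        size = int(obj['bytes'])
        if total + size >= max_bytes:
            break  # same rule as above: stop at the first object that would overflow
        names.append(obj['name'])
        total += size
    return names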
Example 2: ContainerReconciler
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
#......... (part of the code omitted here) .........
return success
def _iter_containers(self):
"""
Generate a list of containers to process.
"""
# hit most recent container first instead of waiting on the updaters
current_container = get_reconciler_container_name(time.time())
yield current_container
container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
self.logger.debug('looking for containers in %s',
MISPLACED_OBJECTS_ACCOUNT)
while True:
one_page = None
try:
one_page = list(itertools.islice(
container_gen, constraints.CONTAINER_LISTING_LIMIT))
except UnexpectedResponse as err:
self.logger.error('Error listing containers in '
'account %s (%s)',
MISPLACED_OBJECTS_ACCOUNT, err)
if not one_page:
# don't generally expect more than one page
break
# reversed order since we expect older containers to be empty
for c in reversed(one_page):
# encoding here is defensive
container = c['name'].encode('utf8')
if container == current_container:
continue # we've already hit this one this pass
yield container
def _iter_objects(self, container):
"""
Generate a list of objects to process.
:param container: the name of the container to process
If the given container is empty and older than reclaim_age this
processor will attempt to reap it.
"""
self.logger.debug('looking for objects in %s', container)
found_obj = False
try:
for raw_obj in self.swift.iter_objects(
MISPLACED_OBJECTS_ACCOUNT, container):
found_obj = True
yield raw_obj
except UnexpectedResponse as err:
self.logger.error('Error listing objects in container %s (%s)',
container, err)
if float(container) < time.time() - self.reclaim_age and \
not found_obj:
# Try to delete old empty containers so the queue doesn't
# grow without bound. It's ok if there's a conflict.
self.swift.delete_container(
MISPLACED_OBJECTS_ACCOUNT, container,
acceptable_statuses=(2, 404, 409, 412))
def reconcile(self):
"""
Main entry point for processing misplaced objects.
Iterate over all queue entries and delegate to reconcile_object.
"""
Example 3: TestObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class TestObjectExpirer(ReplProbeTest):
def setUp(self):
self.expirer = Manager(['object-expirer'])
self.expirer.start()
err = self.expirer.stop()
if err:
raise unittest.SkipTest('Unable to verify object-expirer service')
conf_files = []
for server in self.expirer.servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
self.client = InternalClient(conf_file, 'probe-test', 3)
super(TestObjectExpirer, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name)
def _check_obj_in_container_listing(self):
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
return True
return False
@unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
def test_expirer_object_split_brain(self):
old_policy = random.choice(ENABLED_POLICIES)
wrong_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={'X-Delete-After': 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
create_timestamp = Timestamp(metadata['x-timestamp'])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(['object-updater']).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
self.get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
self.assertIn('x-backend-timestamp', metadata)
self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
# but it is still in the listing
self.assertTrue(self._check_obj_in_container_listing(),
msg='Did not find listing for %s' % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
self.assertFalse(self._check_obj_in_container_listing(),
msg='Found listing for %s' % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in ENABLED_POLICIES:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(policy)})
if 'x-backend-timestamp' in metadata:
if found_in_policy:
self.fail('found object in %s and also %s' %
(found_in_policy, policy))
found_in_policy = policy
self.assertIn('x-backend-timestamp', metadata)
self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
def test_expirer_doesnt_make_async_pendings(self):
#......... (part of the code omitted here) .........
Example 4: ObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Object Expirer',
request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = []
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.expiring_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
containers_to_delete.append(container)
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
pool.spawn_n(
self.delete_object, actual_obj, timestamp,
container, obj)
pool.waitall()
for container in containers_to_delete:
try:
#......... (part of the code omitted here) .........
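The run_once loop above shards the queue across expirer processes by hashing each '<container>/<object>' key and taking the result modulo the configured process count. Here is a minimal standalone sketch of that rule; the .encode() call is added so it also runs on Python 3, whereas the Swift code shown here is Python 2, where str is already bytes.
import hashlib

def assigned_to_this_process(container, obj, process, processes):
    """Deterministically decide whether this worker owns a queue entry."""
    if processes <= 0:
        return True  # sharding disabled: a single worker handles everything
    key = ('%s/%s' % (container, obj)).encode('utf8')
    return int(hashlib.md5(key).hexdigest(), 16) % processes == process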
Example 5: test_reconciler_move_object_twice
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
def test_reconciler_move_object_twice(self):
# select some policies
old_policy = random.choice(ENABLED_POLICIES)
new_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# setup a split brain
self.brain.stop_handoff_half()
# get old_policy on two primaries
self.brain.put_container(policy_index=int(old_policy))
self.brain.start_handoff_half()
self.brain.stop_primary_half()
# force a recreate on handoffs
self.brain.put_container(policy_index=int(old_policy))
self.brain.delete_container()
self.brain.put_container(policy_index=int(new_policy))
self.brain.put_object() # populate memcache with new_policy
self.brain.start_primary_half()
# at this point two primaries have old policy
container_part, container_nodes = self.container_ring.get_nodes(
self.account, self.container_name)
head_responses = []
for node in container_nodes:
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
old_container_node_ids = [
node['id'] for node, metadata in head_responses
if int(old_policy) ==
int(metadata['X-Backend-Storage-Policy-Index'])]
self.assertEqual(2, len(old_container_node_ids))
# hopefully memcache still has the new policy cached
self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
contents='VERIFY')
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
int_client = InternalClient(conf_file, 'probe-test', 3)
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# shutdown the containers that know about the new policy
self.brain.stop_handoff_half()
# and get rows enqueued from old nodes
for server_type in ('container-replicator', 'container-updater'):
server = Manager([server_type])
tuple(server.once(number=n + 1) for n in old_container_node_ids)
# verify entry in the queue for the "misplaced" new_policy
for container in int_client.iter_containers('.misplaced_objects'):
for obj in int_client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (new_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# verify object in old_policy
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# verify object is *not* in new_policy
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
self.get_to_final_state()
# verify entry in the queue
for container in int_client.iter_containers('.misplaced_objects'):
for obj in int_client.iter_objects('.misplaced_objects',
container['name']):
expected = '%d:/%s/%s/%s' % (old_policy, self.account,
self.container_name,
self.object_name)
self.assertEqual(obj['name'], expected)
Manager(['container-reconciler']).once()
# and now it flops back
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
#......... (part of the code omitted here) .........
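The queue entries this test inspects are plain object names of the form '<policy_index>:/<account>/<container>/<object>'. A small sketch of building and parsing such an entry follows; the helper names are illustrative (Swift ships its own utilities for this) and are shown only to make the expected format explicit.
def make_queue_entry(policy_index, account, container, obj):
    return '%d:/%s/%s/%s' % (policy_index, account, container, obj)

def parse_queue_entry(name):
    policy_index, path = name.split(':', 1)
    account, container, obj = path.lstrip('/').split('/', 2)
    return int(policy_index), account, container, obj

# parse_queue_entry('1:/AUTH_test/photos/cat.jpg') -> (1, 'AUTH_test', 'photos', 'cat.jpg')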
Example 6: ObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Object Expirer',
request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.expiring_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name']
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
start_time = time()
try:
self.delete_actual_object(actual_obj, timestamp)
self.swift.delete_object(self.expiring_objects_account,
container, obj)
self.report_objects += 1
self.logger.increment('objects')
except (Exception, Timeout), err:
self.logger.increment('errors')
self.logger.exception(
_('Exception while deleting object %s %s %s') %
(container, obj, str(err)))
self.logger.timing_since('timing', start_time)
self.report()
try:
self.swift.delete_container(
self.expiring_objects_account,
container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout), err:
self.logger.exception(
_('Exception while deleting container %s %s') %
(container, str(err)))
#......... (part of the code omitted here) .........
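In the expirer's hidden account each queue container is named by a Unix timestamp and each entry is '<delete-at>-<target object>', which is why both loops above can break as soon as they reach a name in the future. A hedged sketch of decoding one entry; the helper is illustrative, not Swift's own API.
import time

def decode_expirer_entry(obj_name):
    """Split an expirer queue entry into (delete_at, target_object, is_due)."""
    delete_at, actual_obj = obj_name.split('-', 1)
    delete_at = int(delete_at)
    return delete_at, actual_obj, delete_at <= int(time.time())

# decode_expirer_entry('1440000000-AUTH_test/photos/cat.jpg')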
Example 7: TestObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class TestObjectExpirer(unittest.TestCase):
def setUp(self):
if len(POLICIES) < 2:
raise SkipTest('Need more than one policy')
self.expirer = Manager(['object-expirer'])
self.expirer.start()
err = self.expirer.stop()
if err:
raise SkipTest('Unable to verify object-expirer service')
conf_files = []
for server in self.expirer.servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
self.client = InternalClient(conf_file, 'probe-test', 3)
(self.pids, self.port2server, self.account_ring, self.container_ring,
self.object_ring, self.policy, self.url, self.token,
self.account, self.configs) = reset_environment()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name)
def test_expirer_object_split_brain(self):
old_policy = random.choice(list(POLICIES))
wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={'X-Delete-After': 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
create_timestamp = Timestamp(metadata['x-timestamp'])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(['object-updater']).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
self.assert_('x-backend-timestamp' in metadata)
self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
# but it is still in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
break
else:
self.fail('Did not find listing for %s' % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
self.fail('Found listing for %s' % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in POLICIES:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(policy)})
if 'x-backend-timestamp' in metadata:
if found_in_policy:
self.fail('found object in %s and also %s' %
(found_in_policy, policy))
found_in_policy = policy
self.assert_('x-backend-timestamp' in metadata)
self.assert_(Timestamp(metadata['x-backend-timestamp']) >
create_timestamp)
Example 8: ObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Object Expirer',
request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = []
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.expiring_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
containers_to_delete.append(container)
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
pool.spawn_n(
self.delete_object, actual_obj, timestamp,
container, obj)
pool.waitall()
for container in containers_to_delete:
try:
#......... (part of the code omitted here) .........
Example 9: TestObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class TestObjectExpirer(ReplProbeTest):
def setUp(self):
if len(ENABLED_POLICIES) < 2:
raise SkipTest('Need more than one policy')
self.expirer = Manager(['object-expirer'])
self.expirer.start()
err = self.expirer.stop()
if err:
raise SkipTest('Unable to verify object-expirer service')
conf_files = []
for server in self.expirer.servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
self.client = InternalClient(conf_file, 'probe-test', 3)
super(TestObjectExpirer, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name)
def test_expirer_object_split_brain(self):
old_policy = random.choice(ENABLED_POLICIES)
wrong_policy = random.choice([p for p in ENABLED_POLICIES
if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={'X-Delete-After': 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
create_timestamp = Timestamp(metadata['x-timestamp'])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(['object-updater']).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(['container-updater']).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
self.get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
self.assertTrue('x-backend-timestamp' in metadata)
self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
create_timestamp)
# but it is still in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
break
else:
self.fail('Did not find listing for %s' % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
for obj in self.client.iter_objects(self.account,
self.container_name):
if self.object_name == obj['name']:
self.fail('Found listing for %s' % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in ENABLED_POLICIES:
metadata = self.client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(policy)})
if 'x-backend-timestamp' in metadata:
if found_in_policy:
self.fail('found object in %s and also %s' %
(found_in_policy, policy))
found_in_policy = policy
self.assertTrue('x-backend-timestamp' in metadata)
self.assertTrue(Timestamp(metadata['x-backend-timestamp']) >
create_timestamp)
def test_expirer_object_should_not_be_expired(self):
obj_brain = BrainSplitter(self.url, self.token, self.container_name,
#......... (part of the code omitted here) .........
Example 10: UtilizationAggregator
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
#......... (part of the code omitted here) .........
processes = int(kwargs['processes'])
else:
processes = self.processes
if kwargs.get('process') is not None:
process = int(kwargs['process'])
else:
process = self.process
if process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if processes and process >= processes:
raise ValueError(
'process must be less than or equal to processes')
return processes, process
def aggregate_container(self, container):
start_time = time()
try:
objs_to_delete = list()
bytes_recvs = dict()
bytes_sents = dict()
ts, tenant_id, account = container.split('_', 2)
ts = int(float(ts))
for o in self.swift.iter_objects(self.sample_account, container):
name = o['name']
objs_to_delete.append(name)
ts, bytes_rv, bytes_st, trans_id, client_ip = name.split('/')
bill_type = self.get_billtype_by_client_ip(client_ip, ts)
bytes_recvs[bill_type] = bytes_recvs.get(bill_type,
0) + int(bytes_rv)
bytes_sents[bill_type] = bytes_sents.get(bill_type,
0) + int(bytes_st)
self.report_objects += 1
for o in objs_to_delete:
self.swift.delete_object(self.sample_account, container, o)
for bill_type, bt_rv in bytes_recvs.items():
t_object = 'transfer/%d/%d/%d_%d_%d' % (ts, bill_type, bt_rv,
bytes_sents[bill_type],
self.report_objects)
self._hidden_update(tenant_id, t_object)
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
_('Exception while aggregating sample %s %s') %
(container, str(err)))
self.logger.timing_since('timing', start_time)
self.report()
def account_info(self, tenant_id, timestamp):
path = '/v1/%s/%s?prefix=usage/%d&limit=1' % (self.aggregate_account,
tenant_id, timestamp)
resp = self.swift.make_request('GET', path, {}, (2,))
if len(resp.body) == 0:
Example 11: ObjectRestorer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectRestorer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf):
self.conf = conf
self.container_ring = Ring('/etc/swift', ring_name='container')
self.logger = get_logger(conf, log_route='object-restorer')
self.logger.set_statsd_prefix('s3-object-restorer')
self.interval = int(conf.get('interval') or 300)
self.restoring_object_account = '.s3_restoring_objects'
self.expiring_restored_account = '.s3_expiring_restored_objects'
self.glacier_account_prefix = '.glacier_'
self.todo_container = 'todo'
self.restoring_container = 'restoring'
conf_path = '/etc/swift/s3-object-restorer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.glacier = self._init_glacier()
self.glacier_tmpdir = conf.get('temp_path', '/var/cache/s3/')
self.swift = InternalClient(conf_path,
'Swift Object Restorer',
request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.client = Client(self.conf.get('sentry_sdn', ''))
def _init_glacier(self):
con = Layer2(region_name='ap-northeast-1')
return con.get_vault('swift-s3-transition')
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects restored') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects restored') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
for o in self.swift.iter_objects(self.restoring_object_account,
self.todo_container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (self.todo_container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
pool.spawn_n(self.start_object_restoring, obj)
pool.waitall()
for o in self.swift.iter_objects(self.restoring_object_account,
self.restoring_container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
#......... (part of the code omitted here) .........
Example 12: ObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectExpirer(Daemon):
def __init__(self, conf):
super(ObjectExpirer, self).__init__(conf)
self.conf = conf
self.logger = get_logger(conf, log_route='s3-object-expirer')
self.logger.set_statsd_prefix('s3-object-expirer')
self.interval = int(conf.get('interval') or 300)
self.s3_expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or
's3_expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/s3-object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path,
'Swift Object Expirer',
request_tries)
self.glacier = self._init_glacier()
self.glacier_account_prefix = '.glacier_'
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.client = Client(self.conf.get('sentry_sdn', ''))
def _init_glacier(self):
con = Layer2(region_name='ap-northeast-1')
return con.get_vault('swift-s3-transition')
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = []
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
containers, objects = \
self.swift.get_account_info(self.s3_expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.
s3_expiring_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
containers_to_delete.append(container)
for o in self.swift.iter_objects(self
.s3_expiring_objects_account,
container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
pool.spawn_n(self.delete_object, container, obj)
pool.waitall()
for container in containers_to_delete:
#......... (part of the code omitted here) .........
Example 13: TestObjectExpirer
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class TestObjectExpirer(ReplProbeTest):
def setUp(self):
if len(ENABLED_POLICIES) < 2:
raise SkipTest("Need more than one policy")
self.expirer = Manager(["object-expirer"])
self.expirer.start()
err = self.expirer.stop()
if err:
raise SkipTest("Unable to verify object-expirer service")
conf_files = []
for server in self.expirer.servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
self.client = InternalClient(conf_file, "probe-test", 3)
super(TestObjectExpirer, self).setUp()
self.container_name = "container-%s" % uuid.uuid4()
self.object_name = "object-%s" % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)
def test_expirer_object_split_brain(self):
old_policy = random.choice(ENABLED_POLICIES)
wrong_policy = random.choice([p for p in ENABLED_POLICIES if p != old_policy])
# create an expiring object and a container with the wrong policy
self.brain.stop_primary_half()
self.brain.put_container(int(old_policy))
self.brain.put_object(headers={"X-Delete-After": 2})
# get the object timestamp
metadata = self.client.get_object_metadata(
self.account,
self.container_name,
self.object_name,
headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
)
create_timestamp = Timestamp(metadata["x-timestamp"])
self.brain.start_primary_half()
# get the expiring object updates in their queue, while we have all
# the servers up
Manager(["object-updater"]).once()
self.brain.stop_handoff_half()
self.brain.put_container(int(wrong_policy))
# don't start handoff servers, only wrong policy is available
# make sure auto-created containers get in the account listing
Manager(["container-updater"]).once()
# this guy should no-op since it's unable to expire the object
self.expirer.once()
self.brain.start_handoff_half()
self.get_to_final_state()
# validate object is expired
found_in_policy = None
metadata = self.client.get_object_metadata(
self.account,
self.container_name,
self.object_name,
acceptable_statuses=(4,),
headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
)
self.assertTrue("x-backend-timestamp" in metadata)
self.assertEqual(Timestamp(metadata["x-backend-timestamp"]), create_timestamp)
# but it is still in the listing
for obj in self.client.iter_objects(self.account, self.container_name):
if self.object_name == obj["name"]:
break
else:
self.fail("Did not find listing for %s" % self.object_name)
# clear proxy cache
client.post_container(self.url, self.token, self.container_name, {})
# run the expirer again after replication
self.expirer.once()
# object is not in the listing
for obj in self.client.iter_objects(self.account, self.container_name):
if self.object_name == obj["name"]:
self.fail("Found listing for %s" % self.object_name)
# and validate object is tombstoned
found_in_policy = None
for policy in ENABLED_POLICIES:
metadata = self.client.get_object_metadata(
self.account,
self.container_name,
self.object_name,
acceptable_statuses=(4,),
headers={"X-Backend-Storage-Policy-Index": int(policy)},
)
if "x-backend-timestamp" in metadata:
if found_in_policy:
self.fail("found object in %s and also %s" % (found_in_policy, policy))
found_in_policy = policy
self.assertTrue("x-backend-timestamp" in metadata)
self.assertTrue(Timestamp(metadata["x-backend-timestamp"]) > create_timestamp)
Example 14: ObjectTransitor
# Required import: from swift.common.internal_client import InternalClient [as alias]
# Or: from swift.common.internal_client.InternalClient import iter_objects [as alias]
class ObjectTransitor(Daemon):
def __init__(self, conf):
super(ObjectTransitor, self).__init__(conf)
self.conf = conf
self.logger = get_logger(conf, log_route='s3-object-transitor')
self.logger.set_statsd_prefix('s3-object-transitor')
self.interval = int(conf.get('interval') or 300)
self.s3_tr_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or
's3_transitioning_objects')
conf_path = conf.get('__file__') or \
'/etc/swift/s3-object-transitor.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = InternalClient(conf_path, 'Swift Object Transitor',
request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.client = Client(self.conf.get('sentry_sdn', ''))
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects '
'transitioned') %
(elapsed, self.report_objects))
dump_recon_cache({'object_transition_pass': elapsed,
'transitioned_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects transitioned') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = []
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug(_('Run begin'))
containers, objects = \
self.swift.get_account_info(self.s3_tr_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.s3_tr_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
containers_to_delete.append(container)
for o in self.swift.iter_objects(self.s3_tr_objects_account,
container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
pool.spawn_n(self.transition_object, container, obj)
pool.waitall()
for container in containers_to_delete:
try:
self.swift.delete_container(self.s3_tr_objects_account,
container, (2, 4))
except (Exception, Timeout) as err:
report_exception(self.logger,
_('Exception while deleting container %s %s') %
(container, str(err)), self.client)
#......... (part of the code omitted here) .........