This article collects typical usage examples of the Python class swift.container.backend.ContainerBroker. If you are wondering what the ContainerBroker class is for, or how to use it in your own code, the curated examples below should help.
The sections that follow show 15 code examples of the ContainerBroker class, sorted by popularity by default.
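Most of the examples below follow the same basic pattern: open (or create) a container database by path, optionally initialize it, then read or write rows through the broker. Here is a minimal sketch of that pattern, assuming a recent Swift where initialize() takes a put timestamp and a storage policy index; the path and names are made up for illustration:

import os

from swift.common.utils import Timestamp
from swift.container.backend import ContainerBroker

# hypothetical db path; real paths live under /srv/node/<device>/containers/...
db_path = '/tmp/demo/containers/0/abc/deadbeefcafe/deadbeefcafe.db'
broker = ContainerBroker(db_path, account='AUTH_test', container='photos')
if not os.path.exists(broker.db_file):
    # signature in recent Swift: initialize(put_timestamp, storage_policy_index)
    broker.initialize(Timestamp.now().internal, 0)

# record one object row and read the container stats back
broker.put_object('cat.jpg', Timestamp.now().internal, 1024,
                  'image/jpeg', 'd41d8cd98f00b204e9800998ecf8427e')
info = broker.get_info()
print(info['object_count'], info['bytes_used'])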
Example 1: get_data
def get_data(self, db_path):
    """
    Data for generated csv has the following columns:
    Account Hash, Container Name, Object Count, Bytes Used
    This will just collect whether or not the metadata is set
    using a 1 or ''.

    :raises sqlite3.Error: does not catch errors connecting to db
    """
    line_data = None
    broker = ContainerBroker(db_path)
    if not broker.is_deleted():
        info = broker.get_info()
        encoded_container_name = urllib.quote(info["container"])
        line_data = '"%s","%s",%d,%d' % (
            info["account"],
            encoded_container_name,
            info["object_count"],
            info["bytes_used"],
        )
        if self.metadata_keys:
            metadata_results = ",".join([info["metadata"].get(mkey) and "1" or "" for mkey in self.metadata_keys])
            line_data += ",%s" % metadata_results
        line_data += "\n"
    return line_data
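For reference, the tests further down (Examples 3 and 13) pin down the exact shape of the row this method returns; a quick sketch of the two cases:

# one row per container: "account","container",object_count,bytes_used[,metadata flags]
row_plain = '"test_acc","test_con",1,10\n'        # no metadata_keys configured
row_meta = '"test_acc","test_con",1,10,1,\n'      # metadata_keys = 'test1,test2'; only test1 is set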
Example 2: get_reconciler_broker
def get_reconciler_broker(self, timestamp):
    """
    Get a local instance of the reconciler container broker that is
    appropriate to enqueue the given timestamp.

    :param timestamp: the timestamp of the row to be enqueued
    :returns: a local reconciler broker
    """
    container = get_reconciler_container_name(timestamp)
    if self.reconciler_containers and \
            container in self.reconciler_containers:
        return self.reconciler_containers[container][1]
    account = MISPLACED_OBJECTS_ACCOUNT
    part = self.ring.get_part(account, container)
    node = self.find_local_handoff_for_part(part)
    if not node:
        raise DeviceUnavailable(
            'No mounted devices found suitable to Handoff reconciler '
            'container %s in partition %s' % (container, part))
    hsh = hash_path(account, container)
    db_dir = storage_directory(DATADIR, part, hsh)
    db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
    broker = ContainerBroker(db_path, account=account, container=container)
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp, 0)
        except DatabaseAlreadyExists:
            pass
    if self.reconciler_containers is not None:
        self.reconciler_containers[container] = part, broker, node['id']
    return broker
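The path arithmetic above follows Swift's standard on-disk layout: storage_directory(DATADIR, part, hsh) yields containers/<part>/<last three chars of hsh>/<hsh>, and the database file is named after the hash. A small sketch of the same derivation with illustrative values; the account constant and the container name reflect the reconciler's conventions and are assumptions here:

import os

from swift.common.utils import hash_path, storage_directory

account = '.misplaced_objects'   # assumed value of MISPLACED_OBJECTS_ACCOUNT
container = '1432175400'         # reconciler containers are named by hourly bucket
hsh = hash_path(account, container)
db_dir = storage_directory('containers', 123, hsh)   # containers/123/<hsh[-3:]>/<hsh>
db_path = os.path.join('/srv/node', 'sda1', db_dir, hsh + '.db')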
Example 3: test_container_stat_get_data
def test_container_stat_get_data(self):
    stat = db_stats_collector.ContainerStatsCollector(self.conf)
    container_db = ContainerBroker("%s/con.db" % self.containers,
                                   account='test_acc', container='test_con')
    container_db.initialize()
    container_db.put_object('test_obj', time.time(), 10, 'text', 'faketag')
    info = stat.get_data("%s/con.db" % self.containers)
    self.assertEquals('''"test_acc","test_con",1,10\n''', info)
Example 4: _make_broker
def _make_broker(self, account='a', container='c',
                 device='sda', part=0):
    datadir = os.path.join(
        self.testdir, device, 'containers', str(part), 'ash', 'hash')
    db_file = os.path.join(datadir, 'hash.db')
    broker = ContainerBroker(
        db_file, account=account, container=container)
    broker.initialize()
    return broker
Example 5: _abort_rsync_then_merge
def _abort_rsync_then_merge(self, db_file, old_filename):
    if super(ContainerReplicatorRpc, self)._abort_rsync_then_merge(
            db_file, old_filename):
        return True
    # if the local db has started sharding since the original 'sync'
    # request then abort object replication now; instantiate a fresh broker
    # each time this check is performed so as to get the latest state
    broker = ContainerBroker(db_file)
    return broker.sharding_initiated()
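Note that a fresh broker is built for every check rather than reusing one held by the caller, so the decision always reflects the current on-disk state. A minimal sketch of using the same check on its own (the path and function name are illustrative):

from swift.container.backend import ContainerBroker

def should_abort_merge(db_file):
    # re-read the db; if another process has started sharding it since the
    # sync began, the pending merge should be abandoned
    return ContainerBroker(db_file).sharding_initiated()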
Example 6: process_container
def process_container(self, dbfile):
    """
    Process a container, and update the information in the account.

    :param dbfile: container DB to process
    """
    start_time = time.time()
    broker = ContainerBroker(dbfile, logger=self.logger)
    info = broker.get_info()
    # Don't send updates if the container was auto-created since it
    # definitely doesn't have up to date statistics.
    if float(info['put_timestamp']) <= 0:
        return
    if self.account_suppressions.get(info['account'], 0) > time.time():
        return
    if info['put_timestamp'] > info['reported_put_timestamp'] or \
            info['delete_timestamp'] > info['reported_delete_timestamp'] \
            or info['object_count'] != info['reported_object_count'] or \
            info['bytes_used'] != info['reported_bytes_used']:
        container = '/%s/%s' % (info['account'], info['container'])
        part, nodes = self.get_account_ring().get_nodes(info['account'])
        events = [spawn(self.container_report, node, part, container,
                        info['put_timestamp'], info['delete_timestamp'],
                        info['object_count'], info['bytes_used'])
                  for node in nodes]
        successes = 0
        failures = 0
        for event in events:
            if is_success(event.wait()):
                successes += 1
            else:
                failures += 1
        if successes > failures:
            self.logger.increment('successes')
            self.successes += 1
            self.logger.debug(
                _('Update report sent for %(container)s %(dbfile)s'),
                {'container': container, 'dbfile': dbfile})
            broker.reported(info['put_timestamp'],
                            info['delete_timestamp'], info['object_count'],
                            info['bytes_used'])
        else:
            self.logger.increment('failures')
            self.failures += 1
            self.logger.debug(
                _('Update report failed for %(container)s %(dbfile)s'),
                {'container': container, 'dbfile': dbfile})
            self.account_suppressions[info['account']] = until = \
                time.time() + self.account_suppression_time
            if self.new_account_suppressions:
                print >>self.new_account_suppressions, \
                    info['account'], until
        # Only track timing data for attempted updates:
        self.logger.timing_since('timing', start_time)
    else:
        self.logger.increment('no_changes')
        self.no_changes += 1
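The report is only treated as delivered when a strict majority of the account replicas answered with a success status; a minimal sketch of that decision, with a hypothetical function name:

from swift.common.http import is_success

def report_delivered(statuses):
    """statuses: HTTP status codes returned by the account replicas."""
    successes = sum(1 for status in statuses if is_success(status))
    return successes > len(statuses) - successes

report_delivered([201, 201, 500])   # True: two of three replicas accepted
report_delivered([201, 507, 500])   # False: only one replica accepted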
Example 7: test_find_replace_enable
def test_find_replace_enable(self):
    db_file = os.path.join(self.testdir, 'hash.db')
    broker = ContainerBroker(db_file)
    broker.account = 'a'
    broker.container = 'c'
    broker.initialize()
    ts = utils.Timestamp.now()
    broker.merge_items([
        {'name': 'obj%02d' % i, 'created_at': ts.internal, 'size': 0,
         'content_type': 'application/octet-stream', 'etag': 'not-really',
         'deleted': 0, 'storage_policy_index': 0,
         'ctype_timestamp': ts.internal, 'meta_timestamp': ts.internal}
        for i in range(100)])
    out = StringIO()
    err = StringIO()
    with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
        with mock_timestamp_now() as now:
            main([broker.db_file, 'find_and_replace', '10', '--enable'])
    expected = [
        'No shard ranges found to delete.',
        'Injected 10 shard ranges.',
        'Run container-replicator to replicate them to other nodes.',
        "Container moved to state 'sharding' with epoch %s." %
        now.internal,
        'Run container-sharder on all nodes to shard the container.']
    self.assertEqual(expected, out.getvalue().splitlines())
    self.assertEqual(['Loaded db broker for a/c.'],
                     err.getvalue().splitlines())
    self._assert_enabled(broker, now)
    self.assertEqual(
        [(data['lower'], data['upper']) for data in self.shard_data],
        [(sr.lower_str, sr.upper_str) for sr in broker.get_shard_ranges()])
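After find_and_replace runs, the broker holds the injected shard ranges and they can be inspected directly; lower_str and upper_str are the namespace bounds the final assertion compares against. A short sketch, continuing with the broker from this test:

# continuing with the broker from the test above
for sr in broker.get_shard_ranges():
    # each shard range covers a contiguous slice of the object namespace
    print(sr.lower_str, sr.upper_str, sr.object_count)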
Example 8: get_reconciler_broker
def get_reconciler_broker(self, timestamp):
    """
    Get a local instance of the reconciler container broker that is
    appropriate to enqueue the given timestamp.

    :param timestamp: the timestamp of the row to be enqueued
    :returns: a local reconciler broker
    """
    container = get_reconciler_container_name(timestamp)
    if self.reconciler_containers and \
            container in self.reconciler_containers:
        return self.reconciler_containers[container][1]
    account = MISPLACED_OBJECTS_ACCOUNT
    part = self.ring.get_part(account, container)
    node = self.find_local_handoff_for_part(part)
    if not node:
        raise DeviceUnavailable(
            'No mounted devices found suitable to Handoff reconciler '
            'container %s in partition %s' % (container, part))
    broker = ContainerBroker.create_broker(
        os.path.join(self.root, node['device']), part, account, container,
        logger=self.logger, put_timestamp=timestamp,
        storage_policy_index=0)
    if self.reconciler_containers is not None:
        self.reconciler_containers[container] = part, broker, node['id']
    return broker
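Compared with Example 2, ContainerBroker.create_broker wraps the hash/path computation and the initialize-unless-it-exists handling in a single call. A sketch of calling it directly with illustrative values; the account string is an assumption for MISPLACED_OBJECTS_ACCOUNT:

from swift.common.utils import Timestamp
from swift.container.backend import ContainerBroker

broker = ContainerBroker.create_broker(
    '/srv/node/sda1',        # device path
    123,                     # partition
    '.misplaced_objects',    # account (assumed MISPLACED_OBJECTS_ACCOUNT)
    '1432175400',            # per-hour reconciler container name
    put_timestamp=Timestamp.now().internal,
    storage_policy_index=0)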
Example 9: test_run_once_with_get_info_timeout
def test_run_once_with_get_info_timeout(self, mock_dump_recon):
    cu = self._get_container_updater()
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    db_file = os.path.join(subdir, 'hash.db')
    cb = ContainerBroker(db_file, account='a', container='c')
    cb.initialize(normalize_timestamp(1), 0)
    timeout = exceptions.LockTimeout(10, db_file)
    timeout.cancel()
    with mock.patch('swift.container.updater.ContainerBroker.get_info',
                    side_effect=timeout):
        cu.run_once()
    log_lines = self.logger.get_lines_for_level('info')
    self.assertIn('Failed to get container info (Lock timeout: '
                  '10 seconds: %s); skipping.' % db_file, log_lines)
Example 10: test_error_in_process
def test_error_in_process(self, mock_process, mock_dump_recon):
    cu = self._get_container_updater()
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='c', pending_timeout=1)
    cb.initialize(normalize_timestamp(1), 0)
    cu.run_once()
    log_lines = self.logger.get_lines_for_level('error')
    self.assertTrue(log_lines)
    self.assertIn('Error processing container ', log_lines[0])
    self.assertIn('devices/sda1/containers/subdir/hash.db', log_lines[0])
    self.assertIn('Boom!', log_lines[0])
    self.assertFalse(log_lines[1:])
    self.assertEqual(1, len(mock_dump_recon.mock_calls))
Example 11: setUp
def setUp(self):
    self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
    rmtree(self.testdir, ignore_errors=1)
    os.mkdir(self.testdir)
    self.devices_dir = os.path.join(self.testdir, 'devices')
    os.mkdir(self.devices_dir)
    self.sda1 = os.path.join(self.devices_dir, 'sda1')
    os.mkdir(self.sda1)
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    self.assert_(os.path.exists(containers_dir))
    self.subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(self.subdir)
    cb = ContainerBroker(os.path.join(self.subdir, 'hash.db'), account='a',
                         container='c')
    cb.initialize(normalize_timestamp(1))
    cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')
Example 12: container_audit
def container_audit(self, path):
    """
    Audits the given container path

    :param path: the path to a container db
    """
    start_time = time.time()
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            broker.get_info()
            self.logger.increment('passes')
            self.container_passes += 1
            self.logger.debug(_('Audit passed for %s'), broker.db_file)
    except (Exception, Timeout):
        self.logger.increment('failures')
        self.container_failures += 1
        self.logger.exception(_('ERROR Could not get container info %s'),
                              broker.db_file)
    self.logger.timing_since('timing', start_time)
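The audit itself is deliberately light: constructing the broker and calling get_info() is enough to surface a corrupt or unreadable SQLite file. A standalone sketch of sweeping every container database under one device; the directory layout assumption is noted in a comment:

import os

from swift.container.backend import ContainerBroker

def audit_device(devices_root, device):
    # assumes the standard layout <devices_root>/<device>/containers/.../<hash>.db
    datadir = os.path.join(devices_root, device, 'containers')
    for root, _dirs, files in os.walk(datadir):
        for name in files:
            if not name.endswith('.db'):
                continue
            path = os.path.join(root, name)
            try:
                broker = ContainerBroker(path)
                if not broker.is_deleted():
                    broker.get_info()
            except Exception:
                print('audit failed for %s' % path)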
Example 13: test_container_stat_get_metadata
def test_container_stat_get_metadata(self):
    container_db = ContainerBroker("%s/con.db" % self.containers,
                                   account='test_acc', container='test_con')
    container_db.initialize(storage_policy_index=0)
    container_db.put_object('test_obj', time.time(), 10, 'text', 'faketag')
    container_db.update_metadata({'X-Container-Meta-Test1': ('val', 1000)})
    self.conf['metadata_keys'] = 'test1,test2'
    stat = db_stats_collector.ContainerStatsCollector(self.conf)
    info = stat.get_data("%s/con.db" % self.containers)
    self.assertEquals('''"test_acc","test_con",1,10,1,\n''', info)
Example 14: container_crawl
def container_crawl(self, path):
    """
    Crawls the given container path.

    :param path: the path to a container db
    """
    metaDict = {}
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            #reportedTime = broker.get_info()['put_timestamp']
            #if normalize_timestamp(self.crawled_time)
            #< reportedTime < normalize_timestamp(start_time):
            metaDict = broker.get_info()
            metaDict.update(
                (key, value)
                for key, (value, timestamp) in broker.metadata.iteritems()
                if value != '' and is_sys_or_user_meta('container', key))
    except (Exception, Timeout):
        self.logger.increment('failures')
    return metaDict
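broker.metadata maps each header name to a (value, timestamp) pair, the same shape Example 13 writes with update_metadata, so the filter above keeps only non-empty user or system container metadata. A small Python 3 sketch of that filtering (items() instead of the Python 2 iteritems(); import path as in recent Swift):

from swift.common.request_helpers import is_sys_or_user_meta

metadata = {
    'X-Container-Meta-Color': ('blue', '0000001000.00000'),
    'X-Container-Sysmeta-Shard-Root': ('', '0000001000.00000'),  # empty value: dropped
    'X-Container-Read': ('.r:*', '0000001000.00000'),            # not user/sys meta: dropped
}
kept = {key: value for key, (value, _ts) in metadata.items()
        if value != '' and is_sys_or_user_meta('container', key)}
# kept == {'X-Container-Meta-Color': 'blue'}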
Example 15: test_unicode
def test_unicode(self):
    cu = container_updater.ContainerUpdater(
        {
            "devices": self.devices_dir,
            "mount_check": "false",
            "swift_dir": self.testdir,
            "interval": "1",
            "concurrency": "1",
            "node_timeout": "15",
        }
    )
    containers_dir = os.path.join(self.sda1, DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, "subdir")
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, "hash.db"), account="a", container="\xce\xa9")
    cb.initialize(normalize_timestamp(1), 0)
    cb.put_object("\xce\xa9", normalize_timestamp(2), 3, "text/plain", "68b329da9893e34099c7d8ad5cb9c940")

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile("rb")
                out = sock.makefile("wb")
                out.write("HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n")
                out.flush()
                inc.read()
        except BaseException as err:
            import traceback
            traceback.print_exc()
            return err
        return None

    bindsock = listen(("127.0.0.1", 0))

    def spawn_accepts():
        events = []
        for _junk in range(2):
            with Timeout(3):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr))
        return events

    spawned = spawn(spawn_accepts)
    for dev in cu.get_account_ring().devs:
        if dev is not None:
            dev["port"] = bindsock.getsockname()[1]
    cu.run_once()
    for event in spawned.wait():
        err = event.wait()
        if err:
            raise err
    info = cb.get_info()
    self.assertEqual(info["object_count"], 1)
    self.assertEqual(info["bytes_used"], 3)
    self.assertEqual(info["reported_object_count"], 1)
    self.assertEqual(info["reported_bytes_used"], 3)