This article collects typical usage examples of the Python method swift.obj.diskfile.DiskFileManager.get_diskfile. If you have been wondering what DiskFileManager.get_diskfile does and how to use it, the curated code examples below may help. You can also read more about the containing class, swift.obj.diskfile.DiskFileManager.
The following presents 14 code examples of DiskFileManager.get_diskfile, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
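Before the examples, here is a minimal sketch of the call pattern they all share: build a DiskFileManager from a config dict and a logger, then ask it for a DiskFile keyed by device, partition, account, container and object. The /srv/node layout and names below are illustrative assumptions only, and the exact get_diskfile() signature varies across Swift releases (older ones take no policy argument, later ones a positional policy index or a policy keyword), as the examples themselves show.
from swift.common.utils import get_logger
from swift.obj.diskfile import DiskFileManager

# hypothetical single-device layout under /srv/node
conf = {'devices': '/srv/node', 'mount_check': 'false'}
mgr = DiskFileManager(conf, get_logger(conf))

# device, partition, account, container, object
disk_file = mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
metadata = disk_file.read_metadata()  # raises DiskFileNotExist if no data file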
Example 1: _get_object_info
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
def _get_object_info(self, account, container, obj, number):
obj_conf = self.configs['object-server']
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:object-server')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
part, nodes = ring.get_nodes(account, container, obj)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
mgr = DiskFileManager(options, get_logger(options))
disk_file = mgr.get_diskfile(device, part, account, container, obj,
self.policy)
info = disk_file.read_metadata()
return info
Example 2: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
os.mkdir(self.objects_p1)
self.parts = {}
self.parts_p1 = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
'o', 1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEquals(auditor_worker.max_bytes_per_second, 10000000)
self.assertEquals(auditor_worker.log_time, 3600)
# test default values
conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 65536)
self.assertEquals(auditor_worker.max_files_per_second, 20)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({'disk_chunk_size': 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 4096)
self.assertEquals(auditor_worker.max_files_per_second, 50)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, 'extra_data')
auditor_worker.object_audit(
AuditLocation(disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines,
pre_quarantines + 1)
run_tests(self.disk_file)
run_tests(self.disk_file_p1)
# ... (rest of the code omitted) ...
Example 3: ObjectController
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class ObjectController(object):
"""Implements the WSGI application for the Swift Object Server."""
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
replication_server = conf.get('replication_server', None)
if replication_server is not None:
replication_server = config_true_value(replication_server)
self.replication_server = replication_server
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
# ... (rest of the code omitted) ...
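The docstring above names the intended extension point: an object server that wants a different DiskFile implementation only has to override get_diskfile. A minimal sketch of such an override, assuming a hypothetical MyDiskFileManager subclass that hands out the custom DiskFile class:
from swift.obj.diskfile import DiskFileManager

class MyDiskFileManager(DiskFileManager):
    # hypothetical subclass that would return a custom DiskFile class
    pass

class MyObjectController(ObjectController):
    def setup(self, conf):
        super(MyObjectController, self).setup(conf)
        # replace the stock manager installed by the base setup() above
        self._diskfile_mgr = MyDiskFileManager(conf, self.logger)

    def get_diskfile(self, device, partition, account, container, obj,
                     **kwargs):
        # delegate to the custom manager instead of the stock one
        return self._diskfile_mgr.get_diskfile(device, partition, account,
                                               container, obj, **kwargs)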
Example 4: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(os.path.join(self.devices, 'sdb'))
self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, 'extra_data')
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
fp = open(path, 'w')
# ... (rest of the code omitted) ...
Example 5: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
self.objects = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
# policy 2
self.objects_p2 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[2]))
self.objects_2_p2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[2]))
os.mkdir(self.objects_p2)
self.parts = {}
self.parts_p1 = {}
self.parts_p2 = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
self.parts_p2[part] = os.path.join(self.objects_p2, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
os.mkdir(os.path.join(self.objects_p2, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.ec_df_mgr = ECDiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1, 2
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
'o', policy=POLICIES[1])
self.disk_file_ec = self.ec_df_mgr.get_diskfile(
'sda', '0', 'a', 'c', 'o', policy=POLICIES[2], frag_index=1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEqual(auditor_worker.max_bytes_per_second, 10000000)
self.assertEqual(auditor_worker.log_time, 3600)
# test default values
conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 65536)
self.assertEqual(auditor_worker.max_files_per_second, 20)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({'disk_chunk_size': 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 4096)
self.assertEqual(auditor_worker.max_files_per_second, 50)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
# ... (rest of the code omitted) ...
Example 6: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
self.devices = os.path.join(self.testdir, "node")
self.rcache = os.path.join(self.testdir, "object.recon")
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, "sda"))
self.objects = os.path.join(self.devices, "sda", "objects")
os.mkdir(os.path.join(self.devices, "sdb"))
self.objects_2 = os.path.join(self.devices, "sdb", "objects")
os.mkdir(self.objects)
self.parts = {}
for part in ["0", "1", "2", "3"]:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o")
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, "extra_data")
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o")
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update("1" + "0" * 1023)
etag = etag.hexdigest()
metadata["ETag"] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + ".data")
mkdirs(self.disk_file._datadir)
fp = open(path, "w")
fp.write("0" * 1024)
fp.close()
invalidate_hash(os.path.dirname(self.disk_file._datadir))
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_will_not_swallow_errors_in_tests(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + ".data")
mkdirs(self.disk_file._datadir)
with open(path, "w") as f:
write_metadata(f, {"name": "/a/c/o"})
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
def blowup(*args):
raise NameError("tpyo")
# ... (rest of the code omitted) ...
Example 7: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(os.path.join(self.devices, 'sdb'))
self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger)
data = '0' * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
os.path.join(self.disk_file._datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, 'extra_data')
auditor_worker.object_audit(
os.path.join(self.disk_file._datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger)
data = '0' * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
auditor_worker.object_audit(
os.path.join(self.disk_file._datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
auditor_worker.object_audit(
os.path.join(self.disk_file._datadir, timestamp + '.data'),
'sda', '0')
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
# ... (rest of the code omitted) ...
Example 8: MetadataController
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
# ... (earlier code omitted) ...
#TODO: move kwargs
kwargs = {'account':acc, 'container':con, 'logger':self.logger}
md_broker= swift.container.backend.ContainerBroker(path, **kwargs)
md = md_broker.get_info()
md.update(
(key, value)
for key, (value, timestamp) in md_broker.metadata.iteritems()
if value != '' and is_sys_or_user_meta('container', key))
sys_md = format_con_metadata(md)
user_md = format_custom_metadata(md)
if 'X-Container-Read' in req.headers:
sys_md['container_read_permissions'] = req.headers['X-Container-Read']
if 'X-Container-Write' in req.headers:
sys_md['container_write_permissions'] = req.headers['X-Container-Write']
#TODO: insert container_last_activity_time
#TODO: split meta user/sys
#TODO: insert meta
self.broker.insert_container_md([sys_md])
return
except DatabaseConnectionError as e:
self.logger.warn("DatabaseConnectionError: " + e.path + "\n")
pass
except:
self.logger.warn("%s: %s\n"%(str(sys.exc_info()[0]),str(sys.exc_info()[1])))
pass
#handle object PUT
else:
part = ring.get_part(acc, con, obj)
nodes = ring.get_part_nodes(part)
for node in nodes:
for item in self.devicelist:
if node['device'] in item:
try:
df = self.diskfile_mgr.get_diskfile(item, part, acc, con, obj, stor_policy)
md = df.read_metadata()
sys_md = format_obj_metadata(md)
#df._data_file is a direct path to the objects data
sys_md['object_location'] = df._data_file
user_md = format_custom_metadata(md)
#TODO: insert user meta and sys meta
self.broker.insert_object_md([sys_md])
except:
self.logger.warn("%s: %s\n"%(str(sys.exc_info()[0]),str(sys.exc_info()[1])))
pass
return
@public
@timing_stats()
def DELETE(self, req):
version, acc, con, obj = split_path(req.path, 1, 4, True)
timestamp = Timestamp(time.time()).isoformat()
data_type = ''
md = {}
if not con and not obj:
#do nothing. accounts cannot be deleted
return
elif not obj:
md = format_con_metadata(md)
md['container_delete_time'] = timestamp
md['container_last_activity_time'] = timestamp
data_type = 'container'
for item in \
(data_type + '_uri', data_type + '_name'):
if item in md:
del md[item]
#TODO: overwrite container metadata
Example 9: ObjectController
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route="object-server")
self.node_timeout = int(conf.get("node_timeout", 3))
self.conn_timeout = float(conf.get("conn_timeout", 0.5))
self.client_timeout = int(conf.get("client_timeout", 60))
self.disk_chunk_size = int(conf.get("disk_chunk_size", 65536))
self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
self.log_requests = config_true_value(conf.get("log_requests", "true"))
self.max_upload_time = int(conf.get("max_upload_time", 86400))
self.slow = int(conf.get("slow", 0))
self.keep_cache_private = config_true_value(conf.get("keep_cache_private", "false"))
default_allowed_headers = """
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
"""
extra_allowed_headers = [
header.strip().lower()
for header in conf.get("allowed_headers", default_allowed_headers).split(",")
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = conf.get("auto_create_account_prefix") or "."
self.expiring_objects_account = self.auto_create_account_prefix + (
conf.get("expiring_objects_account_name") or "expiring_objects"
)
self.expiring_objects_container_divisor = int(conf.get("expiring_objects_container_divisor") or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if "replication_semaphore" in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf["replication_semaphore"][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(conf.get("replication_failure_threshold") or 100)
self.replication_failure_ratio = float(conf.get("replication_failure_ratio") or 1.0)
def get_diskfile(self, device, partition, account, container, obj, policy_idx, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_mgr.get_diskfile(device, partition, account, container, obj, policy_idx, **kwargs)
def async_update(
self, op, account, container, obj, host, partition, contdevice, headers_out, objdevice, policy_index
):
"""
Sends or saves an async update.
# ... (rest of the code omitted) ...
Example 10: RestoreMiddleware
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class RestoreMiddleware(object):
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.conf = conf
self.logger = get_logger(self.conf, log_route='restore')
self._diskfile_mgr = DiskFileManager(conf, self.logger)
def __call__(self, env, start_response):
req = Request(env)
if (req.method == 'PUT') or (req.method == 'POST'):
if 'X-Object-Meta-S3-Restored' in req.headers:
return self.save_object(env)(env, start_response)
if 'X-Object-Meta-S3-Restore' in req.headers:
return self.set_restoring(env)(env, start_response)
return self.app(env, start_response)
def _split_request_path(self, req):
self.device, self.partition, self.account, self.container, \
self.obj = split_and_validate_path(req, 5, 5, True)
def save_object(self, env):
# Called by the Restorer daemon
req = Request(env)
self._split_request_path(req)
try:
disk_file = self.get_diskfile(self.device, self.partition,
self.account, self.container,
self.obj)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=self.device,
request=Request(env))
ori_meta = disk_file.read_metadata()
metadata = {}
metadata.update(val for val in req.headers.iteritems()
if is_user_meta('object', val[0]))
del metadata['X-Object-Meta-S3-Restored']
# Preserve the original X-Timestamp value
metadata['X-Timestamp'] = ori_meta['X-Timestamp']
metadata['Content-Type'] = ori_meta['Content-Type']
fsize = req.message_length()
etag = md5()
try:
with disk_file.create(size=fsize) as writer:
def timeout_reader():
with ChunkReadTimeout(60):
return req.environ['wsgi.input'].read(65536)
try:
for chunk in iter(lambda: timeout_reader(), ''):
etag.update(chunk)
writer.write(chunk)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=req)
etag = etag.hexdigest()
metadata['ETag'] = etag
metadata['Content-Length'] = str(fsize)
writer.put(metadata)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=self.device, request=req)
return HTTPCreated(request=req, etag=etag)
def set_restoring(self, env):
# Called when the lifecycle middleware marks an object as being restored
req = Request(env)
self._split_request_path(req)
try:
disk_file = self.get_diskfile(self.device, self.partition,
self.account, self.container,
self.obj)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=self.device,
request=Request(env))
ori_meta = disk_file.read_metadata()
metadata = ori_meta
metadata.update(val for val in req.headers.iteritems()
if is_user_meta('object', val[0]))
# Preserve the original X-Timestamp value
with disk_file.create(size=0) as writer:
writer.put(metadata)
return HTTPCreated(request=req, etag=ori_meta['ETag'])
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
return self._diskfile_mgr.get_diskfile(device, partition, account,
container, obj, **kwargs)
Example 11: ObjectController
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
policy_idx, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
# ... (rest of the code omitted) ...
Example 12: ObjectController
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class ObjectController(object):
"""Implements the WSGI application for the Swift Object Server."""
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
self.logger = logger or get_logger(conf, log_route="object-server")
self.node_timeout = int(conf.get("node_timeout", 3))
self.conn_timeout = float(conf.get("conn_timeout", 0.5))
self.client_timeout = int(conf.get("client_timeout", 60))
self.disk_chunk_size = int(conf.get("disk_chunk_size", 65536))
self.network_chunk_size = int(conf.get("network_chunk_size", 65536))
self.log_requests = config_true_value(conf.get("log_requests", "true"))
self.max_upload_time = int(conf.get("max_upload_time", 86400))
self.slow = int(conf.get("slow", 0))
self.keep_cache_private = config_true_value(conf.get("keep_cache_private", "false"))
replication_server = conf.get("replication_server", None)
if replication_server is not None:
replication_server = config_true_value(replication_server)
self.replication_server = replication_server
default_allowed_headers = """
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
"""
extra_allowed_headers = [
header.strip().lower()
for header in conf.get("allowed_headers", default_allowed_headers).split(",")
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.expiring_objects_account = (conf.get("auto_create_account_prefix") or ".") + "expiring_objects"
self.expiring_objects_container_divisor = int(conf.get("expiring_objects_container_divisor") or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if "replication_semaphore" in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf["replication_semaphore"][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(conf.get("replication_failure_threshold") or 100)
self.replication_failure_ratio = float(conf.get("replication_failure_ratio") or 1.0)
def get_diskfile(self, device, partition, account, container, obj, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_mgr.get_diskfile(device, partition, account, container, obj, **kwargs)
def async_update(self, op, account, container, obj, host, partition, contdevice, headers_out, objdevice):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
# ... (rest of the code omitted) ...
Example 13: TruncateMiddleware
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TruncateMiddleware(object):
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.conf = conf
self.logger = get_logger(self.conf, log_route='truncate')
self._diskfile_mgr = DiskFileManager(conf, self.logger)
def truncate(self, env):
req = Request(env)
try:
disk_file = self.get_diskfile(self.device, self.partition,
self.account, self.container,
self.obj)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=self.device,
request=Request(copy(env)))
# In the object write flow, once the temporary data is discarded and put()
# is called on the DiskFileWriter, _finalize_put runs: it builds the object
# file name from the X-Timestamp set in the metadata and swaps the temporary
# file into place, so no separate truncate step is needed here.
ori_meta = disk_file.read_metadata()
metadata = {
'X-Timestamp': ori_meta['X-Timestamp'],
'Content-Type': ori_meta['Content-Type'],
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
'Content-Length': 0,
'X-Object-Meta-Glacier': True,
'X-Object-Meta-S3-Content-Length': ori_meta['Content-Length'],
'X-Object-Meta-S3-ETag': ori_meta['ETag']
}
# Also carry over the original object's user metadata
metadata.update(val for val in ori_meta.iteritems()
if is_user_meta('object', val[0]))
# If the object carries restore info, remove it.
# (This is the restored-object expiration case.)
if 'X-Object-Meta-S3-Restore' in metadata:
del metadata['X-Object-Meta-S3-Restore']
with disk_file.create(size=0) as writer:
writer.put(metadata)
return HTTPCreated(request=req, etag=ori_meta['ETag'])
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
return self._diskfile_mgr.get_diskfile(device, partition, account,
container, obj, **kwargs)
def __call__(self, env, start_response):
req = Request(copy(env))
method = req.method
if (method == 'PUT' or method == 'POST') and \
GLACIER_FLAG_META in req.headers:
self.device, self.partition, self.account, self.container, \
self.obj = split_and_validate_path(req, 5, 5, True)
return self.truncate(env)(env, start_response)
return self.app(env, start_response)
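Middlewares like TruncateMiddleware here (and RestoreMiddleware in Example 10) are attached to the object server through a paste.deploy filter factory. A sketch of the glue code such a module would typically ship; the factory below is assumed, hypothetical wiring, not part of the example itself:
def filter_factory(global_conf, **local_conf):
    # standard paste.deploy entry point
    conf = global_conf.copy()
    conf.update(local_conf)

    def truncate_filter(app):
        return TruncateMiddleware(app, conf)
    return truncate_filter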
Example 14: TestAuditor
# Required import: from swift.obj.diskfile import DiskFileManager [as alias]
# Or: from swift.obj.diskfile.DiskFileManager import get_diskfile [as alias]
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
self.devices = os.path.join(self.testdir, "node")
self.rcache = os.path.join(self.testdir, "object.recon")
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, "sda"))
os.mkdir(os.path.join(self.devices, "sdb"))
# policy 0
self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
self.parts = {}
self.parts_p1 = {}
for part in ["0", "1", "2", "3"]:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[1])
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEquals(auditor_worker.max_bytes_per_second, 10000000)
self.assertEquals(auditor_worker.log_time, 3600)
# test default values
conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 65536)
self.assertEquals(auditor_worker.max_files_per_second, 20)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({"disk_chunk_size": 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices, zero_byte_only_at_fps=50)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 4096)
self.assertEquals(auditor_worker.max_files_per_second, 50)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, "extra_data")
auditor_worker.object_audit(AuditLocation(disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
run_tests(self.disk_file)
run_tests(self.disk_file_p1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES.legacy)
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
# ... (rest of the code omitted) ...