This article collects typical usage examples of the Python tarfile.BLOCKSIZE attribute. If you are unsure what tarfile.BLOCKSIZE is for, or how it is used in real code, the curated examples below should help; you can also explore further usage examples for the tarfile module itself.
The following presents 7 code examples of the tarfile.BLOCKSIZE attribute, sorted by popularity by default.
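Before the examples, a quick orientation: tarfile.BLOCKSIZE is the 512-byte block unit of the tar format, and member data is padded with NUL bytes up to the next block boundary. The sketch below is illustrative only (padded_size is a hypothetical helper, not part of any example) and shows the divmod pattern that several of the samples repeat.

import tarfile

# tar archives are laid out in fixed 512-byte blocks
assert tarfile.BLOCKSIZE == 512

def padded_size(size):
    # Round a member's data size up to the next multiple of BLOCKSIZE,
    # the same divmod pattern used in several examples below.
    blocks, remainder = divmod(size, tarfile.BLOCKSIZE)
    if remainder:
        blocks += 1
    return blocks * tarfile.BLOCKSIZE

print(padded_size(100))   # 512
print(padded_size(1024))  # 1024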
Example 1: format_sparse_map
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def format_sparse_map(self):
    sparsemap_txt = (str(len(self.sparsemap)) + '\n' +
        ''.join('{}\n{}\n'.format(*entry) for entry in self.sparsemap))
    sparsemap_txt_len = len(sparsemap_txt)
    if sparsemap_txt_len % tarfile.BLOCKSIZE:
        padding = '\0' * (tarfile.BLOCKSIZE -
                          sparsemap_txt_len % tarfile.BLOCKSIZE)
    else:
        padding = ''
    return (sparsemap_txt + padding).encode()
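A quick sanity check of this helper (a sketch only: the surrounding class is not shown above, so the method is called here as a plain function on a hypothetical object that just carries a sparsemap attribute of (offset, size) pairs).

import tarfile
import types

# Hypothetical stand-in for the real object; only .sparsemap is needed.
holder = types.SimpleNamespace(sparsemap=[(0, 512), (4096, 1024)])
data = format_sparse_map(holder)

# The encoded map is NUL-padded to a whole number of tar blocks.
assert len(data) % tarfile.BLOCKSIZE == 0
assert data.startswith(b'2\n0\n512\n4096\n1024\n')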
Example 2: get_sparse_map
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def get_sparse_map(input_file):
    '''
    Return a map of the file regions where actual data is present, ignoring
    zeroed blocks. The last entry of the map spans to the end of the file,
    even if that part is zero-sized (when the file ends with zeros).
    This function is performance critical.
    :param input_file: io.File object
    :return: iterable of (offset, size)
    '''
    zero_block = bytearray(tarfile.BLOCKSIZE)
    buf = bytearray(BUF_SIZE)
    in_data_block = False
    data_block_start = 0
    buf_start_offset = 0
    while True:
        buf_len = input_file.readinto(buf)
        if not buf_len:
            break
        for offset in range(0, buf_len, tarfile.BLOCKSIZE):
            if buf[offset:offset+tarfile.BLOCKSIZE] == zero_block:
                if in_data_block:
                    in_data_block = False
                    yield (data_block_start,
                           buf_start_offset+offset-data_block_start)
            else:
                if not in_data_block:
                    in_data_block = True
                    data_block_start = buf_start_offset+offset
        buf_start_offset += buf_len
    if in_data_block:
        yield (data_block_start, buf_start_offset-data_block_start)
    else:
        # always emit the last slice up to the end of input - otherwise the
        # extracted file would be truncated
        yield (buf_start_offset, 0)
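BUF_SIZE is a module-level constant defined elsewhere in the original project; the sketch below assumes it is some multiple of tarfile.BLOCKSIZE and feeds the generator an in-memory file that starts with one zero block followed by one block of data.

import io
import tarfile

# Assumption: BUF_SIZE lives at module level in the original code; any
# multiple of tarfile.BLOCKSIZE works for this sketch.
BUF_SIZE = 64 * tarfile.BLOCKSIZE

sample = io.BytesIO(bytes(tarfile.BLOCKSIZE) + b'x' * tarfile.BLOCKSIZE)
print(list(get_sparse_map(sample)))
# [(512, 512)] -- the data region starts at offset 512 and spans one block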
Example 3: test_eof_marker
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def test_eof_marker(self):
    # Make sure an end of archive marker is written (two zero blocks).
    # tarfile insists on aligning archives to a 20 * 512 byte recordsize.
    # So, we create an archive that has exactly 10240 bytes without the
    # marker, and has 20480 bytes once the marker is written.
    with tarfile.open(tmpname, self.mode) as tar:
        t = tarfile.TarInfo("foo")
        t.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE
        tar.addfile(t, io.BytesIO(b"a" * t.size))
    with self.open(tmpname, "rb") as fobj:
        self.assertEqual(len(fobj.read()), tarfile.RECORDSIZE * 2)
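The size arithmetic behind that test, spelled out with the standard tarfile constants:

import tarfile

header = tarfile.BLOCKSIZE                       # 512-byte member header
data = tarfile.RECORDSIZE - tarfile.BLOCKSIZE    # 9728 bytes of payload (19 blocks)
print(header + data == tarfile.RECORDSIZE)       # True: exactly one 10240-byte record
# The end-of-archive marker (two zero blocks) spills into a second record,
# which tarfile then pads out to a full RECORDSIZE, giving 20480 bytes.
print(2 * tarfile.RECORDSIZE)                    # 20480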
Example 4: addfile
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def addfile(self, tarinfo, fileobj=None):
    """
    Add the provided fileobj to the tar using md5copyfileobj
    and save the file's md5 in the provided ChecksumTarInfo object.
    This method completely replaces TarFile.addfile()
    """
    self._check("aw")
    tarinfo = copy.copy(tarinfo)
    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)
    # If there's data to follow, append it.
    if fileobj is not None:
        tarinfo.data_checksum = md5copyfileobj(
            fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(
                tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * tarfile.BLOCKSIZE
    self.members.append(tarinfo)
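md5copyfileobj is a helper from the same project and is not shown here; below is a minimal sketch of what such a helper could look like (hypothetical implementation, assumed to copy exactly length bytes and return the hex digest).

import hashlib
import tarfile

def md5copyfileobj(src, dst, length):
    # Hypothetical sketch: copy `length` bytes from src to dst in
    # BLOCKSIZE-sized chunks while computing an md5 checksum on the way.
    md5 = hashlib.md5()
    remaining = length
    while remaining > 0:
        chunk = src.read(min(tarfile.BLOCKSIZE, remaining))
        if not chunk:
            raise IOError("unexpected end of source file")
        md5.update(chunk)
        dst.write(chunk)
        remaining -= len(chunk)
    return md5.hexdigest()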
Example 5: addfile
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def addfile(self, tarinfo, fileobj=None):
    """
    Add the provided fileobj to the tar ignoring truncated or vanished
    files.
    This method completely replaces TarFile.addfile()
    """
    self._check("awx")
    tarinfo = copy.copy(tarinfo)
    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)
    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj_pad_truncate(fileobj, self.fileobj, tarinfo.size)
        blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(
                tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * tarfile.BLOCKSIZE
    self.members.append(tarinfo)
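copyfileobj_pad_truncate is likewise defined elsewhere in that project; the name suggests it copies up to length bytes and NUL-pads the remainder when the source file has shrunk or vanished mid-backup. A hedged sketch of that behaviour:

import tarfile

def copyfileobj_pad_truncate(src, dst, length):
    # Hypothetical sketch: copy `length` bytes from src to dst; if src ends
    # early (file truncated or vanished), pad the rest with NUL bytes so the
    # data stays consistent with the size already written in the header.
    remaining = length
    while remaining > 0:
        chunk = src.read(min(tarfile.BLOCKSIZE, remaining))
        if not chunk:
            dst.write(tarfile.NUL * remaining)
            break
        dst.write(chunk)
        remaining -= len(chunk)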
Example 6: tarfile_write_padding
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def tarfile_write_padding(tarfd, sz):
    blocks, remainder = divmod(sz, tarfile.BLOCKSIZE)
    if remainder > 0:
        tarfd.fileobj.write(b"\0" * (tarfile.BLOCKSIZE - remainder))
        blocks += 1
    tarfd.offset += blocks * tarfile.BLOCKSIZE
    assert tarfd.offset == tarfd.fileobj.tell()

# An exception interrupts stream processing; the tail of the stream can't be
# recovered. A single blob may be malformed, but that is outside the blob
# parser's control.
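A usage sketch: the helper only needs an object with fileobj and offset attributes, which is exactly what a real TarFile provides; a hypothetical stand-in is used here to show the alignment effect.

import io
import tarfile
import types

# Hypothetical stand-in for an open TarFile: only .fileobj and .offset matter.
tarfd = types.SimpleNamespace(fileobj=io.BytesIO(), offset=0)
payload = b"hello"
tarfd.fileobj.write(payload)          # raw member data written by hand
tarfile_write_padding(tarfd, len(payload))
assert tarfd.offset == tarfile.BLOCKSIZE
assert tarfd.fileobj.tell() == tarfile.BLOCKSIZE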
Example 7: process_tarfd
# Required import: import tarfile [as alias]
# Or: from tarfile import BLOCKSIZE [as alias]
def process_tarfd(l, tarfd):
    for tin, blob in l:
        tin.size = len(blob)
        tarfd.addfile(tin)
        # copy-paste from tarfile.addfile
        tarfd.fileobj.write(blob)
        blocks, remainder = divmod(len(blob), tarfile.BLOCKSIZE)
        if remainder > 0:
            tarfd.fileobj.write(b'\0' * (tarfile.BLOCKSIZE - remainder))
            blocks += 1
        tarfd.offset += blocks * tarfile.BLOCKSIZE
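One possible way to drive this function (a sketch only: the (TarInfo, bytes) pairs and file names are hypothetical, and it assumes a Python version whose TarFile.addfile() still accepts a header-only member with a nonzero size; recent CPython versions may insist on a fileobj in that case).

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tarfd:
    # Hypothetical (TarInfo, blob) pairs; process_tarfd fills in the sizes,
    # writes the headers via addfile() and streams the payloads by hand.
    items = [(tarfile.TarInfo("a.txt"), b"first blob"),
             (tarfile.TarInfo("b.txt"), b"second blob")]
    process_tarfd(items, tarfd)

buf.seek(0)
with tarfile.open(fileobj=buf) as check:
    print(check.getnames())  # ['a.txt', 'b.txt']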