This article collects typical usage examples of the Python method snappy.StreamDecompressor. If you are unsure what snappy.StreamDecompressor does or how to use it, the curated code samples below may help. You can also explore further usage examples from the snappy module in which the method is defined.
Seven code examples of snappy.StreamDecompressor are shown below, sorted by popularity by default.
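Before the examples, a minimal round-trip sketch may help show how StreamDecompressor is meant to be used: python-snappy's StreamCompressor produces framing-format bytes, StreamDecompressor consumes them incrementally via decompress(), and flush() raises if the stream ends mid-frame. The helper name below is illustrative and not taken from any of the projects quoted later.

import snappy

def snappy_roundtrip(payload: bytes) -> bytes:
    # Illustrative helper only (not from the examples below).
    compressor = snappy.StreamCompressor()
    decompressor = snappy.StreamDecompressor()
    framed = compressor.compress(payload)       # framing-format bytes, stream header included
    restored = decompressor.decompress(framed)  # may buffer an incomplete trailing frame
    decompressor.flush()                        # raises UncompressError on a truncated stream
    return restored

assert snappy_roundtrip(b"hello snappy " * 100) == b"hello snappy " * 100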
Example 1: _read_with_snappy
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
async def _read_with_snappy(stream: INetStream, length: int) -> bytes:
    decompressor = StreamDecompressor()
    data = io.BytesIO()
    chunk_size = min(length, MAX_CHUNK_SIZE)
    chunk = await stream.read(chunk_size)
    remaining = length
    while chunk:
        chunk = decompressor.decompress(chunk)
        data.write(chunk)
        remaining -= len(chunk)  # count decompressed bytes against the expected length
        if not remaining:
            break
        chunk = await stream.read(chunk_size)
    decompressor.flush()  # raises if the stream ended mid-frame
    return data.getvalue()
Example 2: __init__
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
def __init__(self, infile, mode, **kwargs):
    import snappy

    self.details = {"size": 999999999}  # not true, but OK if we don't seek
    super().__init__(fs=None, path="snappy", mode=mode.strip("b") + "b", **kwargs)
    self.infile = infile
    if "r" in mode:
        self.codec = snappy.StreamDecompressor()
    else:
        self.codec = snappy.StreamCompressor()
Example 3: __init__
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
def __init__(self, socket):
    self._decompressor = snappy.StreamDecompressor()
    self._compressor = snappy.StreamCompressor()
    super(SnappySocket, self).__init__(socket)
Example 4: get_object
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
async def get_object(self, session: aiohttp.ClientSession, progress_update_fn: Callable) -> None:
    """Method to get the object from S3 after the pre-signed URL has been obtained

    Args:
        session: The current aiohttp session
        progress_update_fn: A callable with arg "completed_bytes" (int) indicating how many bytes
                            have been downloaded since it was last called

    Returns:
        None
    """
    try:
        decompressor = snappy.StreamDecompressor()
        timeout = aiohttp.ClientTimeout(total=None, connect=2 * 60, sock_connect=None, sock_read=5 * 60)
        async with session.get(self.presigned_s3_url, timeout=timeout) as response:
            if response.status != 200:
                # An error occurred
                body = await response.text()
                raise IOError(f"Failed to get {self.object_details.dataset_path} from storage backend."
                              f" Status: {response.status}. Response: {body}")

            async with aiofiles.open(self.object_details.object_path, 'wb') as fd:
                while True:
                    chunk = await response.content.read(self.download_chunk_size)
                    if not chunk:
                        # Flush any buffered tail before finishing
                        await fd.write(decompressor.flush())
                        break
                    decompressed_chunk = decompressor.decompress(chunk)
                    await fd.write(decompressed_chunk)
                    progress_update_fn(completed_bytes=len(decompressed_chunk))

    except Exception as err:
        logger.exception(err)
        raise IOError(f"Failed to get {self.object_details.dataset_path} from storage backend. {err}")
Example 5: _create_decompressor
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
def _create_decompressor(self, alg):
    if alg == "snappy":
        return snappy.StreamDecompressor()
    elif alg == "lzma":
        return lzma.LZMADecompressor()
    elif alg == "zstd":
        return zstd.ZstdDecompressor().decompressobj()
    raise InvalidConfigurationError("invalid compression algorithm: {!r}".format(alg))
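The dispatch above works because all three returned objects expose the same incremental decompress(chunk) method, so callers can stream data through them uniformly. A hedged sketch of such a caller (read_chunks and the surrounding class are hypothetical, not part of the quoted project):

def _decompress_stream(self, alg, read_chunks):
    # Illustrative only: drive whichever decompressor
    # _create_decompressor() returned, chunk by chunk.
    decompressor = self._create_decompressor(alg)
    for chunk in read_chunks():
        out = decompressor.decompress(chunk)
        if out:
            yield out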
Example 6: __init__
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
def __init__(self, next_fp, mode):
    if snappy is None:
        raise io.UnsupportedOperation("Snappy is not available")

    if mode == "rb":
        self.decr = snappy.StreamDecompressor()
        self.encr = None
    elif mode == "wb":
        self.decr = None
        self.encr = snappy.StreamCompressor()
    else:
        raise io.UnsupportedOperation("unsupported mode for SnappyFile")

    super().__init__(next_fp)
    self.decr_done = False
Example 7: restore
# Required module: import snappy [as alias]
# Or: from snappy import StreamDecompressor [as alias]
def restore(script):
    # Build a list of metrics to restore from our object store and globbing
    metrics = search(script)

    # For each metric, find the date we want
    for i in metrics.keys():
        objs = metrics[i]
        d = findBackup(script, objs, script.options.date)
        logger.info("Restoring %s from timestamp %s" % (i, d))

        blobgz = script.store.get("%s%s/%s.wsp.%s"
                                  % (script.options.storage_path, i, d, script.options.algorithm))
        blobSHA = script.store.get("%s%s/%s.sha1"
                                   % (script.options.storage_path, i, d))

        if blobgz is None:
            logger.warning("Skipping missing file in object store: %s/%s.wsp.%s"
                           % (i, d, script.options.algorithm))
            continue

        # Decompress
        blobgz = StringIO(blobgz)
        blob = None
        if script.options.algorithm == "gz":
            fd = gzip.GzipFile(fileobj=blobgz, mode="rb")
            blob = fd.read()
            fd.close()
        elif script.options.algorithm == "sz":
            compressor = snappy.StreamDecompressor()
            blob = compressor.decompress(blobgz.getvalue())
            try:
                compressor.flush()
            except UncompressError as e:
                logger.error("Corrupt file in store: %s%s/%s.wsp.sz Error %s"
                             % (script.options.storage_path, i, d, str(e)))
                continue

        # Verify
        if blobSHA is None:
            logger.warning("Missing SHA1 checksum file...no verification")
        else:
            if hashlib.sha1(blob).hexdigest() != blobSHA:
                logger.warning("Backup does NOT verify, skipping metric %s" % i)
                continue

        heal(script, i, blob)

        # Clean up
        del blob
        blobgz.close()