

Python typing.BinaryIO class code examples

This article collects typical usage examples of the typing.BinaryIO class in Python. If you are wondering what BinaryIO is for, how to use it, or what real code using it looks like, the class code examples selected below may help.


Below are 15 code examples of the BinaryIO class, ordered by popularity by default.
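
As a quick orientation before the examples: typing.BinaryIO annotates parameters that accept any binary file-like object (a file opened in "rb"/"wb" mode, an io.BytesIO, a socket's makefile("rb"), and so on). The short self-contained sketch below is not taken from any of the projects cited later; it just shows the pattern most of the following examples share, namely reading from one BinaryIO in chunks and writing to another.

import io
import typing

def copy_stream(src: typing.BinaryIO, dst: typing.BinaryIO,
                chunk_size: int = 64 * 1024) -> int:
    """Copy src to dst in chunks and return the number of bytes copied."""
    total = 0
    while True:
        chunk = src.read(chunk_size)
        if not chunk:          # b"" signals end of stream
            break
        dst.write(chunk)
        total += len(chunk)
    return total

# Any binary stream works, e.g. an in-memory io.BytesIO:
source = io.BytesIO(b"hello world")
target = io.BytesIO()
assert copy_stream(source, target) == 11
assert target.getvalue() == b"hello world"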

Example 1: load

def load(file_handle: typing.BinaryIO) -> TSerializable:
    """load(file) -> object

    This function reads a tnetstring from a file and parses it into a
    python object.  The file must support the read() method, and this
    function promises not to read more data than necessary.
    """
    #  Read the length prefix one char at a time.
    #  Note that the netstring spec explicitly forbids padding zeros.
    c = file_handle.read(1)
    if c == b"":  # we want to detect this special case.
        raise ValueError("not a tnetstring: empty file")
    data_length = b""
    while c.isdigit():
        data_length += c
        if len(data_length) > 9:
            raise ValueError("not a tnetstring: absurdly large length prefix")
        c = file_handle.read(1)
    if c != b":":
        raise ValueError("not a tnetstring: missing or invalid length prefix")

    data = file_handle.read(int(data_length))
    data_type = file_handle.read(1)[0]

    return parse(data_type, data)
Author: StevenVanAcker, Project: mitmproxy, Lines: 25, Source: tnetstring.py
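
Because the parameter is typed as BinaryIO, any binary stream can be passed, not only a real file. A minimal usage sketch, assuming load() and the parse() helper it calls (both from mitmproxy's tnetstring.py above) are in scope:

from io import BytesIO

# A tnetstring is <length>:<payload><type-tag>.  Here: length prefix 5,
# payload b"hello", and the trailing b"," tag marks a plain byte string.
stream = BytesIO(b"5:hello,")
print(load(stream))   # should print b'hello'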

Example 2: put

    def put(
        self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
    ) -> None:
        """Store a file."""
        subset = dict_subset(metadata, lambda k, v: k in (
            # We are not storing the 'file_name'
            'image_width', 'image_height', 'original_id', 'version'))
        self._convert_values_to_str(subset)
        if hasattr(bytes_io, 'seekable') and bytes_io.seekable():
            bytes_io.seek(0)

        # When botocore.response.StreamingBody is passed in as bytes_io,
        # the bucket.put_object() call below fails with
        # "AttributeError: 'StreamingBody' object has no attribute 'tell'"
        # so we have to read the stream, getting the bytes:
        if not hasattr(bytes_io, 'tell'):
            bytes_io = bytes_io.read()  # type: ignore

        result = self.bucket.put_object(
            Key=self._get_path(namespace, metadata),
            # done automatically by botocore:  ContentMD5=encoded_md5,
            ContentType=metadata['mime_type'],
            ContentLength=metadata['length'], Body=bytes_io, Metadata=subset)
        # print(result)
        return result
Author: nandoflorestan, Project: keepluggable, Lines: 25, Source: amazon_s3.py

Example 3: __init__

    def __init__(self, archive: BinaryIO, offset: int, length: int, prefix: bytes):
        archive.seek(offset)
        self.name = archive.name
        self.remaining = length
        self.sources = [cast(io.BufferedIOBase, archive)]
        if prefix:
            self.sources.insert(0, cast(io.BufferedIOBase, io.BytesIO(prefix)))
Author: Lattyware, Project: unrpa, Lines: 7, Source: view.py

Example 4: get_index

    def get_index(
        self, archive: BinaryIO, version: Optional[Version] = None
    ) -> Dict[str, ComplexIndexEntry]:
        if not version:
            version = self.version() if self.version else self.detect_version()

        offset = 0
        key: Optional[int] = None
        if self.offset_and_key:
            offset, key = self.offset_and_key
        else:
            offset, key = version.find_offset_and_key(archive)
        archive.seek(offset)
        index: Dict[bytes, IndexEntry] = pickle.loads(
            zlib.decompress(archive.read()), encoding="bytes"
        )
        if key is not None:
            normal_index = UnRPA.deobfuscate_index(key, index)
        else:
            normal_index = UnRPA.normalise_index(index)

        return {
            UnRPA.ensure_str_path(path).replace("/", os.sep): data
            for path, data in normal_index.items()
        }
Author: Lattyware, Project: unrpa, Lines: 25, Source: __init__.py
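
The heart of this method is the pickle.loads(zlib.decompress(...)) round-trip. The toy sketch below reproduces just that decoding step against an in-memory stream; it is a simplified illustration, not a real archive file (in the real method the seek offset and the deobfuscation key come from find_offset_and_key).

import io
import pickle
import zlib

# Build a toy "index": a zlib-compressed pickle of a dict, which is what the
# method above expects to find at the computed offset inside the archive.
index = {b"script.rpy": [(128, 64, b"")]}
archive = io.BytesIO(zlib.compress(pickle.dumps(index, protocol=2)))

archive.seek(0)
decoded = pickle.loads(zlib.decompress(archive.read()), encoding="bytes")
print(decoded)   # {b'script.rpy': [(128, 64, b'')]}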

Example 5: save_to

    async def save_to(self, name: str, fd: BinaryIO):
        async with ClientSession() as client:
            async with client.post(self.get_url, data=name.encode("utf8")) as resp:
                assert resp.status == 200
                while True:
                    data = await resp.content.read(2 << 20)
                    if not data:
                        break
                    fd.write(data)
Author: Mirantis, Project: ceph-monitoring, Lines: 9, Source: web_storage.py

Example 6: read_offset_array

def read_offset_array(file: BinaryIO, count: int):
    """Read an array of offsets to null-terminated strings from the file."""
    cdmat_offsets = str_read(str(count) + 'i', file)
    arr = [None] * count  # type: List[str]

    for ind, off in enumerate(cdmat_offsets):
        file.seek(off)
        arr[ind] = read_nullstr(file)
    return arr
Author: TeamSpen210, Project: srctools, Lines: 9, Source: mdl.py

Example 7: download_into

def download_into(session: requests.Session,
                  url: str, file: BinaryIO, process_func=None) -> None:
  r = session.get(url, stream=True)
  length = int(r.headers.get('Content-Length') or 0)
  received = 0
  for chunk in r.iter_content(CHUNK_SIZE):
    received += len(chunk)
    file.write(chunk)
    if process_func:
      process_func(received, length)
  if not length and process_func:
    process_func(received, received)
Author: archlinuxcn, Project: lilac, Lines: 12, Source: requestsutils.py
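
A possible call site, assuming download_into and the CHUNK_SIZE constant it relies on (from the module above) are importable. The URL and file name are purely illustrative, and actually running this requires network access.

import sys
import requests

def show_progress(received: int, total: int) -> None:
    # Progress callback matching the (received, length) signature used above.
    sys.stdout.write(f"\r{received}/{total or '?'} bytes")
    sys.stdout.flush()

# Hypothetical URL, for illustration only.
with requests.Session() as session, open("archive.tar.gz", "wb") as out:
    download_into(session, "https://example.com/archive.tar.gz", out, show_progress)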

Example 8: postprocess

    def postprocess(self, source: ArchiveView, sink: BinaryIO) -> None:
        if self.details:
            key, amount = self.details
            parts = []
            while amount > 0:
                part = source.read(amount)
                amount -= len(part)
                parts.append(part)
            sink.write(obfuscation_run(b"".join(parts), key))
        else:
            raise Exception("find_offset_and_key must be called before postprocess")
        for segment in iter(source.read1, b""):
            sink.write(segment)
Author: Lattyware, Project: unrpa, Lines: 13, Source: zix.py

Example 9: read_nullstr

def read_nullstr(file: BinaryIO, pos: int=None):
    """Read a null-terminated string from the file."""
    if pos is not None:
        if pos == 0:
            return ''
        file.seek(pos)

    text = []
    while True:
        char = file.read(1)
        if char == b'\0':
            return b''.join(text).decode('ascii')
        if not char:
            raise ValueError('Fell off end of file!')
        text.append(char)
Author: TeamSpen210, Project: srctools, Lines: 15, Source: mdl.py
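
Because the function only needs read() and seek(), it can be exercised with an in-memory stream. A minimal check, assuming read_nullstr from the example above is in scope:

import io

buf = io.BytesIO(b"first\0second\0")
print(read_nullstr(buf))        # 'first'  (reads up to the NUL terminator)
print(read_nullstr(buf, 6))     # 'second' (seeks to offset 6 first)
print(read_nullstr(buf, 0))     # ''       (pos == 0 is treated as the empty string)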

Example 10: add_member_stream

    def add_member_stream( self,
        path: PurePosixPath, mtime: ArchiveMTime,
        content_stream: BinaryIO,
    ) -> None:
        content = content_stream.read()
        assert isinstance(content, bytes), type(content)
        return self.add_member_bytes(path, mtime, content)
Author: tifv, Project: jeolm, Lines: 7, Source: archive.py

Example 11: read_delimited_chunks

def read_delimited_chunks(infile: BinaryIO, chunk_size: int) -> Generator[bytes, None, None]:
    """Yield the contents of infile in chunk_size pieces ending at newlines.
    The individual pieces, except for the last one, end in newlines and
    are smaller than chunk_size if possible.

    Params:
        infile: stream to read from
        chunk_size: maximum size of each chunk

    Yields:
        chunk: chunk with maximum size of chunk_size if possible
    """
    leftover = b""

    while True:
        new_chunk = infile.read(chunk_size)
        chunks = split_chunks(leftover + new_chunk, chunk_size)
        leftover = b""
        # the last item in chunks has to be combined with the next chunk
        # read from the file because it may not actually stop at a
        # newline and to avoid very small chunks.
        if chunks:
            leftover = chunks[-1]
            chunks = chunks[:-1]
        for chunk in chunks:
            yield chunk

        if not new_chunk:
            if leftover:
                yield leftover
            break
Author: razuz, Project: intelmq, Lines: 31, Source: splitreports.py
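
The generator only calls infile.read(), so an io.BytesIO works as input. The standalone stand-in below is not the intelmq implementation (which relies on a split_chunks helper not shown here); it merely illustrates the same contract: yield pieces that end at a newline and stay close to the chunk size where possible.

import io
from typing import BinaryIO, Generator

def chunks_at_newlines(infile: BinaryIO, chunk_size: int) -> Generator[bytes, None, None]:
    """Simplified stand-in for read_delimited_chunks (illustration only)."""
    leftover = b""
    while True:
        block = infile.read(chunk_size)
        data = leftover + block
        if not block:
            if data:
                yield data
            return
        cut = data.rfind(b"\n") + 1     # cut just after the last newline seen
        if cut == 0:                    # no newline at all: emit the block as-is
            cut = len(data)
        yield data[:cut]
        leftover = data[cut:]

stream = io.BytesIO(b"line1\nline2\nline3\nline4\n")
print(list(chunks_at_newlines(stream, chunk_size=14)))
# [b'line1\nline2\n', b'line3\nline4\n']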

Example 12: generate_reports

def generate_reports(report_template: Report, infile: BinaryIO, chunk_size: Optional[int],
                     copy_header_line: bool) -> Generator[Report, None, None]:
    """Generate reports from a template and input file, optionally split into chunks.

    If chunk_size is None, a single report is generated with the entire
    contents of infile as the raw data. Otherwise chunk_size should be
    an integer giving the maximum number of bytes in a chunk. The data
    read from infile is then split into chunks of this size at newline
    characters (see read_delimited_chunks). For each of the chunks, this
    function yields a copy of the report_template with that chunk as the
    value of the raw attribute.

    When splitting the data into chunks, if copy_header_line is true,
    the first line the file is read before chunking and then prepended
    to each of the chunks. This is particularly useful when splitting
    CSV files.

    The infile should be a file-like object. generate_reports uses only
    two methods, readline and read, with readline only called once and
    only if copy_header_line is true. Both methods should return bytes
    objects.

    Params:
        report_template: report used as template for all yielded copies
        infile: stream to read from
        chunk_size: maximum size of each chunk
        copy_header_line: copy the first line of the infile to each chunk

    Yields:
        report: a Report object holding the chunk in the raw field
    """
    if chunk_size is None:
        report = report_template.copy()
        data = infile.read()
        if data:
            report.add("raw", data, overwrite=True)
            yield report
    else:
        header = b""
        if copy_header_line:
            header = infile.readline()
        for chunk in read_delimited_chunks(infile, chunk_size):
            report = report_template.copy()
            report.add("raw", header + chunk, overwrite=True)
            yield report
Author: certtools, Project: intelmq, Lines: 45, Source: splitreports.py

Example 13: put

    def put(
        self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
    ) -> None:
        """Store a file (``bytes_io``) inside ``namespace``."""
        if bytes_io.tell():
            bytes_io.seek(0)
        outdir = self._dir_of(namespace)
        if not outdir.exists():
            outdir.mkdir(parents=True)  # Create namespace directory as needed
        outfile = outdir / self._get_filename(metadata)
        with open(str(outfile), mode='wb', buffering=MEGABYTE) as writer:
            while True:
                chunk = bytes_io.read(MEGABYTE)
                if chunk:
                    writer.write(chunk)
                else:
                    break
        assert outfile.lstat().st_size == metadata['length']
Author: nandoflorestan, Project: keepluggable, Lines: 18, Source: local.py

Example 14: _compute_md5

    def _compute_md5(
        self, bytes_io: BinaryIO, metadata: Dict[str, Any],
    ) -> None:
        from hashlib import md5
        two_megabytes = 1048576 * 2
        the_hash = md5()
        the_length = 0
        bytes_io.seek(0)
        while True:
            segment = bytes_io.read(two_megabytes)
            if segment == b'':
                break
            the_length += len(segment)
            the_hash.update(segment)
        metadata['md5'] = the_hash.hexdigest()
        previous_length = metadata.get('length')
        if previous_length is None:
            metadata['length'] = the_length
        else:
            assert previous_length == the_length, "Bug? File lengths {}, {} " \
                "don't match.".format(previous_length, the_length)
        bytes_io.seek(0)  # ...so it can be read again
Author: nandoflorestan, Project: keepluggable, Lines: 22, Source: actions.py
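
The same chunked-digest pattern as a standalone function (a sketch, not part of keepluggable), easy to verify against a well-known MD5 value:

import io
from hashlib import md5
from typing import BinaryIO

def md5_of_stream(bytes_io: BinaryIO, chunk_size: int = 2 * 1024 * 1024) -> str:
    """Hash a binary stream in chunks and rewind it so it can be read again."""
    the_hash = md5()
    bytes_io.seek(0)
    while True:
        segment = bytes_io.read(chunk_size)
        if not segment:
            break
        the_hash.update(segment)
    bytes_io.seek(0)
    return the_hash.hexdigest()

assert md5_of_stream(io.BytesIO(b"abc")) == "900150983cd24fb0d6963f7d28e17f72"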

Example 15: mktar_from_dockerfile

def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:
    """
    Create a gzip-compressed tar archive from a Dockerfile
    **Remember to close the file object**
    Args:
        fileobject: a Dockerfile (a binary file-like object)
    Returns:
        a NamedTemporaryFile() object
    """

    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode="w:gz", fileobj=f)

    if isinstance(fileobject, BytesIO):
        dfinfo = tarfile.TarInfo("Dockerfile")
        dfinfo.size = len(fileobject.getvalue())
        fileobject.seek(0)
    else:
        dfinfo = t.gettarinfo(fileobj=fileobject, arcname="Dockerfile")

    t.addfile(dfinfo, fileobject)
    t.close()
    f.seek(0)
    return f
Author: paultag, Project: aiodocker, Lines: 24, Source: utils.py
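
A quick round-trip check, assuming the function above is in scope together with its imports (tempfile, tarfile and io.BytesIO):

import io
import tarfile

dockerfile = io.BytesIO(b"FROM python:3.11-slim\n")
tar_file = mktar_from_dockerfile(dockerfile)
with tarfile.open(fileobj=tar_file, mode="r:gz") as t:
    print(t.getnames())   # ['Dockerfile']
tar_file.close()          # remember to close the NamedTemporaryFile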


Note: The typing.BinaryIO class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.