Python gzip.compress Method Code Examples

This article collects typical usage examples of the gzip.compress method in Python. If you are wondering what exactly gzip.compress does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the gzip module, where this method lives.


Below are 15 code examples of the gzip.compress method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
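
Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what gzip.compress does: it compresses an in-memory bytes object and returns gzip-formatted bytes, which gzip.decompress reverses.

import gzip

# Compress an in-memory bytes payload; compresslevel ranges from 0 (no
# compression) to 9 (best compression) and defaults to 9.
payload = b"hello world " * 1000
compressed = gzip.compress(payload, compresslevel=9)

# gzip.decompress restores the original bytes.
assert gzip.decompress(compressed) == payload
print(len(payload), "->", len(compressed))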

Example 1: loads

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def loads(buf):
    mv = memoryview(buf)
    header = read_file_header(mv)
    compress = header.compress

    if compress == CompressType.NONE:
        data = buf[HEADER_LENGTH:]
    else:
        data = decompressors[compress](mv[HEADER_LENGTH:])

    if header.type == SerialType.ARROW:
        try:
            return pyarrow.deserialize(memoryview(data), mars_serialize_context())
        except pyarrow.lib.ArrowInvalid:  # pragma: no cover
            # reconstruct value from buffers of arrow components
            data_view = memoryview(data)
            meta_block_size = np.frombuffer(data_view[0:4], dtype='int32').item()
            meta = pickle.loads(data_view[4:4 + meta_block_size])  # nosec
            buffer_sizes = meta.pop('buffer_sizes')
            bounds = np.cumsum([4 + meta_block_size] + buffer_sizes)
            meta['data'] = [pyarrow.py_buffer(data_view[bounds[idx]:bounds[idx + 1]])
                            for idx in range(len(buffer_sizes))]
            return pyarrow.deserialize_components(meta, mars_serialize_context())
    else:
        return pickle.loads(data) 
Developer ID: mars-project, Project: mars, Lines: 27, Source file: dataserializer.py

Example 2: dump

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def dump(obj, file, *, serial_type=None, compress=None, pickle_protocol=None):
    if serial_type is None:
        serial_type = SerialType.ARROW if pyarrow is not None else SerialType.PICKLE
    if compress is None:
        compress = CompressType.NONE
    try:
        if serial_type == SerialType.ARROW:
            serialized = pyarrow.serialize(obj, mars_serialize_context())
            data_size = serialized.total_bytes
            write_file_header(file, file_header(serial_type, SERIAL_VERSION, data_size, compress))
            file = open_compression_file(file, compress)
            serialized.write_to(file)
        else:
            pickle_protocol = pickle_protocol or pickle.HIGHEST_PROTOCOL
            serialized = pickle.dumps(obj, protocol=pickle_protocol)
            data_size = len(serialized)
            write_file_header(file, file_header(serial_type, SERIAL_VERSION, data_size, compress))
            file = open_compression_file(file, compress)
            file.write(serialized)
    finally:
        if compress != CompressType.NONE:
            file.close()
    return 
Developer ID: mars-project, Project: mars, Lines: 25, Source file: dataserializer.py

Example 3: upload_s3

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def upload_s3(bucket, json_file, metadata):
    """
    파일을 gz하여 s3로 업로드
    :param json_file: 업로드할 json 파일명
    :return:
    """
    gz_name = f"{json_file}.gz"
    obj_key = f"json/{path.basename(gz_name)}"
    print("업로드", gz_name, obj_key)

    with open(json_file, 'rb') as f:
        gz = gzip.compress(f.read())
        s3.put_object(
            Body=gz,
            Bucket=bucket,
            ContentEncoding='gzip',
            ContentLanguage='string',
            ContentType='application/json',
            Key=obj_key,
            # TODO: add metadata - 2018-07-28
            Metadata=metadata,
        ) 
Developer ID: awskrug, Project: handson-labs-2018, Lines: 24, Source file: shp2json.py

Example 4: get_data_uri

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def get_data_uri(data):

    """
    Return a data uri for the input, which can be either a string or byte array
    """

    if isinstance(data, str):
        data = compress(data.encode())
        mediatype = "data:application/gzip"
    else:
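        # 0x1f 0x8b is the gzip magic number, so the payload is already gzipped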
        if data[0] == 0x1f and data[1] == 0x8b:
            mediatype = "data:application/gzip"
        else:
            mediatype = "data:application:octet-stream"

    enc_str = b64encode(data)

    data_uri = mediatype + ";base64," + str(enc_str)[2:-1]
    return data_uri 
Developer ID: igvteam, Project: igv-reports, Lines: 21, Source file: datauri.py

Example 5: s3fs_nifti_write

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def s3fs_nifti_write(img, fname, fs=None):
    """
    Write a nifti file straight to S3

    Parameters
    ----------
    img : nib.Nifti1Image class instance
        The image containing data to be written into S3
    fname : string
        Full path (including bucket name and extension) to the S3 location
        where the file is to be saved.
    fs : an s3fs.S3FileSystem class instance, optional
        A file system to refer to. Defaults to creating a new file system.
    """
    if fs is None:
        fs = s3fs.S3FileSystem()

    bio = BytesIO()
    file_map = img.make_file_map({'image': bio, 'header': bio})
    img.to_file_map(file_map)
    data = gzip.compress(bio.getvalue())
    with fs.open(fname, 'wb') as ff:
        ff.write(data) 
Developer ID: yeatmanlab, Project: pyAFQ, Lines: 25, Source file: data.py

Example 6: save_routes

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def save_routes(agency_id, routes, save_to_s3=False):
    data_str = json.dumps({
        'version': DefaultVersion,
        'routes': [route.data for route in routes]
    }, separators=(',', ':'))

    cache_path = get_cache_path(agency_id)

    with open(cache_path, "w") as f:
        f.write(data_str)

    if save_to_s3:
        s3 = boto3.resource('s3')
        s3_path = get_s3_path(agency_id)
        s3_bucket = config.s3_bucket
        print(f'saving to s3://{s3_bucket}/{s3_path}')
        object = s3.Object(s3_bucket, s3_path)
        object.put(
            Body=gzip.compress(bytes(data_str, 'utf-8')),
            CacheControl='max-age=86400',
            ContentType='application/json',
            ContentEncoding='gzip',
            ACL='public-read'
        ) 
Developer ID: trynmaps, Project: metrics-mvp, Lines: 26, Source file: routeconfig.py

Example 7: gen_kubeconfig

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def gen_kubeconfig(self, component, server='localhost'):
        """Generate kubeconfig"""

        kubeconfig = loads(files['kubeconfig'].decode(), object_pairs_hook=OrderedDict)
        kubeconfig['users'][0]['user']['client-certificate'] = 'tls/client/{}.crt'.format(component)
        kubeconfig['clusters'][0]['cluster']['server'] = 'https://' + server + ':6443'

        kubeconfig = compress((dumps(kubeconfig, indent=2) + '\n').encode())

        self.add_files([
            {
                'filesystem': 'root',
                'path': '/etc/kubernetes/kubeconfig-' + component + '.gz',
                'mode': 416, # 0640
                'contents': {
                    'source': 'data:,' + quote(kubeconfig)
                }
            }
        ]) 
Developer ID: antoineco, Project: kOVHernetes, Lines: 21, Source file: userdata.py

Example 8: gen_kubemanifest

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def gen_kubemanifest(self, component, tag):
        """Generate Kubernetes Pod manifest"""

        manifest = loads(files[component].decode(), object_pairs_hook=OrderedDict)
        manifest['spec']['containers'][0]['image'] = 'k8s.gcr.io/hyperkube:v{}'.format(self.k8s_ver)

        manifest = compress((dumps(manifest, indent=2) + '\n').encode())

        self.add_files([
            {
                'filesystem': 'root',
                'path': '/etc/kubernetes/manifests/kube-{}.json'.format(component) + '.gz',
                'mode': 416, # 0640
                'contents': {
                    'source': 'data:,' + quote(manifest)
                }
            }
        ]) 
Developer ID: antoineco, Project: kOVHernetes, Lines: 20, Source file: userdata.py

Example 9: write_sequence_file

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_sequence_file(path, structure, compressed=True):
    '''Encodes and writes MMTF-encoded structure data to a Hadoop Sequence File

    Parameters
    ----------
    path : str
       Path to the Hadoop file directory
    structure : tuple
       structure data to be written
    compressed : bool
       if True, apply gzip compression
    '''
    # Can't apply first() function on list

    structure.map(lambda t: (t[0], _to_byte_array(t[1], compressed)))\
             .saveAsHadoopFile(path,
                               "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                               "org.apache.hadoop.io.Text",
                               "org.apache.hadoop.io.BytesWritable") 
Developer ID: sbl-sdsc, Project: mmtf-pyspark, Lines: 21, Source file: mmtfWriter.py

Example 10: _to_byte_array

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def _to_byte_array(structure, compressed):
    '''Returns an MMTF-encoded byte array with optional gzip compression

    Returns
    -------
    list
       MMTF encoded and optionally gzipped structure data
    '''

    byte_array = bytearray(msgpack.packb(structure.input_data, use_bin_type=True))
    #byte_array = bytearray(msgpack.packb(MMTFEncoder.encode_data(structure), use_bin_type = True))

    if compressed:
        return gzip.compress(byte_array)
    else:
        return byte_array 
Developer ID: sbl-sdsc, Project: mmtf-pyspark, Lines: 18, Source file: mmtfWriter.py

Example 11: write_data

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_data(uuid, data):
    """Try to store the data, log errors."""

    try:
        CACHE.set(
            "{}{}".format(Keys.complete.value, uuid),
            codecs.decode(
                base64.b64encode(compress(codecs.encode(
                    ujson.dumps(data),
                    "utf-8",
                ))),
                "utf-8",
            ),
            timeout=EXPIRY,
        )
    except Exception as error:
        LOG.warning("Failed to save data: %r", error) 
Developer ID: a-tal, Project: esi-knife, Lines: 19, Source file: utils.py

Example 12: write_results

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_results(results, character_id):
    """Write the results to a compressed .knife file."""

    fname = "{}.knife".format(character_id)
    i = 0
    while os.path.isfile(fname):
        i += 1
        fname = "{}-{}.knife".format(character_id, i)

    with open(fname, "w") as openout:
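        # results -> JSON string -> utf-8 bytes -> gzip -> base64 -> utf-8 string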
        openout.write(codecs.decode(
            base64.b64encode(compress(codecs.encode(
                json.dumps(results),
                "utf-8",
            ))),
            "utf-8",
        ))

    print("created {}".format(fname)) 
Developer ID: a-tal, Project: esi-knife, Lines: 21, Source file: cli.py

Example 13: test_fetch_deflate_encoded_csv

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def test_fetch_deflate_encoded_csv(self):
        body = b"A,B\nx,y\nz,a"
        zo = zlib.compressobj(wbits=-zlib.MAX_WBITS)
        zbody = zo.compress(body) + zo.flush()
        url = self.build_url("/path/to.csv.gz")
        self.mock_http_response = MockHttpResponse.ok(
            zbody,
            [
                ("Content-Type", "text/csv; charset=utf-8"),
                ("Content-Encoding", "deflate"),
            ],
        )
        with call_fetch(url) as result:
            self.assertEqual(result.errors, [])
            with httpfile.read(result.path) as (_, __, headers, body_path):
                self.assertEqual(body_path.read_bytes(), body) 
Developer ID: CJWorkbench, Project: cjworkbench, Lines: 18, Source file: test_loadurl.py

Example 14: encode_file

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def encode_file(path: Path) -> str:
    compressed = gzip.compress(path.read_bytes(), compresslevel=9)
    return base64.b64encode(compressed).decode('utf-8') 
Developer ID: lRomul, Project: argus-freesound, Lines: 5, Source file: build_kernel.py

Example 15: flush_index

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def flush_index(self, compressed=True):
        data = json.dumps(self.index).encode("utf-8")
        if compressed:
            logger.debug("Using gzip encoding for writing index")
            data = gzip.compress(data)
        else:
            logger.debug("Using plain text encoding for writing index")

        self.boto.put_object(Bucket=self.bucket, Key=self.index_path(), Body=data) 
Developer ID: MichaelAquilina, Project: S4, Lines: 11, Source file: s3.py


Note: The gzip.compress examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.