

Python gzip.compress Method Code Examples

This article collects and summarizes typical usage examples of Python's gzip.compress method. If you are wondering how exactly gzip.compress is used, how to call it, or what real-world examples look like, the hand-picked code examples below should help. You can also explore further usage examples from the gzip module that the method belongs to.


The following presents 15 code examples of the gzip.compress method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
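
Before the project examples, here is a minimal sketch of the basic round trip with gzip.compress and gzip.decompress from the standard library (the payload and compresslevel are illustrative):

import gzip

payload = b"example payload " * 64                 # illustrative input bytes
blob = gzip.compress(payload, compresslevel=9)     # gzip-framed, compressed bytes
assert gzip.decompress(blob) == payload            # lossless round trip
print(len(payload), "->", len(blob))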

Example 1: loads

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def loads(buf):
    mv = memoryview(buf)
    header = read_file_header(mv)
    compress = header.compress

    if compress == CompressType.NONE:
        data = buf[HEADER_LENGTH:]
    else:
        data = decompressors[compress](mv[HEADER_LENGTH:])

    if header.type == SerialType.ARROW:
        try:
            return pyarrow.deserialize(memoryview(data), mars_serialize_context())
        except pyarrow.lib.ArrowInvalid:  # pragma: no cover
            # reconstruct value from buffers of arrow components
            data_view = memoryview(data)
            meta_block_size = np.frombuffer(data_view[0:4], dtype='int32').item()
            meta = pickle.loads(data_view[4:4 + meta_block_size])  # nosec
            buffer_sizes = meta.pop('buffer_sizes')
            bounds = np.cumsum([4 + meta_block_size] + buffer_sizes)
            meta['data'] = [pyarrow.py_buffer(data_view[bounds[idx]:bounds[idx + 1]])
                            for idx in range(len(buffer_sizes))]
            return pyarrow.deserialize_components(meta, mars_serialize_context())
    else:
        return pickle.loads(data) 
Author: mars-project, Project: mars, Lines: 27, Source file: dataserializer.py

Example 2: dump

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def dump(obj, file, *, serial_type=None, compress=None, pickle_protocol=None):
    if serial_type is None:
        serial_type = SerialType.ARROW if pyarrow is not None else SerialType.PICKLE
    if compress is None:
        compress = CompressType.NONE
    try:
        if serial_type == SerialType.ARROW:
            serialized = pyarrow.serialize(obj, mars_serialize_context())
            data_size = serialized.total_bytes
            write_file_header(file, file_header(serial_type, SERIAL_VERSION, data_size, compress))
            file = open_compression_file(file, compress)
            serialized.write_to(file)
        else:
            pickle_protocol = pickle_protocol or pickle.HIGHEST_PROTOCOL
            serialized = pickle.dumps(obj, protocol=pickle_protocol)
            data_size = len(serialized)
            write_file_header(file, file_header(serial_type, SERIAL_VERSION, data_size, compress))
            file = open_compression_file(file, compress)
            file.write(serialized)
    finally:
        if compress != CompressType.NONE:
            file.close()
    return 
Author: mars-project, Project: mars, Lines: 25, Source file: dataserializer.py

Example 3: upload_s3

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def upload_s3(bucket, json_file, metadata):
    """
    파일을 gz하여 s3로 업로드
    :param json_file: 업로드할 json 파일명
    :return:
    """
    gz_name = f"{json_file}.gz"
    obj_key = f"json/{path.basename(gz_name)}"
    print("업로드", gz_name, obj_key)

    with open(json_file, 'rb') as f:
        gz = gzip.compress(f.read())
        s3.put_object(
            Body=gz,
            Bucket=bucket,
            ContentEncoding='gzip',
            ContentLanguage='string',
            ContentType='application/json',
            Key=obj_key,
            # todo: add metadata - 2018-07-28
            Metadata=metadata,
        ) 
Author: awskrug, Project: handson-labs-2018, Lines: 24, Source file: shp2json.py

Example 4: get_data_uri

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def get_data_uri(data):

    """
    Return a data uri for the input, which can be either a string or byte array
    """

    if isinstance(data, str):
        data = compress(data.encode())
        mediatype = "data:application/gzip"
    else:
        # 0x1f 0x8b are the gzip magic bytes
        if data[0] == 0x1f and data[1] == 0x8b:
            mediatype = "data:application/gzip"
        else:
            mediatype = "data:application/octet-stream"

    enc_str = b64encode(data)

    data_uri = mediatype + ";base64," + str(enc_str)[2:-1]
    return data_uri 
Author: igvteam, Project: igv-reports, Lines: 21, Source file: datauri.py
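
As a rough usage sketch (an assumption about how such a URI would be consumed, not part of the igv-reports code shown): the base64 payload after the ";base64," marker decodes back to the gzipped bytes, and gzip.decompress restores the original text.

from base64 import b64decode
import gzip

uri = get_data_uri("chr1\t100\t200\n")        # str input: gzip then base64
b64_payload = uri.split(";base64,", 1)[1]     # drop the "data:application/gzip" prefix
original = gzip.decompress(b64decode(b64_payload)).decode()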

Example 5: s3fs_nifti_write

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def s3fs_nifti_write(img, fname, fs=None):
    """
    Write a nifti file straight to S3

    Parameters
    ----------
    img : nib.Nifti1Image class instance
        The image containing data to be written into S3
    fname : string
        Full path (including bucket name and extension) to the S3 location
        where the file is to be saved.
    fs : an s3fs.S3FileSystem class instance, optional
        A file system to refer to. If None, a new S3FileSystem is created.
    """
    if fs is None:
        fs = s3fs.S3FileSystem()

    bio = BytesIO()
    file_map = img.make_file_map({'image': bio, 'header': bio})
    img.to_file_map(file_map)
    data = gzip.compress(bio.getvalue())
    with fs.open(fname, 'wb') as ff:
        ff.write(data) 
Author: yeatmanlab, Project: pyAFQ, Lines: 25, Source file: data.py

Example 6: save_routes

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def save_routes(agency_id, routes, save_to_s3=False):
    data_str = json.dumps({
        'version': DefaultVersion,
        'routes': [route.data for route in routes]
    }, separators=(',', ':'))

    cache_path = get_cache_path(agency_id)

    with open(cache_path, "w") as f:
        f.write(data_str)

    if save_to_s3:
        s3 = boto3.resource('s3')
        s3_path = get_s3_path(agency_id)
        s3_bucket = config.s3_bucket
        print(f'saving to s3://{s3_bucket}/{s3_path}')
        object = s3.Object(s3_bucket, s3_path)
        object.put(
            Body=gzip.compress(bytes(data_str, 'utf-8')),
            CacheControl='max-age=86400',
            ContentType='application/json',
            ContentEncoding='gzip',
            ACL='public-read'
        ) 
Author: trynmaps, Project: metrics-mvp, Lines: 26, Source file: routeconfig.py
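
A hedged read-back counterpart (not part of the metrics-mvp code above; the bucket and key names are hypothetical): get_object returns the stored bytes exactly as uploaded, so the gzip layer has to be undone explicitly before parsing the JSON.

import gzip
import json

import boto3

s3 = boto3.client('s3')
resp = s3.get_object(Bucket='my-bucket', Key='routes/routes_example.json')  # hypothetical names
routes_data = json.loads(gzip.decompress(resp['Body'].read()).decode('utf-8'))
print(routes_data['version'], len(routes_data['routes']))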

Example 7: gen_kubeconfig

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def gen_kubeconfig(self, component, server='localhost'):
        """Generate kubeconfig"""

        kubeconfig = loads(files['kubeconfig'].decode(), object_pairs_hook=OrderedDict)
        kubeconfig['users'][0]['user']['client-certificate'] = 'tls/client/{}.crt'.format(component)
        kubeconfig['clusters'][0]['cluster']['server'] = 'https://' + server + ':6443'

        kubeconfig = compress((dumps(kubeconfig, indent=2) + '\n').encode())

        self.add_files([
            {
                'filesystem': 'root',
                'path': '/etc/kubernetes/kubeconfig-' + component + '.gz',
                'mode': 416, # 0640
                'contents': {
                    'source': 'data:,' + quote(kubeconfig)
                }
            }
        ]) 
Author: antoineco, Project: kOVHernetes, Lines: 21, Source file: userdata.py

Example 8: gen_kubemanifest

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def gen_kubemanifest(self, component, tag):
        """Generate Kubernetes Pod manifest"""

        manifest = loads(files[component].decode(), object_pairs_hook=OrderedDict)
        manifest['spec']['containers'][0]['image'] = 'k8s.gcr.io/hyperkube:v{}'.format(self.k8s_ver)

        manifest = compress((dumps(manifest, indent=2) + '\n').encode())

        self.add_files([
            {
                'filesystem': 'root',
                'path': '/etc/kubernetes/manifests/kube-{}.json'.format(component) + '.gz',
                'mode': 416, # 0640
                'contents': {
                    'source': 'data:,' + quote(manifest)
                }
            }
        ]) 
Author: antoineco, Project: kOVHernetes, Lines: 20, Source file: userdata.py

Example 9: write_sequence_file

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_sequence_file(path, structure, compressed=True):
    '''Encodes and writes MMTF-encoded structure data to a Hadoop Sequence File

    Parameters
    ----------
    path : str
       Path to the Hadoop file directory
    structure : tuple
       Structure data to be written
    compressed : bool
       If True, apply gzip compression
    '''
    # Can't apply first() function on list

    structure.map(lambda t: (t[0], _to_byte_array(t[1], compressed)))\
             .saveAsHadoopFile(path,
                               "org.apache.hadoop.mapred.SequenceFileOutputFormat",
                               "org.apache.hadoop.io.Text",
                               "org.apache.hadoop.io.BytesWritable") 
Author: sbl-sdsc, Project: mmtf-pyspark, Lines: 21, Source file: mmtfWriter.py

Example 10: _to_byte_array

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def _to_byte_array(structure, compressed):
    '''Returns an MMTF-encoded byte array with optional gzip compression

    Returns
    -------
    bytes or bytearray
       MMTF-encoded and optionally gzipped structure data
    '''

    byte_array = bytearray(msgpack.packb(structure.input_data, use_bin_type=True))
    #byte_array = bytearray(msgpack.packb(MMTFEncoder.encode_data(structure), use_bin_type = True))

    if compressed:
        return gzip.compress(byte_array)
    else:
        return byte_array 
Author: sbl-sdsc, Project: mmtf-pyspark, Lines: 18, Source file: mmtfWriter.py
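
For reference, a minimal sketch of the reverse direction (written here as an assumption, not taken from mmtf-pyspark): gzip.decompress undoes the optional compression and msgpack.unpackb restores the structure dict.

import gzip

import msgpack

def _from_byte_array(byte_array, compressed):
    # Undo the optional gzip layer, then decode the msgpack payload
    raw = gzip.decompress(byte_array) if compressed else bytes(byte_array)
    return msgpack.unpackb(raw, raw=False)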

Example 11: write_data

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_data(uuid, data):
    """Try to store the data, log errors."""

    try:
        CACHE.set(
            "{}{}".format(Keys.complete.value, uuid),
            codecs.decode(
                base64.b64encode(compress(codecs.encode(
                    ujson.dumps(data),
                    "utf-8",
                ))),
                "utf-8",
            ),
            timeout=EXPIRY,
        )
    except Exception as error:
        LOG.warning("Failed to save data: %r", error) 
Author: a-tal, Project: esi-knife, Lines: 19, Source file: utils.py

Example 12: write_results

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def write_results(results, character_id):
    """Write the results to a compressed .knife file."""

    fname = "{}.knife".format(character_id)
    i = 0
    while os.path.isfile(fname):
        i += 1
        fname = "{}-{}.knife".format(character_id, i)

    with open(fname, "w") as openout:
        openout.write(codecs.decode(
            base64.b64encode(compress(codecs.encode(
                json.dumps(results),
                "utf-8",
            ))),
            "utf-8",
        ))

    print("created {}".format(fname)) 
Author: a-tal, Project: esi-knife, Lines: 21, Source file: cli.py
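
The pipeline in examples 11 and 12 is JSON -> utf-8 -> gzip -> base64 -> utf-8 text; here is a hedged sketch of a matching decoder (an illustrative helper, not part of esi-knife):

import base64
import codecs
import json
from gzip import decompress

def read_results(fname):
    """Reverse the .knife encoding: base64 -> gunzip -> utf-8 -> JSON."""
    with open(fname, "r") as openin:
        payload = openin.read()
    return json.loads(codecs.decode(
        decompress(base64.b64decode(codecs.encode(payload, "utf-8"))),
        "utf-8",
    ))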

Example 13: test_fetch_deflate_encoded_csv

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def test_fetch_deflate_encoded_csv(self):
        body = b"A,B\nx,y\nz,a"
        zo = zlib.compressobj(wbits=-zlib.MAX_WBITS)
        zbody = zo.compress(body) + zo.flush()
        url = self.build_url("/path/to.csv.gz")
        self.mock_http_response = MockHttpResponse.ok(
            zbody,
            [
                ("Content-Type", "text/csv; charset=utf-8"),
                ("Content-Encoding", "deflate"),
            ],
        )
        with call_fetch(url) as result:
            self.assertEqual(result.errors, [])
            with httpfile.read(result.path) as (_, __, headers, body_path):
                self.assertEqual(body_path.read_bytes(), body) 
Author: CJWorkbench, Project: cjworkbench, Lines: 18, Source file: test_loadurl.py
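
A note on the test above: zlib.compressobj(wbits=-zlib.MAX_WBITS) produces a raw deflate stream with no zlib or gzip header, which is the variant some servers send for Content-Encoding: deflate. A minimal decoding sketch under that assumption:

import zlib

body = b"A,B\nx,y\nz,a"
zo = zlib.compressobj(wbits=-zlib.MAX_WBITS)
zbody = zo.compress(body) + zo.flush()

# A negative wbits value tells zlib to expect a raw deflate stream (no header or trailer)
assert zlib.decompress(zbody, wbits=-zlib.MAX_WBITS) == body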

Example 14: encode_file

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def encode_file(path: Path) -> str:
    compressed = gzip.compress(path.read_bytes(), compresslevel=9)
    return base64.b64encode(compressed).decode('utf-8') 
Author: lRomul, Project: argus-freesound, Lines: 5, Source file: build_kernel.py
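
A matching decode step (an illustrative assumption, not part of build_kernel.py) simply reverses the two layers:

import base64
import gzip
from pathlib import Path

def decode_file(encoded: str, path: Path) -> None:
    # base64 text -> gzipped bytes -> original file contents
    path.write_bytes(gzip.decompress(base64.b64decode(encoded)))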

Example 15: flush_index

# Required import: import gzip [as alias]
# Or: from gzip import compress [as alias]
def flush_index(self, compressed=True):
        data = json.dumps(self.index).encode("utf-8")
        if compressed:
            logger.debug("Using gzip encoding for writing index")
            data = gzip.compress(data)
        else:
            logger.debug("Using plain text encoding for writing index")

        self.boto.put_object(Bucket=self.bucket, Key=self.index_path(), Body=data) 
Author: MichaelAquilina, Project: S4, Lines: 11, Source file: s3.py


Note: The gzip.compress examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.