

Python snappy.compress Function Code Examples

This article collects typical usage examples of the Python snappy.compress function, gathered from open-source projects. If you are wondering what compress does, how to call it, or what real-world usage looks like, the curated examples below may help.


The following shows 15 code examples of the compress function, sorted by popularity by default.
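
Before the project examples, here is a minimal round trip with python-snappy itself: compress takes a byte string and returns the compressed bytes, and decompress reverses it.

import snappy

raw = b"This is a test of the emergency broadcasting system." * 10
packed = snappy.compress(raw)       # one-shot, raw snappy format (no framing, no header)
assert snappy.decompress(packed) == raw
assert len(packed) < len(raw)       # repetitive input compresses well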

Example 1: snappy_pack_blob

def snappy_pack_blob(obj, sep=SEP):
    if obj is None:
        return ""
    c = obj.dtype.char
    if c == "S":
        return "S" + snappy.compress(sep.join(obj))
    return buffer(c + snappy.compress(obj.tobytes()))
Author: quinlan-lab, Project: vcf2db, Lines: 7, Source: vcf2db.py
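
For context, a reverse operation might look like the sketch below. It is an assumption based on the packing scheme above (first byte = numpy dtype character, remainder = snappy-compressed payload) and is not taken from vcf2db; the function name and the sep default are placeholders, and the code is Python 2 like the packer.

import numpy as np
import snappy

def snappy_unpack_blob(blob, sep='|'):   # name and sep value are placeholders
    if not blob:
        return None
    dtype_char = blob[0]
    payload = snappy.decompress(blob[1:])
    if dtype_char == "S":
        # string arrays were sep-joined before compression
        return np.array(payload.split(sep))
    return np.frombuffer(payload, dtype=dtype_char)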

Example 2: test_view_snappy_compressed

def test_view_snappy_compressed():
  if not snappy_installed():
    raise SkipTest
  import snappy

  cluster = pseudo_hdfs4.shared_cluster()
  finish = []
  try:
    c = make_logged_in_client()
    cluster.fs.setuser(cluster.superuser)
    if cluster.fs.isdir('/tmp/test-snappy-filebrowser'):
      cluster.fs.rmtree('/tmp/test-snappy-filebrowser')

    cluster.fs.mkdir('/tmp/test-snappy-filebrowser/')

    f = cluster.fs.open('/tmp/test-snappy-filebrowser/test-view.snappy', "w")
    f.write(snappy.compress('This is a test of the emergency broadcasting system.'))
    f.close()

    f = cluster.fs.open('/tmp/test-snappy-filebrowser/test-view.stillsnappy', "w")
    f.write(snappy.compress('The broadcasters of your area in voluntary cooperation with the FCC and other authorities.'))
    f.close()

    f = cluster.fs.open('/tmp/test-snappy-filebrowser/test-view.notsnappy', "w")
    f.write('foobar')
    f.close()

    # Snappy compressed fail
    response = c.get('/filebrowser/view/tmp/test-snappy-filebrowser/test-view.notsnappy?compression=snappy')
    assert_true('Failed to decompress' in response.context['message'], response)

    # Snappy compressed succeed
    response = c.get('/filebrowser/view/tmp/test-snappy-filebrowser/test-view.snappy')
    assert_equal('snappy', response.context['view']['compression'])
    assert_equal(response.context['view']['contents'], 'This is a test of the emergency broadcasting system.', response)

    # Snappy compressed succeed
    response = c.get('/filebrowser/view/tmp/test-snappy-filebrowser/test-view.stillsnappy')
    assert_equal('snappy', response.context['view']['compression'])
    assert_equal(response.context['view']['contents'], 'The broadcasters of your area in voluntary cooperation with the FCC and other authorities.', response)

    # File larger than the allowed max snappy decompression size
    finish.append( MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1) )
    response = c.get('/filebrowser/view/tmp/test-snappy-filebrowser/test-view.stillsnappy?compression=snappy')
    assert_true('File size is greater than allowed max snappy decompression size of 1' in response.context['message'], response)

  finally:
    for done in finish:
      done()
    try:
      cluster.fs.rmtree('/tmp/test-snappy-filebrowser/')
    except:
      pass      # Don't let cleanup errors mask earlier failures
Author: Web5design, Project: hue, Lines: 53, Source: views_test.py

Example 3: snappy_encode

def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32*1024):
    """Encodes the given data with snappy compression.

    If xerial_compatible is set then the stream is encoded in a fashion
    compatible with the xerial snappy library.

    The block size (xerial_blocksize) controls how frequently the blocking
    occurs; 32k is the default in the xerial library.

    The format winds up being:


        +-------------+------------+--------------+------------+--------------+
        |   Header    | Block1 len | Block1 data  | Blockn len | Blockn data  |
        +-------------+------------+--------------+------------+--------------+
        |  16 bytes   |  BE int32  | snappy bytes |  BE int32  | snappy bytes |
        +-------------+------------+--------------+------------+--------------+


    It is important to note that the blocksize is the amount of uncompressed
    data presented to snappy at each block, whereas the blocklen is the number
    of bytes that will be present in the stream; for compressible input the
    block length will typically be <= blocksize.

    """

    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")

    if xerial_compatible:
        def _chunker():
            for i in xrange(0, len(payload), xerial_blocksize):
                yield payload[i:i+xerial_blocksize]

        out = BytesIO()

        header = b''.join([struct.pack('!' + fmt, dat) for fmt, dat
                           in zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER)])

        out.write(header)
        for chunk in _chunker():
            block = snappy.compress(chunk)
            block_size = len(block)
            out.write(struct.pack('!i', block_size))
            out.write(block)

        out.seek(0)
        return out.read()

    else:
        return snappy.compress(payload)
Author: sounos, Project: kafka-python, Lines: 51, Source: codec.py
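
The xerial framing described in the docstring can be undone by walking the length-prefixed blocks. The following is a minimal decoding sketch (not part of kafka-python) that skips header validation.

import struct
from io import BytesIO

import snappy

def xerial_decode(stream):
    buf = BytesIO(stream)
    buf.read(16)                         # magic + version fields, not validated here
    chunks = []
    while True:
        size_field = buf.read(4)
        if not size_field:
            break
        (size,) = struct.unpack('!i', size_field)
        chunks.append(snappy.decompress(buf.read(size)))
    return b''.join(chunks)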

Example 4: encode_snappy

def encode_snappy(buff, xerial_compatible=False, xerial_blocksize=32 * 1024):
    """Encode a buffer using snappy

    If xerial_compatible is set, the buffer is encoded in a fashion compatible
    with the xerial snappy library.

    The block size (xerial_blocksize) controls how frequently the blocking
    occurs. 32k is the default in the xerial library.

    The format is as follows:
    +-------------+------------+--------------+------------+--------------+
    |   Header    | Block1 len | Block1 data  | Blockn len | Blockn data  |
    |-------------+------------+--------------+------------+--------------|
    |  16 bytes   |  BE int32  | snappy bytes |  BE int32  | snappy bytes |
    +-------------+------------+--------------+------------+--------------+

    It is important to note that `blocksize` is the amount of uncompressed
    data presented to snappy at each block, whereas `blocklen` is the
    number of bytes that will be present in the stream.

    Adapted from kafka-python
    https://github.com/mumrah/kafka-python/pull/127/files
    """
    #snappy segfaults if it gets a read-only buffer on PyPy
    if IS_PYPY or PY3:
        buff = bytes(buff)
    if snappy is None:
        raise ImportError("Please install python-snappy")
    if xerial_compatible:
        def _chunker():
            for i in range(0, len(buff), xerial_blocksize):
                yield buff[i:i + xerial_blocksize]
        out = BytesIO()
        full_data = list(zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER))
        header = b''.join(
            [struct.pack('!' + fmt, dat) for fmt, dat in full_data]
        )

        out.write(header)
        for chunk in _chunker():
            block = snappy.compress(chunk)
            block_size = len(block)
            out.write(struct.pack('!i', block_size))
            out.write(block)
        out.seek(0)
        return out.read()
    else:
        return snappy.compress(buff)
Author: nashuiliang, Project: pykafka, Lines: 48, Source: compression.py

Example 5: _pack_msgpack_snappy

def _pack_msgpack_snappy(obj):
    # print "pack", obj
    tmp = msgpack.dumps(obj, encoding='utf-8')
    if len(tmp) > 1000:
        return b'S' + snappy.compress(tmp)
    else:
        return b'\0' + tmp
Author: raycool, Project: vnpy, Lines: 7, Source: jrpc_py.py
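
A hypothetical inverse of this packer would dispatch on the first byte; the function name below is made up, and the encoding argument mirrors the old msgpack API used above.

import msgpack
import snappy

def _unpack_msgpack_snappy(blob):
    if blob.startswith(b'S'):
        tmp = snappy.decompress(blob[1:])
    else:                                # b'\0' prefix: stored uncompressed
        tmp = blob[1:]
    # encoding= matches the pre-1.0 msgpack API used by the packer above
    return msgpack.loads(tmp, encoding='utf-8')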

Example 6: test_compression

    def test_compression(self):
        # test that we can add compressed chunks
        compressor = snappy.StreamCompressor()
        data = b"\0" * 50
        compressed_data = snappy.compress(data)
        crc = struct.pack("<L", snappy._masked_crc32c(data))
        self.assertEqual(crc, b"\x8f)H\xbd")
        self.assertEqual(len(compressed_data), 6)
        self.assertEqual(compressor.add_chunk(data, compress=True),
                         b"\xff\x06\x00\x00sNaPpY"
                         b"\x00\x0a\x00\x00" + crc + compressed_data)

        # test that we can add uncompressed chunks
        data = b"\x01" * 50
        crc = struct.pack("<L", snappy._masked_crc32c(data))
        self.assertEqual(crc, b"\xb2\x14)\x8a")
        self.assertEqual(compressor.add_chunk(data, compress=False),
                         b"\x01\x36\x00\x00" + crc + data)

        # test that we can add more data than will fit in one chunk
        data = b"\x01" * (snappy._CHUNK_MAX * 2 - 5)
        crc1 = struct.pack("<L",
                snappy._masked_crc32c(data[:snappy._CHUNK_MAX]))
        self.assertEqual(crc1, b"h#6\x8e")
        crc2 = struct.pack("<L",
                snappy._masked_crc32c(data[snappy._CHUNK_MAX:]))
        self.assertEqual(crc2, b"q\x8foE")
        self.assertEqual(compressor.add_chunk(data, compress=False),
                b"\x01\x04\x00\x01" + crc1 + data[:snappy._CHUNK_MAX] +
                b"\x01\xff\xff\x00" + crc2 + data[snappy._CHUNK_MAX:])
Author: felipecruz, Project: python-snappy, Lines: 30, Source: test_snappy.py
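
For comparison, a minimal round trip through the same framing format can be written with the StreamCompressor/StreamDecompressor pair; this sketch is not part of the test above.

import snappy

compressor = snappy.StreamCompressor()
decompressor = snappy.StreamDecompressor()

framed = compressor.add_chunk(b"\x00" * 50)                   # first call also emits the sNaPpY stream header
framed += compressor.add_chunk(b"\x01" * 50, compress=False)  # uncompressed chunk, as in the test
assert decompressor.decompress(framed) == b"\x00" * 50 + b"\x01" * 50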

Example 7: compress

def compress(data):
    """
    Compresses given data via the snappy algorithm.

    The result is preceded with a header containing the string 'SNAPPY' and the
    default and min-compat versions (both ``1``).

    The block size for the compression is hard-coded at 32kb.

    If ``python-snappy`` is not installed a ``RuntimeError`` is raised.
    """
    if not snappy_available:
        raise RuntimeError("Snappy compression unavailable.")

    buff = BytesIO()
    buff.write(raw_header)

    for block_num in range(0, len(data), BLOCK_SIZE):
        block = data[block_num:block_num + BLOCK_SIZE]
        compressed = snappy.compress(block)

        buff.write(struct.pack("!i", len(compressed)))
        buff.write(compressed)

    result = buff.getvalue()

    buff.close()

    return result
Author: FlorianLudwig, Project: kiel, Lines: 29, Source: snappy.py

Example 8: snappy_write_block

    def snappy_write_block(fo, block_bytes):
        '''Write block in "snappy" codec.'''
        data = snappy.compress(block_bytes)

        write_long(fo, len(data) + 4)  # for CRC
        fo.write(data)
        write_crc32(fo, block_bytes)
Author: rodcarroll, Project: fastavro, Lines: 7, Source: writer.py
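
A matching read-side sketch (an assumption, not taken from fastavro) strips the trailing checksum, decompresses, and verifies it; the Avro spec stores the big-endian CRC-32 of the uncompressed data after the snappy block.

import struct
import zlib

import snappy

def snappy_read_block(block):
    """Inverse of snappy_write_block, given the block bytes already read."""
    data, crc_bytes = block[:-4], block[-4:]
    (crc,) = struct.unpack('>I', crc_bytes)
    block_bytes = snappy.decompress(data)
    assert (zlib.crc32(block_bytes) & 0xffffffff) == crc
    return block_bytes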

Example 9: update_post

def update_post(username, slug):
    user = current_user
    content = request.form.get('content', type=str)
    cursor = request.form.get('cursor', type=int)

    if content is not None:
        post = user.posts.filter_by(slug=slug).first()
        if post:
            post.cursor = len(content) if not cursor else cursor
            post.modified_timestamp = datetime.utcnow()
            
            # Get meta
            r = regex.compile(r'<<((?:(?>[^<>]+)|<(?!<)|>(?!>))*?)>>', regex.I | regex.S)
            post.meta = json.dumps(regex.findall(r, content))
            
            # Encrypt
            half_key = session[generate_hash(user.user_key_salt)]
            key = xor_keys(half_key, app.config['MASTER_KEY'])
            content = snappy.compress(content)
            content = AES_encrypt(key, user.username, content)
            post.content = content
            
            db.session.add(post)
            db.session.commit()
            return jsonify(error=None)
        return jsonify(error="Not found")
    elif cursor is not None:
        post = user.posts.filter_by(slug=slug).first()
        if post:
            post.cursor = cursor
            db.session.add(post)
            db.session.commit()
            return jsonify(error=None)
        return jsonify(error="Not found")
    return jsonify(error="Invalid parameters")
Author: kevinisaac, Project: journal, Lines: 35, Source: views.py

Example 10: write_key

def write_key(ds, kind, id, data_path):
    key = ds.key(kind, id)
    entity = datastore.Entity(
        key=key,
        exclude_from_indexes=['Value'])

    with open(data_path) as f:
        data = json.load(f)

    payload = {
        'LastModified': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'SchemaVersion': '',
        'DataType': data['dataType'],
        'Season': data['season']['name'],
        'Sport': data['sport'] if 'sport' in data else '',
        'League': data['league']['alias'],
        'TeamId': str(data['team']['id']),
        'PlayerId': data['player']['id'] if 'player' in data else '',
        'EventId': data['eventId'] if 'eventId' in data else '',
        'EventDate': data['eventDate'] if 'eventDate' in data else '',
        'EventType': data['eventType'] if 'eventType' in data else '',
        'Value': snappy.compress(msgpack.packb(data))
    }
    print payload

    entity.update(payload)
    ds.put(entity)
Author: ikenticus, Project: blogcode, Lines: 27, Source: datastore.py
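
Reading the entity back reverses the msgpack + snappy encoding of the 'Value' property. The following is a hypothetical sketch, not part of the original script; read_key is a made-up name.

import msgpack
import snappy

def read_key(ds, kind, id):
    entity = ds.get(ds.key(kind, id))
    if entity is None:
        return None
    return msgpack.unpackb(snappy.decompress(entity['Value']))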

Example 11: _pack

def _pack(obj):
#     print "PACK", obj
    tmp = msgpack.dumps(obj)
    if len(tmp) > 1000:
        return 'S'  + snappy.compress(tmp)
    else:
        return '\0' + tmp
Author: GenesisOrg, Project: vnpy, Lines: 7, Source: jrpc_server.py

Example 12: rewrite

	def rewrite(data_string):
		data=json.loads(data_string)
		toupdate=json.loads(update)
		#primary_key_modified=False

		#delete the appropriate document
		query=BooleanQuery()
		for key in primary_keys_map:
			temp=QueryParser(Version.LUCENE_CURRENT,key,analyzer).parse(data[key])
			query.add(BooleanClause(temp,BooleanClause.Occur.MUST))
		

		#modify the values
		for key,value in toupdate.items():
			# if the key is not present, we either add/update it in data or just ignore it (add_field_if_not_exists defaults to True)
			if add_field_if_not_exists==False:
				if key in data.keys():
					data[key]=value
			else:		
				data[key]=value

		# this deletion statement has been intentionally placed here:
		# the update only proceeds if the modified data's primary keys do not already exist
		primary_key_update=False
		for key in toupdate.keys():
			if key in primary_keys_map:
				primary_key_update=True
				break
		if primary_key_update == True:
			query_search=BooleanQuery()
			for key in primary_keys_map:
				temp=QueryParser(Version.LUCENE_CURRENT,key,analyzer).parse(data[key])
				query_search.add(BooleanClause(temp,BooleanClause.Occur.MUST))
			hits=searcher.search(query_search,MAX_RESULTS).scoreDocs
			if len(hits) > 0:
				return 106			
		writer.deleteDocuments(query)

		#add the newly modified document
		doc=Document()
		#index files wrt primary key
		for primary_key in primary_keys_map:
			try:
				field=Field(primary_key,data[primary_key],Field.Store.NO,Field.Index.ANALYZED)
				doc.add(field)
			except:
				# primary_keys_map.pop(collection_name)
				return 101
		#compress data using snappy if compression is on		
		if to_be_compressed_input==True:
			temp=json.dumps(data)
			data_string=base64.b64encode(snappy.compress(temp))
		else:
			temp=json.dumps(data)
			data_string=base64.b64encode(temp)

		field=Field("$DATA$",data_string,Field.Store.YES,Field.Index.ANALYZED)
		doc.add(field)
		writer.addDocument(doc)
Author: NitinJamadagni, Project: Mini-NoSQL-Database, Lines: 59, Source: Handler_callable.py
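
Reading a stored document back would reverse the base64 + snappy step applied to the "$DATA$" field. This is a hypothetical sketch, not taken from the project above.

import base64
import json

import snappy

def decode_data_field(data_string, to_be_compressed_input=True):
    raw = base64.b64decode(data_string)
    if to_be_compressed_input:
        raw = snappy.decompress(raw)
    return json.loads(raw)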

Example 13: compress

    def compress(data):
        meta = {
            "compression": "snappy",
            "orig_size": len(data)
        }

        compressed_data = snappy.compress(data)
        return meta, compressed_data
Author: irq0, Project: veintidos, Lines: 8, Source: compressor.py
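
A hypothetical decompress counterpart could validate the metadata written by compress above; this sketch is not the veintidos API.

import snappy

def decompress(meta, compressed_data):
    assert meta["compression"] == "snappy"
    data = snappy.decompress(compressed_data)
    assert len(data) == meta["orig_size"]    # sanity-check against the recorded size
    return data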

Example 14: snappy_encode

def snappy_encode(payload, xerial_compatible=False,
                  xerial_blocksize=32 * 1024):
    """
    Compress the given data with the Snappy algorithm.

    :param bytes payload: Data to compress.
    :param bool xerial_compatible:
        If set then the stream is broken into length-prefixed blocks in
        a fashion compatible with the xerial snappy library.

        The format winds up being::

            +-------------+------------+--------------+------------+--------------+
            |   Header    | Block1_len | Block1 data  | BlockN len | BlockN data  |
            |-------------+------------+--------------+------------+--------------|
            |  16 bytes   |  BE int32  | snappy bytes |  BE int32  | snappy bytes |
            +-------------+------------+--------------+------------+--------------+

    :param int xerial_blocksize:
        Number of bytes per chunk to independently Snappy encode. 32k is the
        default in the xerial library.

    :returns: Compressed bytes.
    :rtype: :class:`bytes`
    """
    if not has_snappy():  # FIXME This should be static, not checked every call.
        raise NotImplementedError("Snappy codec is not available")

    if xerial_compatible:
        def _chunker():
            for i in range(0, len(payload), xerial_blocksize):
                yield payload[i:i+xerial_blocksize]

        out = BytesIO()
        out.write(_XERIAL_HEADER)

        for chunk in _chunker():
            block = snappy.compress(chunk)
            out.write(struct.pack('!i', len(block)))
            out.write(block)

        out.seek(0)
        return out.read()

    else:
        return snappy.compress(payload)
Author: ciena, Project: afkak, Lines: 46, Source: codec.py

Example 15: test_view_snappy_compressed

  def test_view_snappy_compressed(self):
    if not snappy_installed():
      raise SkipTest
    import snappy

    cluster = pseudo_hdfs4.shared_cluster()
    finish = []
    try:
      c = make_logged_in_client()
      prefix = self.cluster.fs_prefix + '/test_view_snappy_compressed'
      self.cluster.fs.mkdir(prefix)

      f = cluster.fs.open(prefix + '/test-view.snappy', "w")
      f.write(snappy.compress('This is a test of the emergency broadcasting system.'))
      f.close()

      f = cluster.fs.open(prefix + '/test-view.stillsnappy', "w")
      f.write(snappy.compress('The broadcasters of your area in voluntary cooperation with the FCC and other authorities.'))
      f.close()

      f = cluster.fs.open(prefix + '/test-view.notsnappy', "w")
      f.write('foobar')
      f.close()

      # Snappy compressed fail
      response = c.get('/filebrowser/view=%s/test-view.notsnappy?compression=snappy' % prefix)
      assert_true('Failed to decompress' in response.context['message'], response)

      # Snappy compressed succeed
      response = c.get('/filebrowser/view=%s/test-view.snappy' % prefix)
      assert_equal('snappy', response.context['view']['compression'])
      assert_equal(response.context['view']['contents'], 'This is a test of the emergency broadcasting system.', response)

      # Snappy compressed succeed
      response = c.get('/filebrowser/view=%s/test-view.stillsnappy' % prefix)
      assert_equal('snappy', response.context['view']['compression'])
      assert_equal(response.context['view']['contents'], 'The broadcasters of your area in voluntary cooperation with the FCC and other authorities.', response)

      # File larger than the allowed max snappy decompression size
      finish.append( MAX_SNAPPY_DECOMPRESSION_SIZE.set_for_testing(1) )
      response = c.get('/filebrowser/view=%s/test-view.stillsnappy?compression=snappy' % prefix)
      assert_true('File size is greater than allowed max snappy decompression size of 1' in response.context['message'], response)

    finally:
      for done in finish:
        done()
Author: CaeserNieh, Project: hue, Lines: 45, Source: views_test.py


Note: The snappy.compress examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce without permission.