This article collects typical usage examples of the Python function pylzma.compress. If you have been wondering how exactly pylzma.compress is used and what it can do, the curated examples below should help.
The following presents 15 code examples of the compress function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
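Before the examples, a minimal round-trip sketch (assuming a pylzma build where eos and maxlength behave as in the test examples below): compressing with an end-of-stream marker lets the decompressor find the end by itself, while eos=0 yields a slightly smaller stream but requires a maxlength hint.

import pylzma

original = b"hello world" * 1000

# With an end-of-stream marker (the default), no size hint is needed:
compressed = pylzma.compress(original, eos=1)
assert pylzma.decompress(compressed) == original

# Without the marker the stream is smaller, but the decompressor
# must be told where to stop:
compressed = pylzma.compress(original, eos=0)
assert pylzma.decompress(compressed, maxlength=len(original)) == original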
Example 1: compress_stub
def compress_stub(method, idata):
    # compress
    if method == 0:
        return 0, idata
    elif method == 14:  # M_LZMA
        import pylzma
        odata = pylzma.compress(idata, eos=0)
        ## FIXME: internal pylzma-0.3.0 error
        ##assert pylzma.decompress(odata, maxlength=len(idata)) == idata
        # recode lzma-header
        prop = ord(odata[0])
        pb = (prop / 9) / 5; lp = (prop / 9) % 5; lc = prop % 9
        h = chr(((lc + lp) << 3) | pb) + chr((lp << 4) | lc)
        odata = h + odata[5:]
        # encode upx stub header
        odata = encode_compressed_stub_header(method, idata, odata) + odata
    elif method == 15:  # M_DEFLATE
        odata = zlib.compress(idata, 9)
        # strip zlib-header and zlib-trailer (adler32)
        odata = odata[2:-4]
        assert zlib.decompress(odata, -15) == idata
        # encode upx stub header
        odata = encode_compressed_stub_header(method, idata, odata) + odata
    else:
        raise Exception("invalid method", method, opts.methods)
    if 1 and len(odata) >= len(idata):
        # not compressible
        return 0, idata
    assert len(odata) <= len(idata), "stub compression failed"
    return method, odata
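The header recoding above decodes the standard LZMA properties byte, which packs the three model parameters as (pb * 5 + lp) * 9 + lc; the two-byte header it then builds appears specific to the UPX stub format. A small self-contained sketch of the decode step (0x5d is pylzma's default properties byte, and also the byte hard-coded into the block header in Example 2 below):

def decode_lzma_props(prop):
    # prop == (pb * 5 + lp) * 9 + lc, the first byte of a pylzma stream
    lc = prop % 9
    lp = (prop // 9) % 5
    pb = (prop // 9) // 5
    return lc, lp, pb

# pylzma's defaults lc=3, lp=0, pb=2 encode to (2*5 + 0)*9 + 3 == 0x5d
assert decode_lzma_props(0x5d) == (3, 0, 2)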
Example 2: compress
def compress(buf, size):
    remain = size
    pos = 0
    comp_size = 0
    num_blocks = 0
    data = ""
    while remain > 0:
        num_blocks += 1
        if remain > UNCOMP_BLOCK_SIZE:
            #print 'compress0 %d' % UNCOMP_BLOCK_SIZE
            head = struct.pack("I", UNCOMP_BLOCK_SIZE)
            block = buf[pos:pos+UNCOMP_BLOCK_SIZE]
            pos += UNCOMP_BLOCK_SIZE
            remain -= UNCOMP_BLOCK_SIZE
        else:
            #print 'compress1 %d' % remain
            head = struct.pack("I", remain)
            block = buf[pos:pos+remain]
            pos += remain
            remain -= remain
        dst = pylzma.compress(block, dictionary=23, fastBytes=273, eos=0)[5:]
        head += struct.pack("I8s", len(dst), "\x5d\x00\x40\x00\x00\x00\x00\x00")
        #out.write(head+dst)
        data += head + dst
        pad = len(head + dst)
        comp_size += pad
        pad = (pad + 0xf) / 0x10 * 0x10 - pad
        if pad > 0:
            #out.write(LZMA_PAD[0:pad])
            data += LZMA_PAD[0:pad]
            comp_size += pad
    data = struct.pack("I", size) + data
    print "compress %s %d->%d, block %d." % (sys.argv[2], size, comp_size, num_blocks)
    return data
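Two details worth noting above: the hard-coded 8-byte field begins with 0x5d, the LZMA properties byte for pylzma's defaults (lc=3, lp=0, pb=2), replacing the 5-byte pylzma header that `[5:]` strips off; and the expression `(pad + 0xf) / 0x10 * 0x10 - pad` computes how many padding bytes round each block up to a 16-byte boundary. A minimal sketch of that alignment step as a reusable helper:

def pad_to(length, align=0x10):
    # bytes needed to round length up to the next multiple of align
    return (length + align - 1) // align * align - length

assert pad_to(0x31) == 15  # 0x31 rounds up to 0x40
assert pad_to(0x40) == 0   # already aligned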
Example 3: test_compression_decompression_noeos
def test_compression_decompression_noeos(self):
    # call compression and decompression on random data of various sizes
    for i in range(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=0), maxlength=size)
        self.assertEqual(md5(original).hexdigest(), md5(result).hexdigest())
Example 4: generateClientDB
def generateClientDB(self, realm, realmDB):
    try:
        # Copy the master DB to the client DB
        shutil.copyfile("./data/ahserver/ahmaster.db", "./data/ahserver/%s" % realmDB)
        # Connect to the client DB to flush out the extra tables we do not need to send over
        dbconn = sqlite.connect("./data/ahserver/%s" % realmDB, isolation_level=None)
        dcursor = dbconn.cursor()
        dcursor.execute("BEGIN TRANSACTION;")
        dcursor.execute("DELETE FROM ItemList WHERE id IN (SELECT item_list_id FROM ItemTransactionDB WHERE realm != %d);" % realm)
        dcursor.execute("DROP TABLE ItemTransactionDB")
        dcursor.execute("DROP TABLE ItemInstance")
        dcursor.execute("DROP TABLE ItemCharacterMapping")
        dcursor.execute("DROP TABLE ItemVariant")
        dcursor.execute("END TRANSACTION;")
        # VACUUM takes no table argument in SQLite; a single call
        # reclaims the space freed by the dropped tables
        dcursor.execute("VACUUM")
        dcursor.close()
        dbconn.close()
        # Compress the client DB. Better to do this once every 10 seconds
        # than a bunch of times under heavy load of client requests
        f = file("./data/ahserver/%s" % realmDB, "rb")
        cbuffer = f.read()
        cbuffer = pylzma.compress(cbuffer, algorithm=0)
        cbuffer = sqlite.Binary(cbuffer)
        f.close()
        f = file("./data/ahserver/%s" % realmDB, "wb")
        f.write(cbuffer)
        f.close()
    except:
        print "Failed to write %s to disk" % realmDB
Example 5: write_article
def write_article():
    global compress
    global verbose
    global output, f_out, i_out
    global article_count
    global g_this_article_title
    global file_number
    article_count += 1
    if verbose:
        print "[MWR %d] %s" % (article_count, g_this_article_title)
        sys.stdout.flush()
    elif article_count % 1000 == 0:
        print "Render[%d]: %d" % (file_number, article_count)
        sys.stdout.flush()
    output.flush()
    # create link
    links_stream = io.BytesIO('')
    for i in g_links:
        (x0, y0, x1, y1, url) = g_links[i]
        links_stream.write(struct.pack('III', (y0 << 8) | x0, (y1 << 8) | x1, link_number(url)))
    links_stream.flush()
    links = links_stream.getvalue()
    links_stream.close()
    header = struct.pack('I2H', 8 + len(links), g_link_cnt, 0)
    body = output.getvalue()
    file_offset = f_out.tell()
    if compress:
        body = chr(5) + pylzma.compress(header + links + body,
                                        dictionary=24, fastBytes=32,
                                        literalContextBits=3,
                                        literalPosBits=0, posBits=2,
                                        algorithm=1, eos=1)
        f_out.write(body)
    else:
        f_out.write(header)
        f_out.write(links)
        f_out.write(body)
    output.truncate(0)
    if compress:
        try:
            (article_number, fnd_offset, restricted) = article_index(g_this_article_title)
            data_offset = (file_offset & 0x7fffffff)
            if bool(int(restricted)):  # '0' is True so turn it into False
                data_offset |= 0x80000000
            data_length = (0x80 << 24) | (file_number << 24) | len(body)  # 0x80 => lzma encoding
            i_out.write(struct.pack('III', data_offset, fnd_offset, data_length))
        except KeyError:
            print 'Error in: write_article, Title not found'
            print 'Title:', g_this_article_title
            print 'Offset:', file_offset
            print 'Count:', article_count
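For reference, a hypothetical reader-side sketch that inverts the index entry packed above (names are illustrative; the bit layout follows the code: the top bit of data_offset marks restricted articles, the top bit of data_length flags LZMA encoding, the next seven bits carry the file number, and the low 24 bits hold the body length):

import struct

def parse_index_entry(entry):
    # hypothetical inverse of the struct.pack('III', ...) call above
    data_offset, fnd_offset, data_length = struct.unpack('III', entry)
    restricted = bool(data_offset & 0x80000000)
    offset = data_offset & 0x7fffffff
    is_lzma = bool(data_length & (0x80 << 24))
    file_number = (data_length >> 24) & 0x7f
    length = data_length & 0x00ffffff
    return offset, restricted, fnd_offset, is_lzma, file_number, length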
Example 6: test_compression_decompression_eos
def test_compression_decompression_eos(self):
    # call compression and decompression on random data of various sizes
    for i in xrange(18):
        size = 1 << i
        original = generate_random(size)
        result = pylzma.decompress(pylzma.compress(original, eos=1))
        self.assertEqual(len(result), size)
        self.assertEqual(md5.new(original).hexdigest(), md5.new(result).hexdigest())
Example 7: test_matchfinders
def test_matchfinders(self):
    # use different matchfinder algorithms for compression
    matchfinders = ['bt2', 'bt3', 'hc4']
    original = 'hello world'
    for mf in matchfinders:
        result = pylzma.decompress(pylzma.compress(original, matchfinder=mf))
        self.assertEqual(original, result)
    self.failUnlessRaises(TypeError, pylzma.compress, original, matchfinder='1234')
Example 8: depthCallback
def depthCallback(self, dev, depth, timestamp):
    # resize grid
    depth0 = depth[self.useCols, self.useRows]
    # median of this + previous frames: reduces noise, and greatly
    # improves compression on similar frames
    if self.medianOf > 1:
        self.depths.insert(0, depth0)
        depth = numpy.median(numpy.dstack(self.depths), axis=2).astype(numpy.int16)
        self.depths.pop()
    else:
        depth = depth0
    # flip x axis so the orientation is correct
    depth = numpy.fliplr(depth)
    # rescale depths
    numpy.clip(depth, 0, 2 ** 10 - 1, depth)
    depth >>= 2
    # calculate quadrant averages (used to pan camera; could otherwise be done in JS)
    h, w = self.h, self.w
    halfH, halfW = h / 2, w / 2
    qtl = numpy.mean(depth[0:halfH, 0:halfW])
    qtr = numpy.mean(depth[0:halfH, halfW:w])
    qbl = numpy.mean(depth[halfH:h, 0:halfW])
    qbr = numpy.mean(depth[halfH:h, halfW:w])
    depth = depth.ravel()  # 1-D version
    # calculate diff from last frame (unless it's a keyframe)
    keyFrame = self.currentFrame == 0
    diffDepth = depth if keyFrame else depth - self.lastDepth
    # optionally produce pixel diffs (oddly, pixel diffing seems to *increase* compressed data size)
    if self.pixelDiffs:
        diffDepth = numpy.concatenate(([diffDepth[0]], numpy.diff(diffDepth)))
    # smush data together
    data = numpy.concatenate(([keyFrame, qtl, qtr, qbl, qbr], diffDepth % 256))
    # compress and broadcast
    crunchedData = pylzma.compress(data.astype(numpy.uint8), dictionary=18)  # default: 23 -> 2 ** 23 -> 8MB
    # write out test data
    # ff = open('/tmp/test_depth.bin', 'ab')
    # ff.write(crunchedData)
    # ff.close()
    reactor.callFromThread(self.wsFactory.broadcast, crunchedData, True)
    # setup for next frame
    self.lastDepth = depth
    self.currentFrame += 1
    self.currentFrame %= self.keyFrameEvery
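As the inline comment notes, pylzma's dictionary argument is a base-2 exponent, not a byte count: dictionary=18 requests a 2**18-byte dictionary instead of the default 2**23, trading a little compression ratio for far less memory per stream, a reasonable choice when compressing a small frame many times per second.

# dictionary is an exponent: memory use scales with 2 ** dictionary
assert 2 ** 18 == 256 * 1024        # Example 8's choice: 256 KB
assert 2 ** 23 == 8 * 1024 * 1024   # pylzma default: 8 MB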
Example 9: test_compress_large_stream
def test_compress_large_stream(self):
    # decompress large block of repeating data, stream version (bug reported by Christopher Perkins)
    data = bytes("asdf", 'ascii') * 123456
    decompress = pylzma.decompressobj()
    infile = BytesIO(pylzma.compress(data))
    outfile = BytesIO()
    while 1:
        tmp = infile.read(1)
        if not tmp:
            break
        outfile.write(decompress.decompress(tmp))
    outfile.write(decompress.flush())
    self.failUnless(data == outfile.getvalue())
Example 10: test_compress_large_stream_bigchunks
def test_compress_large_stream_bigchunks(self):
    # decompress large block of repeating data, stream version with big chunks
    data = bytes("asdf", 'ascii') * 123456
    decompress = pylzma.decompressobj()
    infile = BytesIO(pylzma.compress(data))
    outfile = BytesIO()
    while 1:
        tmp = infile.read(1024)
        if not tmp:
            break
        outfile.write(decompress.decompress(tmp))
    outfile.write(decompress.flush())
    self.failUnless(data == outfile.getvalue())
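Examples 9 and 10 differ only in the chunk size fed to the stream decompressor: pylzma's decompressobj accepts chunks of arbitrary size, and the final flush() drains whatever the last partial chunk left buffered inside the decoder, which is why both loops call it after reaching end of input.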
Example 11: run
def run(self):
    try:
        if self.compresslib == "lzma":
            self.datacompressed = pylzma.compress(self.data, algorithm=self.compressionlevel)
        elif self.compresslib == "zlib":
            self.datacompressed = zlib.compress(self.data, self.compressionlevel)
        elif self.compresslib == "bz2":
            self.datacompressed = bz2.compress(self.data, self.compressionlevel)
        elif self.compresslib == "none":
            self.datacompressed = self.data
    except:
        self.exception = True
        raise
Example 12: compress_lzma
def compress_lzma(self):
    self.flash.seek(8)
    self.lzma_compressed = pylzma.compress(self.flash.read1(self.file_factor))
    self.compressed_file_size = len(self.lzma_compressed) - 5
    lzma_file = io.BytesIO()
    lzma_file.write(b'ZWS')
    lzma_file.write(struct.pack("<B", self.version))
    lzma_file.write(struct.pack("<I", self.file_size))
    lzma_file.write(struct.pack("<I", self.compressed_file_size))
    lzma_file.write(self.lzma_compressed)
    lzma_file.seek(0)
    return lzma_file
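A quick sanity check for the writer above; this hypothetical parser just reads back the four header fields in the order they were written (b'ZWS' signature, one version byte, two little-endian uint32 sizes, then the 5-byte LZMA properties header and the payload):

import struct

def parse_zws_header(lzma_file):
    # hypothetical reader for the header written by compress_lzma above
    lzma_file.seek(0)
    signature = lzma_file.read(3)                             # b'ZWS'
    version, = struct.unpack("<B", lzma_file.read(1))
    file_size, = struct.unpack("<I", lzma_file.read(4))      # uncompressed size
    compressed_size, = struct.unpack("<I", lzma_file.read(4))
    return signature, version, file_size, compressed_size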
Example 13: ConvertMavlinkToTextMessage
def ConvertMavlinkToTextMessage(self, ListOfMavlinkMessages):
    ######################################################################################
    #
    # Summary: Takes a list of mavlink messages, converts them to a single text buffer,
    # crushes the buffer size down with LZMA compression, encodes the compressed buffer
    # in Base64, and returns the encoded buffer. Base64 is used to make the text buffer
    # url/sms/email safe.
    #
    ######################################################################################
    BufferOfMavlinkMessages = ""
    for message in ListOfMavlinkMessages:
        MessageInASCII = binascii.hexlify(message.get_msgbuf())
        BufferOfMavlinkMessages += MessageInASCII
    CompressedMavlinkBuffer = pylzma.compress(BufferOfMavlinkMessages)
    EncodedMavlinkBuffer = base64.b64encode(CompressedMavlinkBuffer)
    TestMavlinkBuffer = base64.b64encode(BufferOfMavlinkMessages)  # debug
    return EncodedMavlinkBuffer
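The receiving side would reverse the three steps. A sketch of the hypothetical inverse (pylzma.compress defaults to eos=1, so no length hint is needed on decompression):

import base64
import binascii
import pylzma

def ConvertTextMessageToMavlinkBytes(EncodedMavlinkBuffer):
    # hypothetical inverse: Base64 decode, LZMA decompress, un-hexlify
    CompressedMavlinkBuffer = base64.b64decode(EncodedMavlinkBuffer)
    BufferOfMavlinkMessages = pylzma.decompress(CompressedMavlinkBuffer)
    return binascii.unhexlify(BufferOfMavlinkMessages)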
Example 14: compress
def compress(infile, outfile):
    fi = open(infile, "rb")
    swf_size = os.path.getsize(infile)
    swf_data = fi.read()
    fi.close()
    validate((swf_data[1] == 'W') and (swf_data[2] == 'S'), "not a SWF file", 112)
    if swf_data[0] == 'Z':
        print "LZMA", outfile
        sys.exit(0)
    dfilesize = struct.unpack("<I", swf_data[4:8])[0] - 8
    if swf_data[0] == 'C':
        # compressed SWF
        ddata = zlib.decompress(swf_data[8:])
    else:
        # uncompressed SWF
        validate((swf_data[0] == 'F'), "not a SWF file", 113)
        ddata = swf_data[8:]
    validate((dfilesize == len(ddata)), 'decompression failure', 114)
    zdata = pylzma.compress(ddata, eos=1)
    # 5 accounts for lzma props
    zsize = len(zdata) - 5
    zheader = list(struct.unpack("<12B", swf_data[0:12]))
    zheader[0] = ord('Z')
    zheader[3] = 13
    zheader[8] = zsize & 0xFF
    zheader[9] = (zsize >> 8) & 0xFF
    zheader[10] = (zsize >> 16) & 0xFF
    zheader[11] = (zsize >> 24) & 0xFF
    fo = open(outfile, 'wb')
    fo.write(struct.pack("<12B", *zheader))
    fo.write(zdata)
    fo.close()
    opt_size = os.path.getsize(outfile)
    print "%6.2f%% %7.7sB " % (100 - (100.0 * opt_size / swf_size), kilo(swf_size - opt_size)) + outfile + ": ", kilo(swf_size) + " -> " + kilo(opt_size)
Example 15: compressXelapedia
def compressXelapedia(source, dest):
    createXelapedia(dest)
    # connect with the database with the compressed articles
    con = sqlite.connect(dest)
    con.text_factory = str
    # connect the database with the uncompressed articles
    con.execute('ATTACH ? AS source', (source,))
    # update the configuration
    con.execute('UPDATE config SET value=\'lzma\' WHERE key=\'type\'')
    # empty the destination database
    con.execute('DELETE FROM articles')
    con.execute('DELETE FROM titles')
    con.execute('DELETE FROM redurects')
    # copy the titles
    con.execute('INSERT INTO titles(title, article_id) ' +
                'SELECT title, article_id FROM source.titles')
    con.commit()
    # we don't need the table attached directly anymore
    con.execute('DETACH source')
    conSource = sqlite.connect(source)
    conSource.text_factory = str
    # now copy and compress the articles
    #con.create_function('compress', 1, compressFunction)
    #con.execute('INSERT INTO articles(id, contents) ' +
    #            'SELECT id, compress(contents) FROM source.articles ORDER BY id')
    cur = conSource.execute('SELECT id, contents FROM articles ORDER BY id')
    for id, uncompressed in cur:
        compressed = Binary(compress(uncompressed))
        con.execute('INSERT INTO articles(id, contents) VALUES(?,?)',
                    (id, compressed,))
        stdout.write('.')
        stdout.flush()
    con.commit()
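Reading an article back is the mirror image. A hypothetical sketch, assuming the compress wrapper above ultimately calls pylzma.compress with the end-of-stream marker enabled (the 'lzma' value written to the config table is what tells a reader to expect this):

def fetchArticle(con, article_id):
    # hypothetical reader for the 'lzma'-typed articles written above
    row = con.execute('SELECT contents FROM articles WHERE id=?',
                      (article_id,)).fetchone()
    return pylzma.decompress(str(row[0]))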