本文整理汇总了Python中zlib.compress函数的典型用法代码示例。如果您正苦于以下问题:Python compress函数的具体用法?Python compress怎么用?Python compress使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了compress函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: calculate_description_similarities
def calculate_description_similarities():
for essay_num in range(1,10+1):
essay_description = open("data/descriptions/essay_%d_description.txt" % (essay_num,)).read()
description_infovalue = len(zlib.compress(essay_description,9))
print essay_num
output = open("features/essay_%d/description_similarity" % (essay_num,),"w")
for essay in essays.get_essay_set(essay_num):
essay_text = essay.get_text("proc")
random_text = word_generator(len(essay_text))
if len(essay_text) > 0:
essay_infovalue = len(zlib.compress(essay_description + essay_text,9))
essay_infovalue_dummy = len(zlib.compress(essay_description + random_text,9))
essay_infovalue_length_raw = essay_infovalue - description_infovalue
if len(essay_text) != 0:
essay_infovalue_length_norm = (essay_infovalue - description_infovalue) / len(essay_text)
else:
essay_infovalue_length_norm = 0
if (description_infovalue - essay_infovalue_dummy) != 0:
essay_infovalue_length_norm2 = (description_infovalue - essay_infovalue) / (description_infovalue - essay_infovalue_dummy)
else:
essay_infovalue_length_norm2 = 0
else:
essay_infovalue_length_raw = -1
essay_infovalue_length_norm = -1
essay_infovalue_length_norm2 = -1
output.write("%.6f,%.6f,%.6f\n" % (essay_infovalue_length_raw, essay_infovalue_length_norm, essay_infovalue_length_norm2))
output.close()
示例2: add
def add(self, entry):
if self.os is None:
import os
self.os = os
nm = entry[0]
pth = entry[1]
base, ext = self.os.path.splitext(self.os.path.basename(pth))
ispkg = base == '__init__'
try:
txt = open(pth[:-1], 'rU').read() + '\n'
except (IOError, OSError):
try:
f = open(pth, 'rb')
f.seek(8) # skip magic and timestamp
bytecode = f.read()
marshal.loads(bytecode).co_filename # to make sure it's valid
obj = zlib.compress(bytecode, self.LEVEL)
except (IOError, ValueError, EOFError, AttributeError):
raise ValueError("bad bytecode in %s and no source" % pth)
else:
txt = txt.replace('\r\n', '\n')
try:
import os
co = compile(txt, self.os.path.join(self.path, nm), 'exec')
except SyntaxError, e:
print "Syntax error in", pth[:-1]
print e.args
raise
obj = zlib.compress(marshal.dumps(co), self.LEVEL)
示例3: add
def add(self, entry):
if self.os is None:
import os
self.os = os
nm = entry[0]
pth = entry[1]
base, ext = self.os.path.splitext(self.os.path.basename(pth))
ispkg = base == "__init__"
try:
txt = open(pth[:-1], "r").read() + "\n"
except (IOError, OSError):
try:
f = open(pth, "rb")
f.seek(8) # skip magic and timestamp
bytecode = f.read()
marshal.loads(bytecode).co_filename # to make sure it's valid
obj = zlib.compress(bytecode, self.LEVEL)
except (IOError, ValueError, EOFError, AttributeError):
raise ValueError("bad bytecode in %s and no source" % pth)
else:
txt = iu._string_replace(txt, "\r\n", "\n")
try:
co = compile(txt, "%s/%s" % (self.path, nm), "exec")
except SyntaxError, e:
print "Syntax error in", pth[:-1]
print e.args
raise
obj = zlib.compress(marshal.dumps(co), self.LEVEL)
示例4: main
def main(ip_address, ssn):
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Send HI first
gzip_encoded_hi = zlib.compress("HI")
udp_sock.sendto(gzip_encoded_hi, (ip_address, THE_PROTOCOL_PORT))
# Get HI back
data, addr = udp_sock.recvfrom(len(gzip_encoded_hi))
if data != gzip_encoded_hi:
print "Didn't get proper HI back."
gzip_encoded_ssn = zlib.compress(ssn)
print "Len: {}".format(len(gzip_encoded_ssn))
print "Sending {}".format(gzip_encoded_ssn)
udp_sock.sendto(gzip_encoded_ssn, (ip_address, THE_PROTOCOL_PORT))
# Now we receieve the file
readable, _, _ = select.select([udp_sock], [], [], 5000)
buf = array.array('i', [0]) # The fuck does this work?
fcntl.ioctl(readable[0].fileno(), termios.FIONREAD, buf, True)
if buf <= 0:
print "Nothing on the other side."
return
full_file, addr = udp_sock.recvfrom(buf[0])
gzip_decoded_file = zlib.decompress(full_file)
print "FILE IS: {}".format(gzip_decoded_file)
示例5: put
def put(self, entity, tag=None):
    """Insert or update *entity* and return it.

    Pops 'id' from a copy of the entity: a missing id means a new record
    (a fresh raw GUID is generated); a present id marks an update, and hex
    string ids are decoded to 16 raw bytes. The remaining fields are JSON
    serialised and, when self.use_zlib is set, compressed at level 1.

    NOTE: mutates the caller's dict by setting entity['updated'].
    """
    is_update = False
    entity['updated'] = time.time()
    entity_copy = entity.copy()
    # Get the entity id, or create a new one. (Fix: removed the dead
    # `entity_id = None` pre-assignment that was immediately overwritten.)
    entity_id = entity_copy.pop('id', None)
    if entity_id is None:
        entity_id = raw_guid()
    else:
        is_update = True
        if len(entity_id) != 16:
            # A hex string instead of 16 raw bytes -- decode it.
            if sys.version_info[0] == 2:
                entity_id = entity_id.decode('hex')
            else:
                entity_id = codecs.decode(entity_id, "hex_codec")
    body = simplejson.dumps(entity_copy)
    if self.use_zlib:
        # Level 1: fast compression; Python 3 needs bytes input.
        if sys.version_info[0] == 2:
            body = zlib.compress(body, 1)
        else:
            body = zlib.compress(to_bytes(body), 1)
    if is_update:
        self._put_update(entity_id, entity_copy, body)
        return entity
    else:
        return self._put_new(entity_id, entity_copy, tag, body)
示例6: buscar_cumple
def buscar_cumple():
print "Ingrese 1 para buscar por nombre o 2 para buscar por fecha de cumpleanos (cualquier otro caracter para cancelar)"
op = raw_input("Opcion:")
if op == "1":
try:
f = open("calendario.txt","r")#Abre el archivo de modo lectura
except:
print "aun no ha ingresado ningun cumple!"
else#El código colocado en la cláusula else se ejecuta solo si no se levante una excepción:
print "Ingrese el nombre"
nomb = zlib.compress(encriptar(raw_input("Nombre:")))
dic = pickle.load(f)#la funcion load carga el objeto serializado, ya q este es una lista
f.close()#cerrar el archivo
encontrado = "no"
for i in dic.keys():#devuelve una lista de todas las claves usadas en el diccionario
for j in range(len(dic[i])):
if nomb == dic[i][j][0]:
print ("Se encontro " + zlib.decompress(desencriptar(dic[i][j][0])) + " el dia " + zlib.decompress(desencriptar(i)))
encontrado = "si"
if encontrado == "no":
print "***No se hayaron coinsidencias***"
elif op == "2":
try:
f = open("calendario.txt","r")#abre el archivo modo lectura
except:
print "aun no ha ingresado ningun cumple!"
else#• El código colocado en la cláusula else se ejecuta solo si no se levante una excepción:
print "Ingrese la fecha"
fecha = zlib.compress(encriptar(raw_input("Fecha: ")))
dic = pickle.load(f)#la funcion load carga el objeto serializado, ya q este es una lista
f.close()#Cierra el archivo
if fecha in dic:
for x in dic[fecha]:
print zlib.decompress(desencriptar (x[0])) + ", " + zlib.decompress(desencriptar (x[1])) + ", " + zlib.decompress(desencriptar (x[2])) + "\n"
示例7: testDecompressStream
def testDecompressStream(self):
    """decompressStream must support full reads, sized reads and readline."""
    payload = os.urandom(16 * 1024)
    blob = zlib.compress(payload)

    # A full read round-trips the original data.
    stream = util.decompressStream(StringIO.StringIO(blob))
    self.assertEqual(stream.read(), payload)

    # A sized read returns exactly that many bytes from the front.
    stream = util.decompressStream(StringIO.StringIO(blob))
    self.assertEqual(stream.read(333), payload[:333])

    # readline, both unbounded and with a size limit.
    text = 'hello world\nhello world line 2\n'
    blob = zlib.compress(text)
    stream = util.decompressStream(StringIO.StringIO(blob))
    self.assertEqual(stream.readline(), 'hello world\n')
    self.assertEqual(stream.readline(), 'hello world line 2\n')

    stream = util.decompressStream(StringIO.StringIO(blob))
    self.assertEqual(stream.readline(5), 'hello')
    self.assertEqual(stream.readline(5), ' worl')
    self.assertEqual(stream.readline(), 'd\n')
示例8: toEtree
def toEtree(self):
    """Build the jabber:client <iq> element for this message.

    When the type is 'result', an <ecm_message> child carries version,
    command and signature attributes plus a <result> element whose stdout
    and stderr payloads are zlib-compressed then base64-encoded.
    """
    iq = Element(('jabber:client', 'iq'))
    iq['type'] = self.type
    iq['id'] = self.id
    iq['from'] = self.from_
    iq['to'] = self.to
    if self.type == 'result':
        message = iq.addElement('ecm_message')
        message['version'] = str(AGENT_VERSION_PROTOCOL)
        message['core'] = str(AGENT_VERSION_CORE)
        message['command'] = self.command
        message['signature'] = self.signature
        result = message.addElement('result')
        result['retvalue'] = self.retvalue
        result['timed_out'] = self.timed_out
        result['partial'] = self.partial
        # Compress the (possibly large) command output before transport.
        for tag, payload in (('gzip_stdout', self.stdout), ('gzip_stderr', self.stderr)):
            result.addElement(tag).addContent(base64.b64encode(zlib.compress(payload)))
        del message
    return iq
示例9: set
def set(self, path, key, value):
    """Save a key, value pair into a blob using pickle and moderate zlib
    compression (level 6). We simply save a dictionary containing all
    different intermediates (from every view) of an entry.

    :param path: path of this cache object
    :param key: dictionary key where we store the value
    :param value: a string we compress with zlib and afterwards save
    """
    if exists(path):
        # Existing blob: load it, merge the new key, rewrite it in place.
        try:
            with io.open(path, 'rb') as fp:
                rv = pickle.load(fp)
        except (pickle.PickleError, IOError):
            # Corrupt or unreadable blob -- discard it and start empty.
            cache.remove(path)
            rv = {}
        try:
            with io.open(path, 'wb') as fp:
                rv[key] = zlib.compress(value, 6)
                pickle.dump(rv, fp, pickle.HIGHEST_PROTOCOL)
        except (IOError, pickle.PickleError) as e:
            log.warn('%s: %s' % (e.__class__.__name__, e))
    else:
        # New blob: write to a temp file in the cache dir first, then
        # rename into place so readers never observe a half-written object.
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self.cache_dir)
            with io.open(fd, 'wb') as fp:
                pickle.dump({key: zlib.compress(value, 6)}, fp, pickle.HIGHEST_PROTOCOL)
            os.rename(tmp, path)
            os.chmod(path, self.mode)
        except (IOError, OSError, pickle.PickleError, zlib.error) as e:
            log.warn('%s: %s' % (e.__class__.__name__, e))
    # Track which keys live in this blob, then hand the value back.
    self.objects[path].add(key)
    return value
示例10: test_http
def test_http():
    """HTML fetching must decode gzip, zlib-deflate and raw-deflate bodies."""
    def gzip_compress(data):
        # Build a gzip member through a file object (no one-shot helper used).
        sink = io.BytesIO()
        member = gzip.GzipFile(fileobj=sink, mode='wb')
        member.write(data)
        member.close()
        return sink.getvalue()

    ok_body = b'<html test=ok>'
    fail_response = (b'<html test=accept-encoding-header-fail>', [])

    def encoded_route(encoding, payload):
        # Serve the encoded payload only when the client advertises support;
        # otherwise answer with the failure marker so the assert trips.
        def handler(env):
            if encoding in env.get('HTTP_ACCEPT_ENCODING', ''):
                return payload, [('Content-Encoding', encoding)]
            return fail_response
        return handler

    routes = {
        '/gzip': encoded_route('gzip', gzip_compress(ok_body)),
        '/deflate': encoded_route('deflate', zlib.compress(ok_body)),
        # Remove the 2-byte zlib header and 4-byte checksum -> raw deflate.
        '/raw-deflate': encoded_route('deflate', zlib.compress(ok_body)[2:-4]),
    }
    with http_server(routes) as root_url:
        for route in ('/gzip', '/deflate', '/raw-deflate'):
            assert HTML(root_url + route).etree_element.get('test') == 'ok'
示例11: hide
def hide(img, img_enc, copyright="http://bitbucket.org/cedricbonhomme/stegano", secret_message=None, secret_file=None):
    """Hide a secret message (or a secret file's content) in the EXIF image
    description of a copy of *img* written to *img_enc*.

    The stored text (annotation date + description) is base64-encoded then
    zlib-compressed. *secret_file* takes precedence over *secret_message*.
    Idiom fix: None comparisons now use identity (`is not None`).
    """
    import shutil
    import datetime
    from zlib import compress
    from zlib import decompress
    from base64 import b64encode
    from .exif.minimal_exif_writer import MinimalExifWriter

    if secret_file is not None:
        with open(secret_file, "r") as f:
            secret_file_content = f.read()

    text = "\nImage annotation date: "
    text = text + str(datetime.date.today())
    text = text + "\nImage description:\n"
    if secret_file is not None:
        text = compress(b64encode(text + secret_file_content))
    else:
        text = compress(b64encode(text + secret_message))

    try:
        shutil.copy(img, img_enc)
    except Exception as e:
        print(("Impossible to copy image:", e))
        return

    # Rewrite the copy's EXIF: fresh description and copyright.
    writer = MinimalExifWriter(img_enc)
    writer.removeExif()
    writer.newImageDescription(text)
    writer.newCopyright(copyright, addYear=1)
    writer.process()
示例12: process_response
def process_response(self, request, response):
    """Sets the cache, if needed.

    Caches successful (200), non-streaming responses under a key derived
    from the key prefix and full request path, zlib-compressed at level 9.
    The timeout comes from the response's max-age, falling back to the
    middleware default; max-age=0 disables caching entirely.
    """
    #if not self._should_update_cache(request, response):
    #    # We don't need to update the cache, just return.
    #    return response
    if response.streaming or response.status_code != 200:
        return response
    # Don't cache responses that set a user-specific (and maybe security
    # sensitive) cookie in response to a cookie-less request.
    if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
        return response
    # Try to get the timeout from the "max-age" section of the "Cache-
    # Control" header before reverting to using the default cache_timeout
    # length.
    timeout = get_max_age(response)
    if timeout is None:  # idiom fix: identity comparison with None
        timeout = self.cache_timeout
    elif timeout == 0:
        # max-age was set to 0, don't bother caching.
        return response
    patch_response_headers(response, timeout)
    if timeout:
        cache_key = "%s-%s" % (self.key_prefix, request.get_full_path())
        # (Removed a leftover debug `#raise ValueError(cache_key)` line.)
        if hasattr(response, 'render') and isinstance(response.render, collections.Callable):
            # Template responses are cached only after rendering completes.
            response.add_post_render_callback(
                lambda r: cache._cache.set(cache_key.encode("utf-8"), zlib.compress(r.content, 9), timeout)
            )
        else:
            # we use the highest compression level, because since it is cached we hope for it to pay off
            cache._cache.set(cache_key.encode("utf-8"), zlib.compress(response.content, 9), timeout)
    return response
示例13: save_signature
def save_signature(fname, _id):
    """Write the four signature database files (size/p1/p2/name) for *fname*.

    Each file is 'KAVS' + little-endian record count + 2-byte date + 2-byte
    time + the zlib-compressed marshalled payload. The four stanzas of the
    original were identical except for extension/payload, so they are
    factored into a local helper.
    """
    # Get the current date and time.
    ret_date = k2timelib.get_now_date()
    ret_time = k2timelib.get_now_time()
    # Pack date and time into 2 bytes each.
    val_date = struct.pack('<H', ret_date)
    val_time = struct.pack('<H', ret_time)

    def _write_sig(ext, payload, count):
        # One signature file, e.g. ext='s' -> script.s01
        sname = '%s.%s%02d' % (fname, ext, _id)
        t = zlib.compress(marshal.dumps(payload))
        t = 'KAVS' + struct.pack('<L', count) + val_date + val_time + t
        save_file(sname, t)

    # Size file (ex: script.s01); de-duplicate entries before saving,
    # but keep the original (pre-dedup) count in the header.
    _write_sig('s', set(size_sig), len(size_sig))
    # Pattern p1 file (ex: script.i01)
    _write_sig('i', p1_sig, len(p1_sig))
    # Pattern p2 file (ex: script.c01)
    _write_sig('c', p2_sig, len(p2_sig))
    # Malware name file (ex: script.n01)
    _write_sig('n', name_sig, len(name_sig))
示例14: test_multi_decoding_deflate_deflate
def test_multi_decoding_deflate_deflate(self):
    """A doubly-deflated body with 'deflate, deflate' encoding decodes fully."""
    double_deflated = zlib.compress(zlib.compress(b'foo'))
    response = HTTPResponse(
        BytesIO(double_deflated),
        headers={'content-encoding': 'deflate, deflate'})
    assert response.data == b'foo'
示例15: do_chunk
def do_chunk(ilines, infile, args):
    """Takes in a the lines from the index file to work on in array form,
    and the bam file name, and the arguments
    returns a list of the necessary data for chimera detection ready for sorting
    """
    def _encode(value):
        # Payloads travel as base64(zlib(pickle)) text columns; this helper
        # replaces the expression duplicated in both branches of the original.
        return base64.b64encode(zlib.compress(pickle.dumps(value)))

    ilines = [x.rstrip().split("\t") for x in ilines]
    # Columns 2-3 of the first index line locate the BAM block to start at.
    coord = [int(x) for x in ilines[0][2:4]]
    bf = BAMFile(infile, BAMFile.Options(blockStart=coord[0], innerStart=coord[1]))
    results = []
    for i in range(0, len(ilines)):
        flag = int(ilines[i][5])
        e = bf.read_entry()
        if e.is_aligned():
            tx = e.get_target_transcript(args.minimum_intron_size)
            value = {'qrng': e.actual_original_query_range.get_range_string(),
                     'tx': tx.get_gpd_line(),
                     'flag': flag,
                     'qlen': e.original_query_sequence_length,
                     'aligned_bases': e.get_aligned_bases_count()}
        else:
            # Unaligned entries carry empty range/transcript and zero bases.
            value = {'qrng': '', 'tx': '', 'flag': flag,
                     'qlen': e.original_query_sequence_length,
                     'aligned_bases': 0}
        results.append(e.entries.qname + "\t" + _encode(value))
    return results