

Python lzo.decompress Method Code Examples

This article collects typical usage examples of the Python lzo.decompress method. If you are wondering what exactly lzo.decompress does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the lzo module that this method belongs to.


The sections below present 15 code examples of the lzo.decompress method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
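
Before the project-specific examples, here is a minimal round-trip sketch of the python-lzo API that all of them build on. The payload below is made up for illustration, and a reasonably recent python-lzo build is assumed:

import lzo  # python-lzo binding, assumed to be installed

raw = b"example payload " * 64

# In its default mode, compress() frames the result with a small header that
# records the uncompressed size, so decompress() needs nothing but the
# compressed bytes.
compressed = lzo.compress(raw, 9)
assert lzo.decompress(compressed) == raw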

Example 1: get_data_by_index

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def get_data_by_index(fmdx, index):
        fmdx.seek(index['file_pos'])
        record_block_compressed = fmdx.read(index['compressed_size'])
        # the first 4 bytes hold the compression type, but the value kept in
        # the index record overrides it below
        record_block_type = record_block_compressed[:4]
        record_block_type = index['record_block_type']
        decompressed_size = index['decompressed_size']
        #adler32 = unpack('>I', record_block_compressed[4:8])[0]
        if record_block_type == 0:
            # no compression
            _record_block = record_block_compressed[8:]
        elif record_block_type == 1:
            # lzo compression
            if lzo is None:
                print("LZO compression is not supported")
            # decompress
            # 5-byte header for the stock python-lzo API; not used by the call below
            header = b'\xf0' + pack('>I', index['decompressed_size'])
            _record_block = lzo.decompress(record_block_compressed[8:],
                                           initSize=decompressed_size,
                                           blockSize=1308672)
        elif record_block_type == 2:
            # zlib compression
            _record_block = zlib.decompress(record_block_compressed[8:])
        data = _record_block[index['record_start'] - index['offset']:
                             index['record_end'] - index['offset']]
        return data
Author: finalion, Project: WordQuery, Lines: 26, Source: mdict_query.py

Example 2: decode_body

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def decode_body(buf, decompress=True):
    """given the bytes from a .dat file, decode it"""
    #head = unpack_from(FMT, bs)
    head = buf[:0x88]
    key = decode_key(buf[0x88:0x88 + 0x80])
    body = xor_data(key, buf[0x108:])
    expected_pack_size = len(buf) - 0x110
    packed_size, unpacked_size = unpack_from('<L I', body)
    if expected_pack_size != packed_size:
        raise Exception('Wrong packed size')
    if body[-3:] != TAIL:
        raise Exception('Trailing 3 bytes not correct')
    # this is needed to play nice with the lzo api
    if decompress:
        magic = b'\xf0' + unpacked_size.to_bytes(4, 'big')
        data = lzo.decompress(magic + body[8:])
        return head, data
    else:
        return head, body
Author: FX31337, Project: FX-BT-Scripts, Lines: 22, Source: convert_dat.py

Example 3: decompress

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype    -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len  -- Uncompressed data length.
    Str:data     -- Data to be uncompressed.

    Returns:
    Uncompressed Data.
    """
    if ctype == UBIFS_COMPR_LZO:
        return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data)))
    elif ctype == UBIFS_COMPR_ZLIB:
        return zlib.decompress(data, -11)
    else:
        return data 
Author: leonsio, Project: YAHM, Lines: 19, Source: misc.py
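
The helper above is written for Python 2, where struct.pack returns a str that can be joined with literal text. A bytes-based sketch of the same idea for Python 3 is shown below; the UBIFS_COMPR_* constant values and the raw-deflate window size are assumptions carried over from the original module rather than verified against it:

import struct
import zlib
import lzo  # python-lzo binding, assumed to be installed

# UBIFS compression type constants (assumed values)
UBIFS_COMPR_NONE, UBIFS_COMPR_LZO, UBIFS_COMPR_ZLIB = 0, 1, 2

def decompress_py3(ctype, unc_len, data):
    """Python 3 sketch of the decompress() helper above (bytes in, bytes out)."""
    if ctype == UBIFS_COMPR_LZO:
        # python-lzo expects a 5-byte header: magic byte 0xf0 followed by the
        # uncompressed size as a big-endian uint32
        return lzo.decompress(b'\xf0' + struct.pack('>I', unc_len) + data)
    elif ctype == UBIFS_COMPR_ZLIB:
        # raw deflate stream, hence the negative wbits value
        return zlib.decompress(data, -11)
    return data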

Example 4: get_mdd_by_index

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def get_mdd_by_index(self, fmdx, index):
        fmdx.seek(index['file_pos'])
        record_block_compressed = fmdx.read(index['compressed_size'])
        # the first 4 bytes hold the compression type, but the value kept in
        # the index record overrides it below
        record_block_type = record_block_compressed[:4]
        record_block_type = index['record_block_type']
        decompressed_size = index['decompressed_size']
        #adler32 = unpack('>I', record_block_compressed[4:8])[0]
        if record_block_type == 0:
            # no compression
            _record_block = record_block_compressed[8:]
        elif record_block_type == 1:
            # lzo compression
            if lzo is None:
                print("LZO compression is not supported")
            # decompress
            # 5-byte header for the stock python-lzo API; not used by the call below
            header = b'\xf0' + pack('>I', index['decompressed_size'])
            _record_block = lzo.decompress(record_block_compressed[8:], initSize=decompressed_size, blockSize=1308672)
        elif record_block_type == 2:
            # zlib compression
            _record_block = zlib.decompress(record_block_compressed[8:])
        data = _record_block[index['record_start'] - index['offset']:index['record_end'] - index['offset']]
        return data
Author: ninja33, Project: mdx-server, Lines: 24, Source: mdict_query.py

Example 5: load_block

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def load_block(self, index):
        if self.cache is not None and index in self.cache:
            return self.cache[index]
        else:
            offset, csize, size = self.block_info[index]
            # Get the block of compressed data
            self.file.seek(offset)
            data = self.file.read(csize)
            # Need to prepend a header for python-lzo module (silly)
            data = b''.join((b'\xf0', struct.pack("!I", size), data))
            value = lzo.decompress(data)
            if self.cache is not None:
                self.cache[index] = value
            return value 
Author: bxlab, Project: bx-python, Lines: 16, Source: seeklzop.py

Example 6: __init__

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def __init__(self, f, cache=32):
        # If cache=None, then everything is allowed to stay in memory,
        # this is the default behavior.
        self.f = f
        M, V, max_size, bin_size, nbins = read_packed(f, ">5I")
        assert M == MAGIC
        # assert version less than max supported
        assert V <= VERSION, "File is version %d but I don't know about anything beyond %d" % (V, VERSION)
        self.max_size = max_size
        self.bin_size = bin_size
        self.nbins = nbins
        self.bins = LRUCache(size=cache)
        # Read typecode
        if V >= 1:
            self.typecode = (unpack('c', f.read(1))[0]).decode()
        else:
            self.typecode = 'f'
        # Read compression type
        if V >= 2:
            self.comp_type = f.read(4).strip().decode()
        else:
            self.comp_type = 'zlib'
        self.decompress = comp_types[self.comp_type][1]
        # Read default value
        s = f.read(calcsize(self.typecode))
        a = fromstring(s, self.typecode)
        if platform_is_little_endian:
            a = a.byteswap()
        self.default = a[0]
        # Read bin sizes and offsets
        self.bin_pos = []
        self.bin_sizes = []
        for i in range(nbins):
            pos, size = read_packed(f, ">2I")
            self.bin_pos.append(pos)
            self.bin_sizes.append(size) 
Author: bxlab, Project: bx-python, Lines: 38, Source: binned_array.py
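
The constructor above does not call lzo.decompress directly; it looks up a (compress, decompress) pair in a comp_types table keyed by the tag read from the file. Below is a hedged sketch of what such a dispatch table can look like — the entry names and exact callables are illustrative assumptions, not necessarily bx-python's real table:

import zlib

try:
    import lzo
except ImportError:
    lzo = None

# Hypothetical dispatch table: compression tag -> (compress, decompress)
comp_types = {
    'none': (lambda b: b, lambda b: b),
    'zlib': (zlib.compress, zlib.decompress),
}
if lzo is not None:
    comp_types['lzo'] = (lzo.compress, lzo.decompress)

# Usage mirrors the constructor: pick the decompressor for the tag read
# from the file header.
decompress = comp_types['zlib'][1]
assert decompress(zlib.compress(b"abc")) == b"abc"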

Example 7: load_bin

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def load_bin(self, index):
        assert self.bin_pos[index] != 0
        self.f.seek(self.bin_pos[index])
        raw = self.f.read(self.bin_sizes[index])
        a = fromstring(self.decompress(raw), self.typecode)
        if platform_is_little_endian:
            a = a.byteswap()
        assert len(a) == self.bin_size
        self.bins[index] = a 
Author: bxlab, Project: bx-python, Lines: 11, Source: binned_array.py

Example 8: _decode_key_block

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def _decode_key_block(self, key_block_compressed, key_block_info_list):
        key_list = []
        i = 0
        for compressed_size, decompressed_size in key_block_info_list:
            start = i
            end = i + compressed_size
            # 4 bytes : compression type
            key_block_type = key_block_compressed[start:start + 4]
            # 4 bytes : adler checksum of decompressed key block
            adler32 = unpack('>I', key_block_compressed[
                             start + 4:start + 8])[0]
            if key_block_type == b'\x00\x00\x00\x00':
                key_block = key_block_compressed[start + 8:end]
            elif key_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress key block
                header = b'\xf0' + pack('>I', decompressed_size)
                key_block = lzo.decompress(key_block_compressed[
                                           start + 8:end], initSize=decompressed_size, blockSize=1308672)
            elif key_block_type == b'\x02\x00\x00\x00':
                # decompress key block
                key_block = zlib.decompress(
                    key_block_compressed[start + 8:end])
            # extract one single key block into a key list
            key_list += self._split_key_block(key_block)
            # notice that adler32 returns signed value
            assert(adler32 == zlib.adler32(key_block) & 0xffffffff)

            i += compressed_size
        return key_list 
Author: finalion, Project: WordQuery, Lines: 34, Source: readmdict.py

Example 9: decode_body

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def decode_body(buf, decompress=True):
    """given the bytes from a .dat file, decode it"""
    head = buf[:0x88]
    key = decode_key(buf[0x88:0x88 + 0x80])
    body = xor_data(key, buf[0x108:])
    expected_pack_size = len(buf) - 0x110
    packed_size, unpacked_size = unpack_from('<L I', body)
    if expected_pack_size != packed_size:
        raise Exception('Wrong packed size')
    if body[-3:] != TAIL:
        raise Exception('Trailing 3 bytes not correct')
    # this is needed to play nice with the lzo api
    if decompress:
        magic = b'\xf0' + unpacked_size.to_bytes(4, 'big')
        data = lzo.decompress(magic + body[8:])
        return head, data
    else:
        return head, body


# MetaQuotes Format Decompressor
#
# A .dat file contains 3 main types of blocks: Type-1, Type-2 and Type-3.
# Every data file starts with Type-1 block(s), since this type stores exact
# values for the timestamp, open/high/low/close prices and the volume. After a
# Type-1 block the file can continue with incremental types (Type-2 or Type-3).
# Each Type-3 block represents one minute of data with incremental open/high/low/close
# price and volume values. Type-2 blocks contain highly compressible minute data
# whose values are identical or differ only slightly from each other,
# but this would need further investigation.
#
Author: FX31337, Project: FX-BT-Scripts, Lines: 34, Source: dl_bt_metaquotes.py

Example 10: convertToCsv

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def convertToCsv(pair, year, month, historyFile, destination):
    # return a string of the md5 checksum for the bytes `b`
    digest = lambda b: binascii.hexlify(hashlib.md5(b).digest()).decode()

    if args.verbose: print('Converting to CSV ...')
    historyPath = os.path.join(destination, pair, str(year), '%02d' % int(month), historyFile)
    csvPath = os.path.join(destination, pair, str(year), '%02d' % int(month), '%s-%02d.csv' % (str(year), int(month)))
    with open(historyPath, 'rb') as datInput, open(csvPath, 'wt') as csvOutput:
        buf = datInput.read()
        matches = re.search(r'([a-z0-9]+)\.dat', historyFile).groups()
        if len(matches) != 1 or len(matches[0]) != 32:
            raise Exception('Error with MD5 from filename')
        md5 = matches[0]
        if digest(buf) != md5:
            raise Exception('Checksum does not match')
        head, data = decode_body(buf)
        bars = decompress(data, year, month)
        if args.anomaly:
            anomalyTest(bars)
        csvWriter = csv.writer(csvOutput, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

        # Build timedelta object from time offset
        timeOffsetMatch = re.match(r'(?P<sign>[+-]?)(?P<hours>\d{2})(?P<minutes>\d{2})', args.timeOffset)
        if timeOffsetMatch:
            timeOffsetGroup = timeOffsetMatch.groupdict()
            timeOffset = datetime.timedelta(  hours=int(timeOffsetGroup['sign'] + timeOffsetGroup['hours'  ]),
                                            minutes=int(timeOffsetGroup['sign'] + timeOffsetGroup['minutes']))
        else:
            timeOffset = datetime.timedelta(0)

        for bar in bars:
            csvWriter.writerow([
                (bar['timestamp'] + timeOffset).strftime('%Y-%m-%d %H:%M:%S'),
                bar['open']/1e5,
                bar['high']/1e5,
                bar['low']/1e5,
                bar['close']/1e5,
                bar['volume']
            ]) 
Author: FX31337, Project: FX-BT-Scripts, Lines: 41, Source: dl_bt_metaquotes.py
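
Aside from the decompression itself, the converter above parses args.timeOffset with a sign-plus-HHMM regex and applies the sign to both the hour and the minute fields. A standalone sketch of that parsing with a hard-coded example value (the original reads it from args.timeOffset):

import datetime
import re

time_offset_arg = '-0130'  # example value; the original code takes this from args.timeOffset
match = re.match(r'(?P<sign>[+-]?)(?P<hours>\d{2})(?P<minutes>\d{2})', time_offset_arg)
if match:
    parts = match.groupdict()
    offset = datetime.timedelta(hours=int(parts['sign'] + parts['hours']),
                                minutes=int(parts['sign'] + parts['minutes']))
else:
    offset = datetime.timedelta(0)

print(offset)  # -1 day, 22:30:00, i.e. minus one hour and thirty minutes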

Example 11: _decode_key_block

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def _decode_key_block(self, key_block_compressed, key_block_info_list):
        key_list = []
        i = 0
        for compressed_size, decompressed_size in key_block_info_list:
            start = i
            end = i + compressed_size
            # 4 bytes : compression type
            key_block_type = key_block_compressed[start:start + 4]
            # 4 bytes : adler checksum of decompressed key block
            adler32 = unpack('>I', key_block_compressed[start + 4:start + 8])[0]
            if key_block_type == b'\x00\x00\x00\x00':
                key_block = key_block_compressed[start + 8:end]
            elif key_block_type == b'\x01\x00\x00\x00':
                if lzo is None:
                    print("LZO compression is not supported")
                    break
                # decompress key block
                header = b'\xf0' + pack('>I', decompressed_size)
                key_block = lzo.decompress(key_block_compressed[start + 8:end], initSize = decompressed_size, blockSize=1308672)
            elif key_block_type == b'\x02\x00\x00\x00':
                # decompress key block
                key_block = zlib.decompress(key_block_compressed[start + 8:end])
            # extract one single key block into a key list
            key_list += self._split_key_block(key_block)
            # notice that adler32 returns signed value
            assert(adler32 == zlib.adler32(key_block) & 0xffffffff)

            i += compressed_size
        return key_list 
Author: sth2018, Project: FastWordQuery, Lines: 31, Source: readmdict.py

Example 12: get_data_by_index

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def get_data_by_index(fmdx, index):
        fmdx.seek(index['file_pos'])
        record_block_compressed = fmdx.read(index['compressed_size'])
        # the first 4 bytes hold the compression type, but the value kept in
        # the index record overrides it below
        record_block_type = record_block_compressed[:4]
        record_block_type = index['record_block_type']
        decompressed_size = index['decompressed_size']
        #adler32 = unpack('>I', record_block_compressed[4:8])[0]
        if record_block_type == 0:
            # no compression
            _record_block = record_block_compressed[8:]
        elif record_block_type == 1:
            # lzo compression
            if lzo is None:
                print("LZO compression is not supported")
            # decompress
            # 5-byte header for the stock python-lzo API; not used by the call below
            header = b'\xf0' + pack('>I', index['decompressed_size'])
            _record_block = lzo.decompress(
                record_block_compressed[8:],
                initSize=decompressed_size,
                blockSize=1308672)
        elif record_block_type == 2:
            # zlib compression
            _record_block = zlib.decompress(record_block_compressed[8:])
        data = _record_block[index['record_start'] - index['offset']:
                             index['record_end'] - index['offset']]
        return data
Author: sth2018, Project: FastWordQuery, Lines: 29, Source: mdict_query.py

Example 13: get_mdx_by_index

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def get_mdx_by_index(self, fmdx, index):
        fmdx.seek(index['file_pos'])
        record_block_compressed = fmdx.read(index['compressed_size'])
        # the first 4 bytes hold the compression type, but the value kept in
        # the index record overrides it below
        record_block_type = record_block_compressed[:4]
        record_block_type = index['record_block_type']
        decompressed_size = index['decompressed_size']
        #adler32 = unpack('>I', record_block_compressed[4:8])[0]
        if record_block_type == 0:
            # no compression
            _record_block = record_block_compressed[8:]
        elif record_block_type == 1:
            # lzo compression
            if lzo is None:
                print("LZO compression is not supported")
            # decompress
            # 5-byte header for the stock python-lzo API; not used by the call below
            header = b'\xf0' + pack('>I', index['decompressed_size'])
            _record_block = lzo.decompress(record_block_compressed[8:], initSize=decompressed_size, blockSize=1308672)
        elif record_block_type == 2:
            # zlib compression
            _record_block = zlib.decompress(record_block_compressed[8:])
        record = _record_block[index['record_start'] - index['offset']:index['record_end'] - index['offset']]
        record = record.decode(self._encoding, errors='ignore').strip(u'\x00').encode('utf-8')
        if self._stylesheet:
            record = self._replace_stylesheet(record)
        record = record.decode('utf-8')
        return record
Author: ninja33, Project: mdx-server, Lines: 28, Source: mdict_query.py

Example 14: decompress

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def decompress(data, buflen, compr_type):
    if compr_type == COMPR_NONE:
        return data
    elif compr_type == COMPR_LZO:
        return lzo.decompress(data, False, buflen)
    elif compr_type == COMPR_ZLIB:
        return zlib.decompress(data, -zlib.MAX_WBITS)
    else:
        raise Exception("unknown compression type")
Author: nlitsme, Project: ubidump, Lines: 11, Source: ubidump.py
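
The ubidump helper above calls lzo.decompress in headerless mode, so the caller has to pass the expected uncompressed length explicitly. Below is a standalone sketch of that calling convention, assuming a python-lzo build that accepts the optional header and buflen arguments (the same requirement the ubidump code has); the payload is made up:

import lzo  # python-lzo binding, assumed to be installed

raw = b"filesystem node payload " * 32

# Store the block without python-lzo's own header, as it would sit on flash.
stored = lzo.compress(raw, 9, False)

# header=False plus the expected length mirrors lzo.decompress(data, False, buflen)
# in the example above.
restored = lzo.decompress(stored, False, len(raw))
assert restored == raw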

Example 15: parse

# Module required: import lzo [as alias]
# Or: from lzo import decompress [as alias]
def parse(self, data):
        self.key, self.size, self.compr_type = struct.unpack("<16sLH2x", data[:self.hdrsize])
        self.data = decompress(data[self.hdrsize:], self.size, self.compr_type)
        if len(self.data) != self.size:
            raise Exception("data size mismatch") 
Author: nlitsme, Project: ubidump, Lines: 7, Source: ubidump.py


Note: The lzo.decompress examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please follow each project's license when redistributing or using the code, and do not reproduce this article without permission.