

Java HFileBlockEncodingContext.getUncompressedBytesWithHeader Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext.getUncompressedBytesWithHeader. If you are wondering what this method does, how to call it, or where to find working samples, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext.


Below are 7 code examples of HFileBlockEncodingContext.getUncompressedBytesWithHeader, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
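
Before the examples, here is a minimal sketch of the call pattern they all share, modeled on the HBase 0.98-era API used in Example 2 below. Note that blockEncoder and block are hypothetical stand-ins for an HFileDataBlockEncoder and an unencoded HFileBlock obtained elsewhere; this is an illustrative sketch, not library documentation.

// Build an encoding context, run the encoder, then pull out the encoded bytes.
HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
    blockEncoder.getDataBlockEncoding(),
    HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context,
    block.getBlockType());
// The returned array is the encoded block payload, still prefixed with the
// (dummy) block header that the context was configured with.
byte[] withHeader = context.getUncompressedBytesWithHeader();
int payloadSize = withHeader.length - HConstants.HFILEBLOCK_DUMMY_HEADER.length;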

Example 1: encodeDataBlock

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS,
    HFileBlockEncodingContext encodingCtx) {
  encodeBufferToHFileBlockBuffer(
    block.getBufferWithoutHeader(), algo, includesMemstoreTS, encodingCtx);
  // The context now holds the encoded payload, still prefixed with the block header.
  byte[] encodedUncompressedBytes =
    encodingCtx.getUncompressedBytesWithHeader();
  ByteBuffer bufferWrapper = ByteBuffer.wrap(encodedUncompressedBytes);
  int sizeWithoutHeader = bufferWrapper.limit() - encodingCtx.getHeaderSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      bufferWrapper, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  return encodedBlock;
}
 
Author: daidong, Project: DominoHBase, Lines: 19, Source: HFileDataBlockEncoderImpl.java

Example 2: createBlockOnDisk

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
private HFileBlock createBlockOnDisk(HFileBlock block, boolean useTags) throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(),
      HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
  context.setDummyHeader(block.getDummyHeaderForVersion());
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(), context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1,
          ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
          block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
 
Author: tenggyut, Project: HIndex, Lines: 14, Source: TestHFileDataBlockEncoder.java

Example 3: createBlockOnDisk

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
private HFileBlock createBlockOnDisk(HFileBlock block) throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      Compression.Algorithm.NONE, blockEncoder.getDataBlockEncoding(),
      HConstants.HFILEBLOCK_DUMMY_HEADER);
  context.setDummyHeader(block.getDummyHeaderForVersion());
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
          includesMemstoreTS, context, block.getBlockType());
  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1,
          ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0, includesMemstoreTS,
          block.getMinorVersion(), block.getBytesPerChecksum(), block.getChecksumType(),
          block.getOnDiskDataSizeWithHeader());
}
 
Author: cloud-software-foundation, Project: c5, Lines: 16, Source: TestHFileDataBlockEncoder.java

Example 4: testEncodingWritePath

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
/**
 * Test writing to disk.
 * @throws IOException
 */
@Test
public void testEncodingWritePath() throws IOException {
  // usually we have just a block without a header, but let's not complicate things here
  HFileBlock block = getSampleHFileBlock();
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      Compression.Algorithm.NONE, blockEncoder.getEncodingOnDisk(), HFileBlock.DUMMY_HEADER);
  blockEncoder.beforeWriteToDisk(block.getBufferWithoutHeader(),
          includesMemstoreTS, context, block.getBlockType());

  byte[] encodedBytes = context.getUncompressedBytesWithHeader();
  int size = encodedBytes.length - HFileBlock.HEADER_SIZE;
  HFileBlock blockOnDisk =
      new HFileBlock(context.getBlockType(), size, size, -1,
          ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0,
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());

  if (blockEncoder.getEncodingOnDisk() !=
      DataBlockEncoding.NONE) {
    assertEquals(BlockType.ENCODED_DATA, blockOnDisk.getBlockType());
    assertEquals(blockEncoder.getEncodingOnDisk().getId(),
        blockOnDisk.getDataBlockEncodingId());
  } else {
    assertEquals(BlockType.DATA, blockOnDisk.getBlockType());
  }
}
 
Author: daidong, Project: DominoHBase, Lines: 32, Source: TestHFileDataBlockEncoder.java
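
The assertions in Example 4 show that an encoded block carries a two-byte encoding ID alongside the ENCODED_DATA block type. As a follow-up, here is a short sketch of reading that ID back from a block written this way; blockOnDisk names the block produced above, and DataBlockEncoding.getEncodingById is assumed to be available in the same HBase versions:

if (blockOnDisk.getBlockType() == BlockType.ENCODED_DATA) {
  // The encoding ID is stored right after the block header.
  short encodingId = blockOnDisk.getDataBlockEncodingId();
  DataBlockEncoding encoding = DataBlockEncoding.getEncodingById(encodingId);
  // encoding should now match blockEncoder.getEncodingOnDisk().
}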

Example 5: writeEncodedBlock

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
     DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId, 
    boolean includesMemstoreTS, byte[] dummyHeader, boolean useTag) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream =
      new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS, useTag);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();

  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .build();
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx = encoder.newDataBlockEncodingContext(encoding,
        dummyHeader, meta);
    encoder.encodeKeyValues(rawBuf, encodingCtx);
    encodedResultWithHeader =
        encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx = new HFileBlockDefaultEncodingContext(
        encoding, dummyHeader, meta);
    byte[] rawBufWithHeader =
        new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncodingWithBlockType(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader =
      defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize =
      encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection =
      new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf =
      ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
 
Author: tenggyut, Project: HIndex, Lines: 54, Source: TestHFileBlock.java
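
Examples 5 through 7 repeat the same header-stripping arithmetic: the encoded-KV section begins after the block header plus, when a real DataBlockEncoder was used, a two-byte encoding algorithm ID. A hypothetical helper (not part of the original tests) that captures this offset calculation:

static int encodedDataOffset(byte[] dummyHeader, boolean usedEncoder) {
  int offset = dummyHeader.length;       // block header comes first
  if (usedEncoder) {
    offset += DataBlockEncoding.ID_SIZE; // then the two-byte encoding ID
  }
  return offset;
}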

Example 6: writeEncodedBlock

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
     DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId,
    boolean includesMemstoreTS, byte[] dummyHeader) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream =
      new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();

  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(algo, encoding, dummyHeader);
    encoder.encodeKeyValues(rawBuf, includesMemstoreTS,
        encodingCtx);
    encodedResultWithHeader =
        encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx =
      new HFileBlockDefaultEncodingContext(algo, encoding, dummyHeader);
    byte[] rawBufWithHeader =
        new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncodingWithBlockType(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader =
      defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize =
      encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection =
      new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf =
      ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
 
Author: cloud-software-foundation, Project: c5, Lines: 50, Source: TestHFileBlock.java

Example 7: writeEncodedBlock

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the package/class this method depends on
static void writeEncodedBlock(Algorithm algo, DataBlockEncoding encoding,
     DataOutputStream dos, final List<Integer> encodedSizes,
    final List<ByteBuffer> encodedBlocks, int blockId, 
    boolean includesMemstoreTS, byte[] dummyHeader) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DoubleOutputStream doubleOutputStream =
      new DoubleOutputStream(dos, baos);
  writeTestKeyValues(doubleOutputStream, blockId, includesMemstoreTS);
  ByteBuffer rawBuf = ByteBuffer.wrap(baos.toByteArray());
  rawBuf.rewind();

  DataBlockEncoder encoder = encoding.getEncoder();
  int headerLen = dummyHeader.length;
  byte[] encodedResultWithHeader = null;
  if (encoder != null) {
    HFileBlockEncodingContext encodingCtx =
        encoder.newDataBlockEncodingContext(algo, encoding, dummyHeader);
    encoder.encodeKeyValues(rawBuf, includesMemstoreTS,
        encodingCtx);
    encodedResultWithHeader =
        encodingCtx.getUncompressedBytesWithHeader();
  } else {
    HFileBlockDefaultEncodingContext defaultEncodingCtx =
      new HFileBlockDefaultEncodingContext(algo, encoding, dummyHeader);
    byte[] rawBufWithHeader =
        new byte[rawBuf.array().length + headerLen];
    System.arraycopy(rawBuf.array(), 0, rawBufWithHeader,
        headerLen, rawBuf.array().length);
    defaultEncodingCtx.compressAfterEncoding(rawBufWithHeader,
        BlockType.DATA);
    encodedResultWithHeader =
      defaultEncodingCtx.getUncompressedBytesWithHeader();
  }
  final int encodedSize =
      encodedResultWithHeader.length - headerLen;
  if (encoder != null) {
    // We need to account for the two-byte encoding algorithm ID that
    // comes after the 24-byte block header but before encoded KVs.
    headerLen += DataBlockEncoding.ID_SIZE;
  }
  byte[] encodedDataSection =
      new byte[encodedResultWithHeader.length - headerLen];
  System.arraycopy(encodedResultWithHeader, headerLen,
      encodedDataSection, 0, encodedDataSection.length);
  final ByteBuffer encodedBuf =
      ByteBuffer.wrap(encodedDataSection);
  encodedSizes.add(encodedSize);
  encodedBlocks.add(encodedBuf);
}
 
Author: daidong, Project: DominoHBase, Lines: 50, Source: TestHFileBlock.java


Note: the examples of org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext.getUncompressedBytesWithHeader in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; consult each project's License before using or redistributing the code. Do not repost without permission.