當前位置: 首頁>>代碼示例>>Java>>正文


Java HConstants.HFILEBLOCK_HEADER_SIZE屬性代碼示例

本文整理匯總了Java中org.apache.hadoop.hbase.HConstants.HFILEBLOCK_HEADER_SIZE屬性的典型用法代碼示例。如果您正苦於以下問題:Java HConstants.HFILEBLOCK_HEADER_SIZE屬性的具體用法?Java HConstants.HFILEBLOCK_HEADER_SIZE怎麽用?Java HConstants.HFILEBLOCK_HEADER_SIZE使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在org.apache.hadoop.hbase.HConstants的用法示例。


在下文中一共展示了HConstants.HFILEBLOCK_HEADER_SIZE屬性的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: Writer

/**
 * Creates a block writer.
 *
 * @param dataBlockEncoder data block encoding algorithm to use; falls back to
 *     {@code NoOpDataBlockEncoder.INSTANCE} when {@code null}
 * @param fileContext HFile meta data (checksum, compression and header settings)
 * @throws IllegalArgumentException if the configured bytesPerChecksum is smaller
 *     than the block header size
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);

  // A checksum chunk must be able to cover at least one full header.
  // IllegalArgumentException is a RuntimeException, so callers catching the
  // previous exception type remain compatible; the doubled space in the old
  // concatenated message is also fixed here.
  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new IllegalArgumentException("Unsupported value of bytesPerChecksum. " +
        "Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }

  baosInMemory = new ByteArrayOutputStream();

  // No previous block has been seen yet for any block type.
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i) {
    prevOffsetByType[i] = -1;
  }

  this.fileContext = fileContext;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:25,代碼來源:HFileBlock.java

示例2: headerSize

/**
 * Returns the block header size for the given checksum support.
 *
 * <p>Blocks written with HBase checksums carry a larger header than the
 * pre-checksum format.
 *
 * @param usesHBaseChecksum whether the block carries HBase checksums
 * @return the header size in bytes
 */
public static int headerSize(boolean usesHBaseChecksum) {
  return usesHBaseChecksum
      ? HConstants.HFILEBLOCK_HEADER_SIZE
      : HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:9,代碼來源:HFileBlock.java

示例3: createTestBlockStr

/**
 * Builds a test v2 block and renders its raw bytes as a binary-escaped string,
 * normalizing the gzip "OS" header byte so the output is stable across
 * operating systems.
 */
public String createTestBlockStr(Compression.Algorithm algo,
    int correctLength, boolean useTag) throws IOException {
  HFileBlock.Writer writer = createTestV2Block(algo, includesMemstoreTS, useTag);
  byte[] blockBytes = writer.getHeaderAndDataForTest();
  // Offset of the "OS" field inside the gzip stream that follows the header.
  // See http://www.gzip.org/zlib/rfc-gzip.html for the gzip format.
  int gzipOsFieldOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9;
  if (blockBytes.length == correctLength) {
    // Force the "OS" field to 3 (Unix) only when the compressed length
    // matches; otherwise there are other inconsistencies and we leave the
    // bytes untouched for diagnosis.
    blockBytes[gzipOsFieldOffset] = 3;
  }
  return Bytes.toStringBinary(blockBytes);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:15,代碼來源:TestHFileBlock.java

示例4: testBlockHeapSizeInternals

/**
 * Checks that {@code HFileBlock.heapSize()} equals the sum of its parts
 * (block shell, backing ByteBuffer, HFileContext) for several data sizes.
 */
protected void testBlockHeapSizeInternals() {
  // The expected heap size of a ByteBuffer differs between 32- and 64-bit JVMs.
  final long expectedBufferHeapSize = ClassSize.is32BitJVM() ? 64 : 80;
  assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == expectedBufferHeapSize);

  for (int dataSize : new int[] { 100, 256, 12345 }) {
    byte[] backing = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + dataSize];
    ByteBuffer wrapped = ByteBuffer.wrap(backing, 0, dataSize);
    HFileContext meta = new HFileContextBuilder()
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withHBaseCheckSum(false)
                        .withCompression(Algorithm.NONE)
                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                        .withChecksumType(ChecksumType.NULL).build();
    HFileBlock block = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1,
        wrapped, HFileBlock.FILL_HEADER, -1,
        0, meta);
    long bufferExpectedSize =
        ClassSize.align(ClassSize.estimateBase(wrapped.getClass(), true)
            + HConstants.HFILEBLOCK_HEADER_SIZE + dataSize);
    long contextExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
    long blockShellExpectedSize =
        ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
    long expected = blockShellExpectedSize + bufferExpectedSize + contextExpectedSize;
    assertEquals("Block data size: " + dataSize + ", byte buffer expected " +
        "size: " + bufferExpectedSize + ", HFileBlock class expected " +
        "size: " + blockShellExpectedSize + ";", expected,
        block.heapSize());
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:33,代碼來源:TestHFileBlock.java

示例5: getUncompressedSizeWithoutHeader

/**
 * Returns the uncompressed size of the block's data, excluding the header.
 * Only valid in the "block ready" state.
 */
int getUncompressedSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  final int sizeWithHeader = uncompressedBytesWithHeader.length;
  return sizeWithHeader - HConstants.HFILEBLOCK_HEADER_SIZE;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:7,代碼來源:HFileBlock.java

示例6: generateHFileBlocks

/**
 * Generates {@code numBlocks} pseudo-random DATA blocks, each sized so that
 * its serialized form occupies exactly {@code blockSize} bytes, paired with
 * unique cache keys.
 *
 * @param blockSize target serialized size of each block, in bytes
 * @param numBlocks number of block/key pairs to generate
 * @return the generated block/key pairs
 */
private static HFileBlockPair[] generateHFileBlocks(int blockSize,
    int numBlocks) {
  HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
  Random rand = new Random();
  HashSet<String> usedStrings = new HashSet<String>();
  for (int i = 0; i < numBlocks; i++) {

    // The buffer's serialized size needs to match blockSize, so declare the
    // data size to be smaller by the serialization overhead. The same value
    // serves as both on-disk and uncompressed size (no compression here).
    int sizeWithoutHeader = blockSize - HFileBlock.EXTRA_SERIALIZATION_SPACE;

    ByteBuffer cachedBuffer = ByteBuffer.allocate(sizeWithoutHeader);
    rand.nextBytes(cachedBuffer.array());
    cachedBuffer.rewind();
    long prevBlockOffset = rand.nextLong();
    // Write a minimal block header: type, on-disk size, uncompressed size,
    // previous block offset.
    BlockType.DATA.write(cachedBuffer);
    cachedBuffer.putInt(sizeWithoutHeader);
    cachedBuffer.putInt(sizeWithoutHeader);
    cachedBuffer.putLong(prevBlockOffset);
    cachedBuffer.rewind();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(false)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(false)
                        .withCompression(Compression.Algorithm.NONE)
                        .withBytesPerCheckSum(0)
                        .withChecksumType(ChecksumType.NULL)
                        .build();
    HFileBlock generated = new HFileBlock(BlockType.DATA,
        sizeWithoutHeader, sizeWithoutHeader,
        prevBlockOffset, cachedBuffer, HFileBlock.DONT_FILL_HEADER,
        blockSize,
        sizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, meta);

    // Draw random keys until one has not been used before. Long.toString
    // avoids the deprecated Long(long) boxing constructor; Set.add reports
    // whether the key was new.
    String strKey;
    do {
      strKey = Long.toString(rand.nextLong());
    } while (!usedStrings.add(strKey));

    returnedBlocks[i] = new HFileBlockPair();
    returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
    returnedBlocks[i].block = generated;
  }
  return returnedBlocks;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:51,代碼來源:CacheTestUtils.java

示例7: getOnDiskSizeWithoutHeader

/**
 * Returns the on-disk size of the data portion of the block: the compressed
 * size if compression is enabled, plus the checksum bytes. The header is not
 * compressed and its size is not included in the return value. Can only be
 * called in the "block ready" state.
 *
 * @return the on-disk size of the block, not including the header
 */
int getOnDiskSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  final int dataAndChecksumWithHeader =
      onDiskBytesWithHeader.length + onDiskChecksum.length;
  return dataAndChecksumWithHeader - HConstants.HFILEBLOCK_HEADER_SIZE;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:14,代碼來源:HFileBlock.java


注:本文中的org.apache.hadoop.hbase.HConstants.HFILEBLOCK_HEADER_SIZE屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。