

Java MemStore.NO_PERSISTENT_TS Property Code Examples

This article collects typical usage examples of the Java property org.apache.hadoop.hbase.regionserver.MemStore.NO_PERSISTENT_TS. If you are wondering what MemStore.NO_PERSISTENT_TS is for and how to use it, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.MemStore.


Three code examples of the MemStore.NO_PERSISTENT_TS property are shown below, ordered by popularity by default.
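
In every example, MemStore.NO_PERSISTENT_TS fills the HFileBlock constructor slot that indicates whether memstore timestamps were persisted with the block; judging from the name and the surrounding arguments, it signals that a version 1 block carries none. As a standalone illustration of this sentinel-constant pattern, here is a minimal sketch; the BlockMetaSketch class is hypothetical, and the real constant is declared in org.apache.hadoop.hbase.regionserver.MemStore:

// Hypothetical sketch, not HBase API: it only mirrors how a named sentinel
// constant documents a boolean constructor argument, the way the examples
// below use MemStore.NO_PERSISTENT_TS.
public final class BlockMetaSketch {

  /** Sentinel flag meaning "no memstore timestamp was persisted with this block". */
  public static final boolean NO_PERSISTENT_TS = false;

  private final boolean includesMemstoreTS;

  public BlockMetaSketch(boolean includesMemstoreTS) {
    this.includesMemstoreTS = includesMemstoreTS;
  }

  public static void main(String[] args) {
    // HFile v1 predates per-KeyValue memstore timestamps, so v1 writers and
    // readers pass the named sentinel rather than a bare 'false'.
    BlockMetaSketch meta = new BlockMetaSketch(NO_PERSISTENT_TS);
    System.out.println("includesMemstoreTS = " + meta.includesMemstoreTS);
  }
}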

Example 1: finishBlock

/**
 * Clean up the current block, if one is open.
 *
 * @throws IOException
 */
private void finishBlock() throws IOException {
  if (this.out == null)
    return;
  long startTimeNs = System.nanoTime();

  int size = releaseCompressingStream(this.out);
  this.out = null;
  blockKeys.add(firstKeyInBlock);
  blockOffsets.add(Long.valueOf(blockBegin));
  blockDataSizes.add(Integer.valueOf(size));
  this.totalUncompressedBytes += size;

  HFile.offerWriteLatency(System.nanoTime() - startTimeNs);
  
  if (cacheConf.shouldCacheDataOnWrite()) {
    baosDos.flush();
    // we do not do data block encoding on disk for HFile v1
    byte[] bytes = baos.toByteArray();
    HFileBlock block = new HFileBlock(BlockType.DATA,
        (int) (outputStream.getPos() - blockBegin), bytes.length, -1,
        ByteBuffer.wrap(bytes, 0, bytes.length), HFileBlock.FILL_HEADER,
        blockBegin, MemStore.NO_PERSISTENT_TS,       // no persisted memstore timestamps
        HFileBlock.MINOR_VERSION_NO_CHECKSUM,        // minor version
        0,                                         // bytesPerChecksum
        ChecksumType.NULL.getCode(),               // checksum type
        (int) (outputStream.getPos() - blockBegin) +
        HFileBlock.HEADER_SIZE_NO_CHECKSUM);       // onDiskDataSizeWithHeader

    block = blockEncoder.diskToCacheFormat(block, false);
    passSchemaMetricsTo(block);
    cacheConf.getBlockCache().cacheBlock(
        new BlockCacheKey(name, blockBegin, DataBlockEncoding.NONE,
            block.getBlockType()), block);
    baosDos.close();
  }
  blockNumber++;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 42, Source file: HFileWriterV1.java
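
When cache-on-write is enabled, finishBlock wraps the bytes it just wrote into an HFileBlock and caches it under a BlockCacheKey derived from the file name and the block's start offset. Below is a standalone sketch of that (name, offset) keying scheme; the CacheKey class is a hypothetical stand-in for HBase's BlockCacheKey:

import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

public class CacheOnWriteSketch {

  // Hypothetical stand-in for BlockCacheKey: a block is cached per
  // (file name, byte offset), which is how finishBlock() keys the
  // block it just finished writing.
  static final class CacheKey {
    private final String hfileName;
    private final long offset;

    CacheKey(String hfileName, long offset) {
      this.hfileName = hfileName;
      this.offset = offset;
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof CacheKey)) {
        return false;
      }
      CacheKey k = (CacheKey) o;
      return offset == k.offset && hfileName.equals(k.hfileName);
    }

    @Override
    public int hashCode() {
      return Objects.hash(hfileName, offset);
    }
  }

  public static void main(String[] args) {
    ConcurrentHashMap<CacheKey, byte[]> blockCache = new ConcurrentHashMap<>();

    // After writing a block that starts at blockBegin, cache it under its offset.
    long blockBegin = 65536L;
    blockCache.put(new CacheKey("myfile.hfile", blockBegin), new byte[] {1, 2, 3});

    // A reader asking for the same (name, offset) pair gets a cache hit.
    System.out.println(blockCache.containsKey(new CacheKey("myfile.hfile", 65536L)));
  }
}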

Example 2: finishBlock

/**
 * Clean up the current block, if one is open.
 *
 * @throws IOException
 */
private void finishBlock() throws IOException {
  if (this.out == null)
    return;
  long startTimeNs = System.nanoTime();

  int size = releaseCompressingStream(this.out);
  this.out = null;
  blockKeys.add(firstKeyInBlock);
  blockOffsets.add(Long.valueOf(blockBegin));
  blockDataSizes.add(Integer.valueOf(size));
  this.totalUncompressedBytes += size;

  HFile.offerWriteLatency(System.nanoTime() - startTimeNs);
  
  if (cacheConf.shouldCacheDataOnWrite()) {
    baosDos.flush();
    // we do not do data block encoding on disk for HFile v1
    byte[] bytes = baos.toByteArray();
    HFileBlock block = new HFileBlock(BlockType.DATA,
        (int) (outputStream.getPos() - blockBegin), bytes.length, -1,
        ByteBuffer.wrap(bytes, 0, bytes.length), HFileBlock.FILL_HEADER,
        blockBegin, MemStore.NO_PERSISTENT_TS,       // no persisted memstore timestamps
        HFileBlock.MINOR_VERSION_NO_CHECKSUM,        // minor version
        0,                                         // bytesPerChecksum
        ChecksumType.NULL.getCode(),               // checksum type
        (int) (outputStream.getPos() - blockBegin) +
        HFileBlock.HEADER_SIZE_NO_CHECKSUM);       // onDiskDataSizeWithHeader

    block = blockEncoder.diskToCacheFormat(block, false);
    cacheConf.getBlockCache().cacheBlock(
        new BlockCacheKey(name, blockBegin, DataBlockEncoding.NONE,
            block.getBlockType()), block);
    baosDos.close();
  }
}
 
Developer: daidong, Project: DominoHBase, Lines of code: 40, Source file: HFileWriterV1.java
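
Note: this is the same finishBlock method as in Example 1, here from the DominoHBase fork; it differs only in omitting the passSchemaMetricsTo(block) call and the blockNumber counter.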

Example 3: readBlockData

/**
 * Read a version 1 block. There is no uncompressed header, and the block type (the magic
 * record) is part of the compressed data. This implementation assumes that the bounded range
 * file input stream is needed to stop the decompressor from reading into the next block, because
 * the decompressor just grabs a chunk of data without regard to whether it has reached the end of the
 * compressed section. The block returned is still a version 2 block, and in particular, its
 * first {@link #HEADER_SIZE_WITH_CHECKSUMS} bytes contain a valid version 2 header.
 * @param offset the offset of the block to read in the file
 * @param onDiskSizeWithMagic the on-disk size of the version 1 block, including the magic
 *          record, which is the part of compressed data if using compression
 * @param uncompressedSizeWithMagic uncompressed size of the version 1 block, including the
 *          magic record
 */
@Override
public HFileBlock readBlockData(long offset, long onDiskSizeWithMagic,
    int uncompressedSizeWithMagic, boolean pread) throws IOException {
  if (uncompressedSizeWithMagic <= 0) {
    throw new IOException("Invalid uncompressedSize=" + uncompressedSizeWithMagic
        + " for a version 1 block");
  }

  if (onDiskSizeWithMagic <= 0 || onDiskSizeWithMagic >= Integer.MAX_VALUE) {
    throw new IOException("Invalid onDiskSize=" + onDiskSizeWithMagic + " (maximum allowed: "
        + Integer.MAX_VALUE + ")");
  }

  int onDiskSize = (int) onDiskSizeWithMagic;

  if (uncompressedSizeWithMagic < MAGIC_LENGTH) {
    throw new IOException("Uncompressed size for a version 1 block is "
        + uncompressedSizeWithMagic + " but must be at least " + MAGIC_LENGTH);
  }

  // The existing size already includes magic size, and we are inserting
  // a version 2 header.
  ByteBuffer buf = ByteBuffer.allocate(uncompressedSizeWithMagic + HEADER_DELTA);

  int onDiskSizeWithoutHeader;
  if (compressAlgo == Compression.Algorithm.NONE) {
    // A special case when there is no compression.
    if (onDiskSize != uncompressedSizeWithMagic) {
      throw new IOException("onDiskSize=" + onDiskSize + " and uncompressedSize="
          + uncompressedSizeWithMagic + " must be equal for version 1 with no compression");
    }

    // The first MAGIC_LENGTH bytes of what this will read will be
    // overwritten.
    readAtOffset(istream, buf.array(), buf.arrayOffset() + HEADER_DELTA, onDiskSize, false,
      offset, pread);

    onDiskSizeWithoutHeader = uncompressedSizeWithMagic - MAGIC_LENGTH;
  } else {
    InputStream bufferedBoundedStream = createBufferedBoundedStream(offset, onDiskSize, pread);
    decompress(buf.array(), buf.arrayOffset() + HEADER_DELTA, bufferedBoundedStream,
      uncompressedSizeWithMagic);

    // We don't really have a good way to exclude the "magic record" size
    // from the compressed block's size, since it is compressed as well.
    onDiskSizeWithoutHeader = onDiskSize;
  }

  BlockType newBlockType =
      BlockType.parse(buf.array(), buf.arrayOffset() + HEADER_DELTA, MAGIC_LENGTH);

  // We set the uncompressed size of the new HFile block we are creating
  // to the size of the data portion of the block without the magic record,
  // since the magic record gets moved to the header.
  HFileBlock b =
      new HFileBlock(newBlockType, onDiskSizeWithoutHeader, uncompressedSizeWithMagic
          - MAGIC_LENGTH, -1L, buf, FILL_HEADER, offset, MemStore.NO_PERSISTENT_TS, 0, 0,
          ChecksumType.NULL.getCode(), onDiskSizeWithoutHeader + HEADER_SIZE_NO_CHECKSUM);
  return b;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 73, Source file: HFileBlock.java
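
The heart of readBlockData is its buffer layout: the destination buffer reserves HEADER_DELTA bytes up front, the version 1 payload is read in after that gap, and the block-type magic (the payload's first MAGIC_LENGTH bytes) is parsed before the version 2 header is filled in over the gap and the magic. Below is a self-contained sketch of that layout; the HEADER_DELTA value is illustrative, while MAGIC_LENGTH matches HBase's 8-byte magic records such as "DATABLK*":

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class V1ToV2LayoutSketch {

  static final int MAGIC_LENGTH = 8;   // HFile magic records are 8 bytes, e.g. "DATABLK*"
  static final int HEADER_DELTA = 16;  // illustrative gap reserved for the v2 header

  public static void main(String[] args) {
    byte[] v1Payload = "DATABLK*payload-bytes".getBytes(StandardCharsets.US_ASCII);

    // Allocate the uncompressed size plus the gap for the header to prepend,
    // mirroring ByteBuffer.allocate(uncompressedSizeWithMagic + HEADER_DELTA).
    ByteBuffer buf = ByteBuffer.allocate(v1Payload.length + HEADER_DELTA);

    // Stand-in for readAtOffset()/decompress(): land the v1 bytes after the gap.
    System.arraycopy(v1Payload, 0, buf.array(),
        buf.arrayOffset() + HEADER_DELTA, v1Payload.length);

    // Parse the magic record where it now sits, as BlockType.parse() does.
    // It must be read before the header is written, because the header region
    // covers the gap plus the first MAGIC_LENGTH payload bytes.
    String magic = new String(buf.array(), buf.arrayOffset() + HEADER_DELTA,
        MAGIC_LENGTH, StandardCharsets.US_ASCII);
    System.out.println("block type magic = " + magic); // prints DATABLK*
  }
}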


Note: the org.apache.hadoop.hbase.regionserver.MemStore.NO_PERSISTENT_TS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects, and copyright of the source code remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.