

Java HFileBlockEncodingContext Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext. If you are unsure what HFileBlockEncodingContext does or how to use it, the curated code examples below should help.


HFileBlockEncodingContext belongs to the org.apache.hadoop.hbase.io.encoding package. Fifteen code examples of the class are shown below, sorted by popularity by default.
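Before the individual examples, here is a minimal sketch of how the pieces below typically fit together: create an encoding context, start block encoding, feed cells, and finish the block. It is assembled from examples 1–6 below and is not taken from any single project; blockEncoder, hfileContext, and cells are hypothetical placeholders.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;

// Sketch only: drive a block encoder through one block's lifecycle.
static byte[] encodeOneBlock(HFileDataBlockEncoder blockEncoder, HFileContext hfileContext,
    List<Cell> cells) throws IOException {
  // The context carries the chosen encoding plus the HFileContext flags
  // (includesTags / includesMvcc) that the encoders consult per cell.
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, hfileContext);

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); // placeholder header, patched later
  DataOutputStream out = new DataOutputStream(baos);

  blockEncoder.startBlockEncoding(context, out);  // installs per-block encoding state
  for (Cell cell : cells) {
    blockEncoder.encode(cell, context, out);      // appends one cell in encoded form
  }
  blockEncoder.endBlockEncoding(context, out, baos.toByteArray(), BlockType.DATA);
  return baos.toByteArray();
}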

Example 1: encode

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    encodedKvSize += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return encodedKvSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: NoOpDataBlockEncoder.java
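As a companion to example 1, the flat layout it writes can be read back symmetrically. This decoding sketch is not part of the collected examples; it simply mirrors the writes above, and the includesTags / includesMvcc arguments are assumed to match the write-time HFileContext flags.

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

// Reads one cell in the flat layout produced by example 1:
// int key length, int value length, key bytes, value bytes,
// then optional tags (short length prefix) and an optional mvcc vlong.
static void readFlatCell(DataInputStream in, boolean includesTags, boolean includesMvcc)
    throws IOException {
  int klength = in.readInt();
  int vlength = in.readInt();
  byte[] key = new byte[klength];
  in.readFully(key);
  byte[] value = new byte[vlength];
  in.readFully(value);
  if (includesTags) {
    int tagsLength = in.readUnsignedShort(); // written with out.writeShort(...)
    byte[] tags = new byte[tagsLength];
    in.readFully(tags);
  }
  if (includesMvcc) {
    long mvcc = WritableUtils.readVLong(in); // sequence id, vlong-encoded
  }
}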

Example 2: createBlockOnDisk

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestHFileDataBlockEncoder.java

Example 3: startBlockEncoding

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out)
    throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx = 
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);

  PrefixTreeEncoder builder = EncoderFactory.checkOut(out, encodingCtx.getHFileContext()
      .isIncludesMvcc());
  PrefixTreeEncodingState state = new PrefixTreeEncodingState();
  state.builder = builder;
  blkEncodingCtx.setEncodingState(state);
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: PrefixTreeCodec.java

Example 4: encodeKeyValues

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but i'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, encodingCtx.getHFileContext().isIncludesMvcc(),
      encodingCtx.getHFileContext().isIncludesTags());

  //do i need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 27, Source: PrefixTreeCodec.java
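Regarding the question in the inline comment above: within PrefixTreeCodec the check appears redundant, since this codec's newDataBlockEncodingContext (example 12 below) rejects every encoding other than DataBlockEncoding.PREFIX_TREE, so the ENCODED_DATA branch should always be taken.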

Example 5: startBlockEncoding

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx,
    DataOutputStream out) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the "
        + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);

  NoneEncoder encoder = new NoneEncoder(out, encodingCtx);
  NoneEncodingState state = new NoneEncodingState();
  state.encoder = encoder;
  blkEncodingCtx.setEncodingState(state);
}
 
Developer: apache, Project: hbase, Lines: 18, Source: NoOpDataBlockEncoder.java

Example 6: createBlockOnDisk

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  blockEncoder.endBlockEncoding(context, dos, baos.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), -1,
      block.getHFileContext());
}
 
Developer: apache, Project: hbase, Lines: 22, Source: TestHFileDataBlockEncoder.java
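Examples 2 and 6 are the same test helper from two HBase lineages. The newer apache/hbase version drops the BufferGrabbingByteArrayOutputStream workaround (its baos appears to be HBase's own org.apache.hadoop.hbase.io.ByteArrayOutputStream, which exposes getBuffer(), rather than java.io.ByteArrayOutputStream) and passes an additional -1 argument to the HFileBlock constructor, reflecting a constructor signature change between versions.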

Example 7: encode

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public int encode(KeyValue kv, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = kv.getKeyLength();
  int vlength = kv.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  out.write(kv.getBuffer(), kv.getKeyOffset(), klength);
  out.write(kv.getValueArray(), kv.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = kv.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(kv.getTagsArray(), kv.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, kv.getMvccVersion());
    encodedKvSize += WritableUtils.getVIntSize(kv.getMvccVersion());
  }
  return encodedKvSize;
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 27, Source: NoOpDataBlockEncoder.java

Example 8: encodeKeyValues

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
/**
 * Copied from BufferedDataBlockEncoder. Almost definitely can be improved, but i'm not familiar
 * enough with the concept of the HFileBlockEncodingContext.
 */
@Override
public void encodeKeyValues(ByteBuffer in, boolean includesMvccVersion,
    HFileBlockEncodingContext blkEncodingCtx) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
  }

  HFileBlockDefaultEncodingContext encodingCtx
      = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding();
  DataOutputStream dataOut = encodingCtx.getOutputStreamForEncoder();
  internalEncodeKeyValues(dataOut, in, includesMvccVersion);

  //do i need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 26, Source: PrefixTreeCodec.java

Example 9: encodeDataBlock

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS,
    HFileBlockEncodingContext encodingCtx) {
  encodeBufferToHFileBlockBuffer(
    block.getBufferWithoutHeader(), algo, includesMemstoreTS, encodingCtx);
  byte[] encodedUncompressedBytes =
    encodingCtx.getUncompressedBytesWithHeader();
  ByteBuffer bufferWrapper = ByteBuffer.wrap(encodedUncompressedBytes);
  int sizeWithoutHeader = bufferWrapper.limit() - encodingCtx.getHeaderSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      bufferWrapper, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  return encodedBlock;
}
 
Developer: daidong, Project: DominoHBase, Lines: 19, Source: HFileDataBlockEncoderImpl.java
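Note that example 9 comes from an older HBase lineage: checksum and versioning details (minorVersion, bytesPerChecksum, checksumType) are passed to the HFileBlock constructor directly, whereas the later versions shown in examples 2 and 6 bundle such block metadata into an HFileContext.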

Example 10: newDataBlockEncodingContext

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public HFileBlockEncodingContext newDataBlockEncodingContext(
    byte[] dummyHeader, HFileContext fileContext) {
  DataBlockEncoder encoder = encoding.getEncoder();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, dummyHeader, fileContext);
  }
  return new HFileBlockDefaultEncodingContext(null, dummyHeader, fileContext);
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: HFileDataBlockEncoderImpl.java

Example 11: startBlockEncoding

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public void startBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  if (this.encoding != null && this.encoding != DataBlockEncoding.NONE) {
    this.encoding.getEncoder().startBlockEncoding(encodingCtx, out);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: HFileDataBlockEncoderImpl.java

Example 12: newDataBlockEncodingContext

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public HFileBlockEncodingContext newDataBlockEncodingContext(
    DataBlockEncoding encoding, byte[] header, HFileContext meta) {
  if(DataBlockEncoding.PREFIX_TREE != encoding){
    //i'm not sure why encoding is in the interface.  Each encoder implementation should probably
    //know it's encoding type
    throw new IllegalArgumentException("only DataBlockEncoding.PREFIX_TREE supported");
  }
  return new HFileBlockDefaultEncodingContext(encoding, header, meta);
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: PrefixTreeCodec.java

Example 13: encode

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.write(cell);
  int size = KeyValueUtil.length(cell);
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    size += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return size;
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: PrefixTreeCodec.java

Example 14: endBlockEncoding

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
    byte[] uncompressedBytesWithHeader) throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.flush();
  EncoderFactory.checkIn(builder);
  // do i need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: PrefixTreeCodec.java
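Examples 3 and 14 form a pair: startBlockEncoding borrows a pooled PrefixTreeEncoder via EncoderFactory.checkOut and stashes it in the context's encoding state, while endBlockEncoding flushes that builder and returns it to the pool via EncoderFactory.checkIn. The per-cell encode in example 13 simply writes into the borrowed builder in between.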

Example 15: beforeWriteToDisk

import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext; // import the required package/class
/**
 * Precondition: a non-encoded buffer. Postcondition: on-disk encoding.
 *
 * The encoded results can be stored in {@link HFileBlockEncodingContext}.
 *
 * @throws IOException
 */
@Override
public void beforeWriteToDisk(ByteBuffer in,
    HFileBlockEncodingContext encodeCtx,
    BlockType blockType) throws IOException {
  if (encoding == DataBlockEncoding.NONE) {
    // there is no need to encode the block before writing it to disk
    ((HFileBlockDefaultEncodingContext) encodeCtx).compressAfterEncodingWithBlockType(
        in.array(), blockType);
    return;
  }
  encodeBufferToHFileBlockBuffer(in, encoding, encodeCtx);
}
 
Developer: tenggyut, Project: HIndex, Lines: 20, Source: HFileDataBlockEncoderImpl.java


Note: the org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects and their copyright remains with the original authors; consult the corresponding project's license before redistributing or reusing the code. Do not republish without permission.