

Java ByteBufferUtils.writeVLong Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.writeVLong. If you are wondering what ByteBufferUtils.writeVLong does and how to use it in practice, the curated examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.hbase.util.ByteBufferUtils.


Eight code examples of the ByteBufferUtils.writeVLong method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
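Before diving into the collected examples, here is a minimal standalone sketch (written for this article, not taken from the projects below) of what ByteBufferUtils.writeVLong does: it writes a long into a ByteBuffer using Hadoop's variable-length vlong encoding, so small values occupy fewer bytes. WritableUtils.getVIntSize reports the encoded width, which is the usual way callers size or extend the destination buffer:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.io.WritableUtils;

public class WriteVLongDemo {
  public static void main(String[] args) {
    long mvccVersion = 42L; // arbitrary value, for illustration only
    // getVIntSize tells us how many bytes the vlong encoding will occupy.
    ByteBuffer bb = ByteBuffer.allocate(WritableUtils.getVIntSize(mvccVersion));
    ByteBufferUtils.writeVLong(bb, mvccVersion);
    System.out.println("encoded " + mvccVersion + " in " + bb.position() + " byte(s)");
  }
}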

Example 1: decodeKeyValues

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDecodingContext decodingCtx) throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);// wasteful: copies the whole stream
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(sourceAsBuffer);
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvcc);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // needs to be modified for DirectByteBuffers. no existing methods to
      // write VLongs to byte[]
      int offset = result.arrayOffset() + result.position();
      System.arraycopy(currentCell.getBuffer(), currentCell.getOffset(), result.array(), offset,
          currentCell.getLength());
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvcc) {
        ByteBufferUtils.writeVLong(result, currentCell.getMvccVersion());
      }
    }
    result.position(result.limit());//make it appear as if we were appending
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source file: PrefixTreeCodec.java
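Note that the loop above interleaves each serialized KeyValue with a trailing vlong when MVCC is included, so the resulting buffer is not a plain sequence of fixed-layout cells; a reader has to mirror the write path. A hypothetical sketch (assuming an array-backed buffer positioned at the start of a cell, with MVCC included):

  // Read back one cell and its trailing MVCC vlong.
  KeyValue kv = new KeyValue(buf.array(), buf.arrayOffset() + buf.position());
  ByteBufferUtils.skip(buf, kv.getLength());
  long mvcc = ByteBufferUtils.readVLong(buf);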

Example 2: appendToByteBuffer

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  if (includeMvccVersion) {
    int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getMvccVersion());
    ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, kv.getMvccVersion());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source file: KeyValueUtil.java
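A possible driver for this helper (my sketch, assuming the method above is in static scope; note the method only pushes the limit out and relies on the caller having pre-allocated enough capacity):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
    Bytes.toBytes("q"), Bytes.toBytes("value"));
// Capacity = cell bytes + the vlong-encoded width of the mvcc version.
ByteBuffer bb = ByteBuffer.allocate(
    kv.getLength() + WritableUtils.getVIntSize(kv.getMvccVersion()));
appendToByteBuffer(bb, kv, true); // cell bytes first, then the mvcc vlong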

Example 3: afterDecodingKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will
      // be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 32, Source file: BufferedDataBlockEncoder.java
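The MVCC branch above is a stream-to-buffer copy: WritableUtils.readVLong decodes the vlong from the DataInputStream, and ByteBufferUtils.writeVLong re-encodes it into the destination ByteBuffer (both use the same vlong format). A self-contained round-trip sketch, not from the project:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.io.WritableUtils;

// Encode a vlong into a stream, then copy it the way afterDecodingKeyValue does.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
WritableUtils.writeVLong(new DataOutputStream(baos), 1234567L);
DataInputStream source = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));

long memstoreTS = WritableUtils.readVLong(source);  // stream -> long
ByteBuffer dest = ByteBuffer.allocate(WritableUtils.getVIntSize(memstoreTS));
ByteBufferUtils.writeVLong(dest, memstoreTS);       // long -> buffer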

Example 4: afterDecodingKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, boolean includesMemstoreTS) {
  if (includesMemstoreTS) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 16, Source file: BufferedDataBlockEncoder.java
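This is the same copy step as Example 3, but from the older 0.94-era encoder API: the MVCC flag arrives as a plain boolean instead of an HFileBlockDefaultDecodingContext, and there is no tag handling yet (cell tags were introduced later, in 0.98).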

Example 5: afterDecodingKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = (short) ByteBufferUtils.readCompressedInt(source);
    dest.putShort(tagsLength);
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will
      // be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 30, Source file: BufferedDataBlockEncoder.java
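The only material difference from Example 3 is how the two-byte tags length is written. For a ByteBuffer in its default big-endian order, the two forms are equivalent:

  dest.put((byte) ((tagsLength >> 8) & 0xff)); // high byte first...
  dest.put((byte) (tagsLength & 0xff));        // ...then low byte
  // ...is the same as:
  dest.putShort((short) tagsLength);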

Example 6: appendToByteBuffer

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  if (includeMvccVersion) {
    int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getSequenceId());
    ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, kv.getSequenceId());
  }
}
 
Developer ID: apache, Project: hbase, Lines of code: 12, Source file: KeyValueUtil.java
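This is Example 2 as it appears in current Apache HBase; the only change is the accessor, since KeyValue.getMvccVersion() was deprecated in favor of getSequenceId(), which returns the same MVCC value.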

Example 7: appendToByteBuffer

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  // TODO tags
  if (includeMvccVersion) {
    // TODO: 0.98 compatibility
    int numMvccVersionBytes = WritableUtils.getVIntSize(0/*kv.getMemstoreTS()*/);
    bb.limit(bb.limit() + numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, 0/*kv.getMemstoreTS()*/);
  }
}
 
Developer ID: VladRodionov, Project: bigbase, Lines of code: 14, Source file: TestUtils.java
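This test variant hard-codes 0 as a stand-in for the memstore timestamp (a 0.98-compatibility shim, per the TODO) and grows the limit by hand. If I read the utility correctly, the manual call is what ByteBufferUtils.extendLimit from Examples 2 and 6 does internally:

  bb.limit(bb.limit() + numMvccVersionBytes);           // manual form used here
  ByteBufferUtils.extendLimit(bb, numMvccVersionBytes); // equivalent helper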

Example 8: decodeKeyValues

import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
@Override
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, boolean includesMvccVersion) throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);// wasteful: copies the whole stream
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(sourceAsBuffer);
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    searcher = DecoderFactory.checkOut(sourceAsBuffer, includesMvccVersion);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // needs to be modified for DirectByteBuffers. no existing methods to
      // write VLongs to byte[]
      int offset = result.arrayOffset() + result.position();
      KeyValueUtil.appendToByteArray(currentCell, result.array(), offset);
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvccVersion) {
        ByteBufferUtils.writeVLong(result, currentCell.getMvccVersion());
      }
    }
    result.position(result.limit());//make it appear as if we were appending
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
 
Developer ID: cloud-software-foundation, Project: c5, Lines of code: 38, Source file: PrefixTreeCodec.java
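Functionally this mirrors Example 1 from an older codebase: the MVCC flag is passed as a boolean rather than read from a decoding context, and the cell bytes are copied with KeyValueUtil.appendToByteArray instead of a raw System.arraycopy.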


Note: The org.apache.hadoop.hbase.util.ByteBufferUtils.writeVLong method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.