当前位置: 首页>>代码示例>>Java>>正文


Java ByteBufferUtils.readCompressedInt方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.util.ByteBufferUtils.readCompressedInt方法的典型用法代码示例。如果您正苦于以下问题:Java ByteBufferUtils.readCompressedInt方法的具体用法?Java ByteBufferUtils.readCompressedInt怎么用?Java ByteBufferUtils.readCompressedInt使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.util.ByteBufferUtils的用法示例。


在下文中一共展示了ByteBufferUtils.readCompressedInt方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: decodeTags

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Decodes the tags section of the current cell from {@code currentBuffer}.
 * Reads the compressed-int tags length first, then either uncompresses the
 * tags into {@code current.tagsBuffer} (when a tag compression context is in
 * use) or just records the offset of the raw tag bytes so the KV can be
 * materialized later without an extra copy.
 */
protected void decodeTags() {
  // Length of the (possibly compressed) tags block for the current cell.
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is in use; uncompress the tags into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      // Tags already uncompressed on a previous pass; skip the compressed
      // bytes (relies on tagsCompressedLength recorded by that pass).
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true;// Reset so the next cell uncompresses again.
    }
    // -1 marks "tags live in tagsBuffer, not in currentBuffer".
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, avoid copying tag bytes into
    // tagsBuffer. Just mark the tags offset so the KV buffer can be created
    // later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:25,代码来源:BufferedDataBlockEncoder.java

示例2: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Returns the first key of a prefix-delta-encoded block as a slice, leaving
 * the caller's buffer position untouched (mark/reset bracket the reads).
 * The first key in a block must carry a zero common-prefix length.
 */
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  // Step over the leading int header before the first encoded cell.
  block.position(Bytes.SIZEOF_INT);
  final int keyLen = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // value length, unused here
  final int prefixLen = ByteBufferUtils.readCompressedInt(block);
  if (prefixLen != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + prefixLen);
  }
  final int keyStart = block.position();
  block.reset();
  // Slice [keyStart, keyStart + keyLen) via a duplicate so the original
  // buffer's position and limit stay as the caller left them.
  final ByteBuffer view = block.duplicate();
  view.position(keyStart);
  view.limit(keyStart + keyLen);
  return view.slice();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:PrefixKeyDeltaEncoder.java

示例3: decodeTags

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Decodes the tags section of the current cell from {@code currentBuffer}.
 * Reads the compressed-int tags length first, then either uncompresses the
 * tags into {@code current.tagsBuffer} (when a tag compression context is in
 * use) or just records the offset of the raw tag bytes for later use.
 */
protected void decodeTags() {
  // Length of the (possibly compressed) tags block for the current cell.
  current.tagsLength = ByteBufferUtils.readCompressedInt(currentBuffer);
  if (tagCompressionContext != null) {
    if (current.uncompressTags) {
      // Tag compression is in use; uncompress the tags into tagsBuffer.
      current.ensureSpaceForTags();
      try {
        current.tagsCompressedLength = tagCompressionContext.uncompressTags(currentBuffer,
            current.tagsBuffer, 0, current.tagsLength);
      } catch (IOException e) {
        throw new RuntimeException("Exception while uncompressing tags", e);
      }
    } else {
      // Tags already uncompressed on a previous pass; skip the compressed
      // bytes (relies on tagsCompressedLength recorded by that pass).
      ByteBufferUtils.skip(currentBuffer, current.tagsCompressedLength);
      current.uncompressTags = true;// Reset so the next cell uncompresses again.
    }
    // -1 marks "tags live in tagsBuffer, not in currentBuffer".
    current.tagsOffset = -1;
  } else {
    // When tag compression is not used, avoid a temp copy of tag bytes into
    // tagsBuffer. Just mark the tags offset so the KV buffer can be created
    // later in getKeyValueBuffer().
    current.tagsOffset = currentBuffer.position();
    ByteBufferUtils.skip(currentBuffer, current.tagsLength);
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:25,代码来源:BufferedDataBlockEncoder.java

示例4: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Returns the first key of a prefix-delta-encoded block, wrapped over the
 * block's backing array. Buffer position is preserved via mark/reset.
 * The first key in a block must carry a zero common-prefix length.
 */
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT); // step over the leading int header
  final int firstKeyLen = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // value length - not needed
  final int common = ByteBufferUtils.readCompressedInt(block);
  if (common != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + common);
  }
  final int keyPos = block.position();
  block.reset();
  // arrayOffset() keeps this correct for buffers that are views into a
  // larger backing array.
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + keyPos,
      firstKeyLen).slice();
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:17,代码来源:PrefixKeyDeltaEncoder.java

示例5: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Returns the first key of a FastDiffDelta-encoded block as a slice; the
 * caller's buffer position and limit are restored via mark/reset.
 */
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  // Skip the int + byte header that precedes the first encoded cell.
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  final int keyLen = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  final int keyStart = block.position();
  block.reset();
  // Slice out the key bytes through a duplicate so the original buffer is
  // left untouched.
  final ByteBuffer view = block.duplicate();
  view.position(keyStart);
  view.limit(keyStart + keyLen);
  return view.slice();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:15,代码来源:FastDiffDeltaEncoder.java

示例6: afterDecodingKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Consumes the per-cell trailer (tags and/or memstore timestamp) that follows
 * an encoded key/value, copying it from {@code source} into {@code dest}.
 *
 * @param source stream positioned just past the decoded key/value
 * @param dest destination buffer the decoded cell is being appended to
 * @param decodingCtx decoding context carrying the HFile metadata flags
 * @throws IOException if reading the tags from {@code source} fails
 */
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short (two big-endian bytes).
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, a non-null
      // tagCompressionContext will have been passed in.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp (vlong) from the data input stream to the
      // byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      // BUGFIX: chain the original exception as the cause; the original code
      // dropped it, losing the root-cause stack trace.
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value", ex);
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:32,代码来源:BufferedDataBlockEncoder.java

示例7: decodeKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Decodes one prefix-delta-compressed key/value from {@code source} into
 * {@code buffer}, rebuilding the key prefix shared with the previous key.
 *
 * @param source stream positioned at the start of an encoded KV
 * @param buffer destination buffer receiving the fully decoded KV
 * @param prevKeyOffset offset of the previous key inside {@code buffer}
 * @return the offset in {@code buffer} at which the decoded key begins
 */
private int decodeKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
        throws IOException, EncoderBufferTooSmallException {
  final int suffixLen = ByteBufferUtils.readCompressedInt(source);
  final int valueLen = ByteBufferUtils.readCompressedInt(source);
  final int prefixLen = ByteBufferUtils.readCompressedInt(source);
  // The encoded key length covers only the non-shared suffix; the full key
  // length adds the prefix shared with the previous key.
  final int fullKeyLen = suffixLen + prefixLen;

  ensureSpace(buffer, fullKeyLen + valueLen + KeyValue.ROW_OFFSET);

  buffer.putInt(fullKeyLen);
  buffer.putInt(valueLen);

  final int keyOffset = buffer.position();
  if (prefixLen > 0) {
    // Reconstruct the shared prefix by copying it from the previous key.
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        prefixLen);
  }

  // Copy the remainder of the key plus the whole value straight through.
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source,
      fullKeyLen - prefixLen + valueLen);
  return keyOffset;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:PrefixKeyDeltaEncoder.java

示例8: createSeeker

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Creates a seeker that decodes prefix-delta-encoded cells in place from the
 * block buffer, reusing the previous key's bytes for the shared prefix.
 */
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final HFileBlockDecodingContext decodingCtx) {
  return new BufferedEncodedSeeker<SeekerState>(comparator, decodingCtx) {
    @Override
    protected void decodeNext() {
      // Per-cell layout: key-suffix length, value length, common-prefix
      // length (all compressed ints), then the key suffix, then the value.
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      // keyLength held only the suffix; add the prefix shared with the
      // previous key to get the full key length.
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      // The first lastCommonPrefix bytes of keyBuffer are still valid from
      // the previous key; read only the differing suffix.
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      // Value bytes stay in currentBuffer; only remember where they start.
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesTags()) {
        decodeTags();
      }
      if (includesMvcc()) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      // Skip the leading int header before the first encoded cell.
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:35,代码来源:PrefixKeyDeltaEncoder.java

示例9: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  int pos = block.position();
  block.reset();
  return ByteBuffer.wrap(block.array(), pos, keyLength).slice();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:12,代码来源:FastDiffDeltaEncoder.java

示例10: uncompressKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Uncompresses one prefix-delta-compressed key/value from {@code source}
 * into {@code buffer}, rebuilding the key prefix shared with the previous
 * key.
 *
 * @param source stream positioned at the start of an encoded KV
 * @param buffer destination buffer receiving the fully decoded KV
 * @param prevKeyOffset offset of the previous key inside {@code buffer}
 * @return the offset in {@code buffer} at which the decoded key begins
 */
private int uncompressKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
        throws IOException, EncoderBufferTooSmallException {
  final int suffixLen = ByteBufferUtils.readCompressedInt(source);
  final int valueLen = ByteBufferUtils.readCompressedInt(source);
  final int prefixLen = ByteBufferUtils.readCompressedInt(source);
  // The encoded key length covers only the non-shared suffix; the full key
  // length adds the prefix shared with the previous key.
  final int fullKeyLen = suffixLen + prefixLen;

  ByteBufferUtils.ensureSpace(buffer, fullKeyLen + valueLen
      + KeyValue.ROW_OFFSET);

  buffer.putInt(fullKeyLen);
  buffer.putInt(valueLen);

  final int keyOffset = buffer.position();
  if (prefixLen > 0) {
    // Reconstruct the shared prefix by copying it from the previous key.
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        prefixLen);
  }

  // Copy the remainder of the key plus the whole value straight through.
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source,
      fullKeyLen - prefixLen + valueLen);
  return keyOffset;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:30,代码来源:PrefixKeyDeltaEncoder.java

示例11: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block);
  int commonLength = ByteBufferUtils.readCompressedInt(block);
  if (commonLength != 0) {
    throw new AssertionError("Nonzero common length in the first key in "
        + "block: " + commonLength);
  }
  int pos = block.position();
  block.reset();
  return ByteBuffer.wrap(block.array(), pos, keyLength).slice();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:16,代码来源:PrefixKeyDeltaEncoder.java

示例12: createSeeker

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Creates a seeker that decodes prefix-delta-encoded cells in place from the
 * block buffer, reusing the previous key's bytes for the shared prefix.
 */
@Override
public EncodedSeeker createSeeker(RawComparator<byte[]> comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      // Per-cell layout: key-suffix length, value length, common-prefix
      // length (all compressed ints), then the key suffix, then the value.
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      // keyLength held only the suffix; add the prefix shared with the
      // previous key to get the full key length.
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      // The first lastCommonPrefix bytes of keyBuffer are still valid from
      // the previous key; read only the differing suffix.
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      // Value bytes stay in currentBuffer; only remember where they start.
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      // Skip the leading int header before the first encoded cell.
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:32,代码来源:PrefixKeyDeltaEncoder.java

示例13: getFirstKeyInBlock

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
@Override
public ByteBuffer getFirstKeyInBlock(ByteBuffer block) {
  block.mark();
  block.position(Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE);
  int keyLength = ByteBufferUtils.readCompressedInt(block);
  ByteBufferUtils.readCompressedInt(block); // valueLength
  ByteBufferUtils.readCompressedInt(block); // commonLength
  int pos = block.position();
  block.reset();
  return ByteBuffer.wrap(block.array(), block.arrayOffset() + pos, keyLength)
      .slice();
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:13,代码来源:FastDiffDeltaEncoder.java

示例14: afterDecodingKeyValue

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Consumes the per-cell trailer (tags and/or memstore timestamp) that follows
 * an encoded key/value, copying it from {@code source} into {@code dest}.
 *
 * @param source stream positioned just past the decoded key/value
 * @param dest destination buffer the decoded cell is being appended to
 * @param decodingCtx decoding context carrying the HFile metadata flags
 * @throws IOException if reading the tags from {@code source} fails
 */
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    // NOTE(review): the narrowing cast assumes tags length fits in a signed
    // short; a length > 32767 would go negative and be skipped — confirm the
    // encoder bounds it.
    short tagsLength = (short) ByteBufferUtils.readCompressedInt(source);
    dest.putShort(tagsLength);
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, a non-null
      // tagCompressionContext will have been passed in.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp (vlong) from the data input stream to the
      // byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      // BUGFIX: chain the original exception as the cause; the original code
      // dropped it, losing the root-cause stack trace.
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value", ex);
    }
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:30,代码来源:BufferedDataBlockEncoder.java

示例15: createSeeker

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Creates a seeker that decodes prefix-delta-encoded cells in place from the
 * block buffer, reusing the previous key's bytes for the shared prefix.
 */
@Override
public EncodedSeeker createSeeker(KVComparator comparator,
    final boolean includesMemstoreTS) {
  return new BufferedEncodedSeeker<SeekerState>(comparator) {
    @Override
    protected void decodeNext() {
      // Per-cell layout: key-suffix length, value length, common-prefix
      // length (all compressed ints), then the key suffix, then the value.
      current.keyLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.valueLength = ByteBufferUtils.readCompressedInt(currentBuffer);
      current.lastCommonPrefix =
          ByteBufferUtils.readCompressedInt(currentBuffer);
      // keyLength held only the suffix; add the prefix shared with the
      // previous key to get the full key length.
      current.keyLength += current.lastCommonPrefix;
      current.ensureSpaceForKey();
      // The first lastCommonPrefix bytes of keyBuffer are still valid from
      // the previous key; read only the differing suffix.
      currentBuffer.get(current.keyBuffer, current.lastCommonPrefix,
          current.keyLength - current.lastCommonPrefix);
      current.valueOffset = currentBuffer.position();
      // Value bytes stay in currentBuffer; only remember where they start.
      ByteBufferUtils.skip(currentBuffer, current.valueLength);
      if (includesMemstoreTS) {
        current.memstoreTS = ByteBufferUtils.readVLong(currentBuffer);
      } else {
        current.memstoreTS = 0;
      }
      current.nextKvOffset = currentBuffer.position();
    }

    @Override
    protected void decodeFirst() {
      // Skip the leading int header before the first encoded cell.
      ByteBufferUtils.skip(currentBuffer, Bytes.SIZEOF_INT);
      decodeNext();
    }
  };
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:32,代码来源:PrefixKeyDeltaEncoder.java


注:本文中的org.apache.hadoop.hbase.util.ByteBufferUtils.readCompressedInt方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。