This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.copyFromStreamToBuffer. If you are wondering what ByteBufferUtils.copyFromStreamToBuffer does and how to use it, the curated code examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.util.ByteBufferUtils.
Eight code examples of ByteBufferUtils.copyFromStreamToBuffer are shown below, ordered by popularity.
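Before the examples, here is a minimal, self-contained sketch of what copyFromStreamToBuffer does: it reads exactly length bytes from a DataInputStream into a ByteBuffer, advancing the buffer's position. The class name CopyFromStreamDemo and the sample payload are ours, for illustration only.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class CopyFromStreamDemo {
  public static void main(String[] args) throws Exception {
    byte[] payload = {1, 2, 3, 4, 5};  // hypothetical sample data
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));

    ByteBuffer out = ByteBuffer.allocate(payload.length);
    // Copy all five bytes from the stream into the buffer; the buffer's
    // position advances by the number of bytes copied.
    ByteBufferUtils.copyFromStreamToBuffer(out, in, payload.length);

    out.flip();
    System.out.println(out.remaining());  // prints 5
  }
}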
Example 1: internalDecodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

@Override
protected ByteBuffer internalDecodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  int decompressedSize = source.readInt();
  ByteBuffer buffer = ByteBuffer.allocate(decompressedSize + allocateHeaderLength);
  buffer.position(allocateHeaderLength);
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, decompressedSize);
  return buffer;
}
Example 2: decompressFirstKV
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

/**
 * Copies the first key/value from the given stream, and initializes
 * decompression state based on it. Assumes that we have already read key
 * and value lengths. Does not set {@link #qualifierLength} (not used by
 * decompression) or {@link #prevOffset} (set by the caller afterwards).
 */
private void decompressFirstKV(ByteBuffer out, DataInputStream in)
    throws IOException {
  int kvPos = out.position();
  out.putInt(keyLength);
  out.putInt(valueLength);
  prevTimestampOffset = out.position() + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE;
  ByteBufferUtils.copyFromStreamToBuffer(out, in, keyLength + valueLength);
  rowLength = out.getShort(kvPos + KeyValue.ROW_OFFSET);
  familyLength = out.get(kvPos + KeyValue.ROW_OFFSET +
      KeyValue.ROW_LENGTH_SIZE + rowLength);
  type = out.get(prevTimestampOffset + KeyValue.TIMESTAMP_SIZE);
}
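The offset arithmetic above relies on the serialized KeyValue layout: a 4-byte key length, a 4-byte value length, then the key, whose last nine bytes are an 8-byte timestamp and a 1-byte type. A small sketch of that arithmetic using the public KeyValue constants; the kvPos and keyLength values are hypothetical.

import org.apache.hadoop.hbase.KeyValue;

public class KeyValueLayoutDemo {
  public static void main(String[] args) {
    int kvPos = 0;       // hypothetical: where the KeyValue starts in the buffer
    int keyLength = 20;  // hypothetical key length

    // ROW_OFFSET (8) skips the 4-byte key-length and 4-byte value-length ints.
    int keyStart = kvPos + KeyValue.ROW_OFFSET;
    // The key ends with an 8-byte timestamp plus a 1-byte type, so the
    // timestamp starts TIMESTAMP_TYPE_SIZE (9) bytes before the key's end.
    int timestampOffset = keyStart + keyLength - KeyValue.TIMESTAMP_TYPE_SIZE;
    int typeOffset = timestampOffset + KeyValue.TIMESTAMP_SIZE;

    System.out.println("key starts at:       " + keyStart);        // 8
    System.out.println("timestamp starts at: " + timestampOffset); // 19
    System.out.println("type byte at:        " + typeOffset);      // 27
  }
}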
Example 3: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy the memstore timestamp from the data input stream to the byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
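The two manual dest.put(...) calls above write tagsLength in big-endian order, which for a ByteBuffer in its default byte order is equivalent to a single putShort, as Example 7 below does. A quick JDK-only sketch of the equivalence; the class name and sample value are ours.

import java.nio.ByteBuffer;

public class UnsignedShortDemo {
  public static void main(String[] args) {
    int tagsLength = 0xABCD;  // hypothetical value that fits in an unsigned short

    ByteBuffer manual = ByteBuffer.allocate(2);
    manual.put((byte) ((tagsLength >> 8) & 0xff));  // high byte first
    manual.put((byte) (tagsLength & 0xff));         // then low byte

    ByteBuffer viaPutShort = ByteBuffer.allocate(2);
    viaPutShort.putShort((short) tagsLength);       // default order is big-endian

    manual.flip();
    viaPutShort.flip();
    System.out.println(manual.equals(viaPutShort)); // prints true
  }
}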
Example 4: decodeKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

private int decodeKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;

  ensureSpace(buffer, keyLength + valueLength + KeyValue.ROW_OFFSET);

  buffer.putInt(keyLength);
  buffer.putInt(valueLength);

  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        commonLength);
  } else {
    keyOffset = buffer.position();
  }

  // copy the rest of the key and the value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
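The commonLength logic above implements prefix compression: each key stores only the suffix that differs from the previous key, and the shared prefix is copied from the previously decoded key already in the buffer. A minimal sketch of the same idea on plain byte arrays; the keys and lengths are hypothetical.

public class PrefixDecodeDemo {
  public static void main(String[] args) {
    byte[] prevKey = "row-001/family".getBytes();
    byte[] suffix = "002/family".getBytes();  // the bytes after the shared prefix
    int commonLength = 4;                     // "row-" is shared with the previous key

    byte[] key = new byte[commonLength + suffix.length];
    System.arraycopy(prevKey, 0, key, 0, commonLength);             // copy the prefix
    System.arraycopy(suffix, 0, key, commonLength, suffix.length);  // copy the rest
    System.out.println(new String(key));      // prints row-002/family
  }
}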
Example 5: uncompressKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

@Override
public ByteBuffer uncompressKeyValues(DataInputStream source,
    int preserveHeaderLength, int skipLastBytes, boolean includesMemstoreTS)
    throws IOException {
  int decompressedSize = source.readInt();
  ByteBuffer buffer = ByteBuffer.allocate(decompressedSize + preserveHeaderLength);
  buffer.position(preserveHeaderLength);
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, decompressedSize);
  return buffer;
}
Example 6: uncompressKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

private int uncompressKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;

  ByteBufferUtils.ensureSpace(buffer, keyLength + valueLength + KeyValue.ROW_OFFSET);

  buffer.putInt(keyLength);
  buffer.putInt(valueLength);

  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
        commonLength);
  } else {
    keyOffset = buffer.position();
  }

  // copy the rest of the key and the value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
Example 7: afterDecodingKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    short tagsLength = (short) ByteBufferUtils.readCompressedInt(source);
    dest.putShort(tagsLength);
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is used in this file, tagCompressionContext will be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy the memstore timestamp from the data input stream to the byte buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
Example 8: decodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on

@Override
public ByteBuffer decodeKeyValues(DataInputStream source,
    int preserveHeaderLength, int skipLastBytes, boolean includesMemstoreTS)
    throws IOException {
  int decompressedSize = source.readInt();
  ByteBuffer buffer = ByteBuffer.allocate(decompressedSize + preserveHeaderLength);
  buffer.position(preserveHeaderLength);
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, decompressedSize);
  return buffer;
}