This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.copyFromBufferToBuffer. If you have been asking yourself what ByteBufferUtils.copyFromBufferToBuffer does, how to use it, or where to find examples, the curated code samples here should help. You can also explore further usage examples of its containing class, org.apache.hadoop.hbase.util.ByteBufferUtils.
Below are 15 code examples of ByteBufferUtils.copyFromBufferToBuffer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
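Before the examples, a minimal self-contained sketch of the method itself may help. It is written against the HBase 2.x hbase-common API, where the five-argument positional overload is copyFromBufferToBuffer(in, out, sourceOffset, destinationOffset, length) and leaves both buffers' positions untouched. Note that some examples below come from older release lines where the argument order differs (destination buffer first), so check the ByteBufferUtils javadoc of your HBase version:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class CopyFromBufferToBufferDemo {
  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap(new byte[] { 0, 1, 2, 3, 4, 5, 6, 7 });
    ByteBuffer dest = ByteBuffer.allocate(16);
    // Positional copy: take 4 bytes starting at src offset 2 and write them
    // at dest offset 5 (HBase 2.x argument order: source buffer first).
    ByteBufferUtils.copyFromBufferToBuffer(src, dest, 2, 5, 4);
    System.out.println(dest.get(5)); // prints 2
  }
}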
Example 1: getKeyValueBuffer
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public ByteBuffer getKeyValueBuffer() {
  ByteBuffer kvBuffer = createKVBuffer();
  kvBuffer.putInt(current.keyLength);
  kvBuffer.putInt(current.valueLength);
  kvBuffer.put(current.keyBuffer, 0, current.keyLength);
  ByteBufferUtils.copyFromBufferToBuffer(kvBuffer, currentBuffer, current.valueOffset,
    current.valueLength);
  if (current.tagsLength > 0) {
    // Put short as unsigned
    kvBuffer.put((byte) (current.tagsLength >> 8 & 0xff));
    kvBuffer.put((byte) (current.tagsLength & 0xff));
    if (current.tagsOffset != -1) {
      // The offset of the tags bytes in the underlying buffer is marked, so the temp
      // buffer, tagsBuffer, was not used.
      ByteBufferUtils.copyFromBufferToBuffer(kvBuffer, currentBuffer, current.tagsOffset,
        current.tagsLength);
    } else {
      // When tagsOffset is marked as -1, tag compression was present and the tags were
      // uncompressed into the temp buffer, tagsBuffer. Copy them from there.
      kvBuffer.put(current.tagsBuffer, 0, current.tagsLength);
    }
  }
  return kvBuffer;
}
Example 2: put
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public SingleByteBuff put(int offset, ByteBuff src, int srcOffset, int length) {
  if (src instanceof SingleByteBuff) {
    ByteBufferUtils.copyFromBufferToBuffer(((SingleByteBuff) src).buf, this.buf, srcOffset,
      offset, length);
  } else {
    // TODO we can do some optimization here? Call to asSubByteBuffer might
    // create a copy.
    ObjectIntPair<ByteBuffer> pair = new ObjectIntPair<>();
    src.asSubByteBuffer(srcOffset, length, pair);
    if (pair.getFirst() != null) {
      ByteBufferUtils.copyFromBufferToBuffer(pair.getFirst(), this.buf, pair.getSecond(), offset,
        length);
    }
  }
  return this;
}
Example 3: checkSizeAndGrow
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
protected void checkSizeAndGrow(int extra) {
  long capacityNeeded = curBuf.position() + (long) extra;
  if (capacityNeeded > curBuf.limit()) {
    // guarantee it's possible to fit
    if (capacityNeeded > MAX_ARRAY_SIZE) {
      throw new BufferOverflowException();
    }
    // double until hit the cap
    long nextCapacity = Math.min(curBuf.capacity() * 2L, MAX_ARRAY_SIZE);
    // but make sure there is enough if twice the existing capacity is still too small
    nextCapacity = Math.max(nextCapacity, capacityNeeded);
    ByteBuffer newBuf = allocate((int) nextCapacity, curBuf.isDirect());
    curBuf.flip();
    ByteBufferUtils.copyFromBufferToBuffer(curBuf, newBuf);
    curBuf = newBuf;
  }
}
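The growth rule above is standard amortized doubling with a hard cap. A dependency-free sketch of the same arithmetic (the names are stand-ins, not the HBase fields):

import java.nio.BufferOverflowException;

final class GrowthRule {
  // Stand-in for the MAX_ARRAY_SIZE constant used in the snippet above.
  static final long MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

  /** Double the capacity, cap it at the array limit, but never return less than needed. */
  static int nextCapacity(int currentCapacity, long capacityNeeded) {
    if (capacityNeeded > MAX_ARRAY_SIZE) {
      throw new BufferOverflowException();
    }
    long next = Math.min(currentCapacity * 2L, MAX_ARRAY_SIZE);
    return (int) Math.max(next, capacityNeeded);
  }
}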
Example 4: decodeKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
private int decodeKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;
  ensureSpace(buffer, keyLength + valueLength + KeyValue.ROW_OFFSET);
  buffer.putInt(keyLength);
  buffer.putInt(valueLength);
  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
      commonLength);
  } else {
    keyOffset = buffer.position();
  }
  // copy rest of the key and value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
Example 5: uncompressKeyValue
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
private int uncompressKeyValue(DataInputStream source, ByteBuffer buffer,
    int prevKeyOffset)
    throws IOException, EncoderBufferTooSmallException {
  int keyLength = ByteBufferUtils.readCompressedInt(source);
  int valueLength = ByteBufferUtils.readCompressedInt(source);
  int commonLength = ByteBufferUtils.readCompressedInt(source);
  int keyOffset;
  keyLength += commonLength;
  ByteBufferUtils.ensureSpace(buffer, keyLength + valueLength
    + KeyValue.ROW_OFFSET);
  buffer.putInt(keyLength);
  buffer.putInt(valueLength);
  // copy the prefix
  if (commonLength > 0) {
    keyOffset = buffer.position();
    ByteBufferUtils.copyFromBufferToBuffer(buffer, buffer, prevKeyOffset,
      commonLength);
  } else {
    keyOffset = buffer.position();
  }
  // copy rest of the key and value
  int len = keyLength - commonLength + valueLength;
  ByteBufferUtils.copyFromStreamToBuffer(buffer, source, len);
  return keyOffset;
}
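Examples 4 and 5 reconstruct prefix-compressed keys: only keyLength - commonLength new key bytes are on the stream, and the first commonLength bytes are copied from the previous key already present in the buffer. A dependency-free sketch of the same idea on plain arrays (names are illustrative, not HBase API):

// Prefix decoding in plain arrays: the decoded key is the common prefix
// taken from the previous key plus the suffix read from the stream.
static byte[] decodeKey(byte[] prevKey, int commonLength, byte[] suffix) {
  byte[] key = new byte[commonLength + suffix.length];
  System.arraycopy(prevKey, 0, key, 0, commonLength);
  System.arraycopy(suffix, 0, key, commonLength, suffix.length);
  return key;
}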
Example 6: copyTagsTo
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies the cell's tags into the tag portion of the destination buffer
 * @param cell the cell whose tags have to be copied
 * @param destination the destination bytebuffer to which the tags have to be copied
 * @param destinationOffset the offset in the destination bytebuffer
 * @return the offset of the bytebuffer after the copy has happened
 */
public static int copyTagsTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int tlen = cell.getTagsLength();
  if (cell instanceof ByteBufferExtendedCell) {
    ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getTagsByteBuffer(),
      destination, ((ByteBufferExtendedCell) cell).getTagsPosition(), destinationOffset, tlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getTagsArray(),
      cell.getTagsOffset(), tlen);
  }
  return destinationOffset + tlen;
}
Example 7: copyRowTo
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies the row to the given bytebuffer
 * @param cell the cell whose row has to be copied
 * @param destination the destination bytebuffer to which the row has to be copied
 * @param destinationOffset the offset in the destination bytebuffer
 * @return the offset of the bytebuffer after the copy has happened
 */
public static int copyRowTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  short rowLen = cell.getRowLength();
  if (cell instanceof ByteBufferExtendedCell) {
    ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
      destination, ((ByteBufferExtendedCell) cell).getRowPosition(), destinationOffset, rowLen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getRowArray(),
      cell.getRowOffset(), rowLen);
  }
  return destinationOffset + rowLen;
}
Example 8: copyFamilyTo
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies the family to the given bytebuffer
 * @param cell the cell whose family has to be copied
 * @param destination the destination bytebuffer to which the family has to be copied
 * @param destinationOffset the offset in the destination bytebuffer
 * @return the offset of the bytebuffer after the copy has happened
 */
public static int copyFamilyTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  byte fLen = cell.getFamilyLength();
  if (cell instanceof ByteBufferExtendedCell) {
    ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
      destination, ((ByteBufferExtendedCell) cell).getFamilyPosition(), destinationOffset, fLen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getFamilyArray(),
      cell.getFamilyOffset(), fLen);
  }
  return destinationOffset + fLen;
}
Example 9: copyQualifierTo
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies the qualifier to the given bytebuffer
 * @param cell the cell whose qualifier has to be copied
 * @param destination the destination bytebuffer to which the qualifier has to be copied
 * @param destinationOffset the offset in the destination bytebuffer
 * @return the offset of the bytebuffer after the copy has happened
 */
public static int copyQualifierTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int qlen = cell.getQualifierLength();
  if (cell instanceof ByteBufferExtendedCell) {
    ByteBufferUtils.copyFromBufferToBuffer(
      ((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
      destination, ((ByteBufferExtendedCell) cell).getQualifierPosition(),
      destinationOffset, qlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset,
      cell.getQualifierArray(), cell.getQualifierOffset(), qlen);
  }
  return destinationOffset + qlen;
}
Example 10: copyValueTo
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies the value to the given bytebuffer
 * @param cell the cell whose value has to be copied
 * @param destination the destination bytebuffer to which the value has to be copied
 * @param destinationOffset the offset in the destination bytebuffer
 * @return the offset of the bytebuffer after the copy has happened
 */
public static int copyValueTo(Cell cell, ByteBuffer destination, int destinationOffset) {
  int vlen = cell.getValueLength();
  if (cell instanceof ByteBufferExtendedCell) {
    ByteBufferUtils.copyFromBufferToBuffer(((ByteBufferExtendedCell) cell).getValueByteBuffer(),
      destination, ((ByteBufferExtendedCell) cell).getValuePosition(), destinationOffset, vlen);
  } else {
    ByteBufferUtils.copyFromArrayToBuffer(destination, destinationOffset, cell.getValueArray(),
      cell.getValueOffset(), vlen);
  }
  return destinationOffset + vlen;
}
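A hedged usage sketch for the copy*To family shown in Examples 6-10. In HBase 2.x these helpers live in org.apache.hadoop.hbase.PrivateCellUtil, an internal (IA.Private) class, so treat this as illustrative rather than supported API; KeyValue is used only to obtain a heap-backed Cell:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CopyToDemo {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("value"));
    ByteBuffer dest = ByteBuffer.allocate(64);
    // Each helper returns the position right after what it copied,
    // so successive calls chain naturally.
    int pos = PrivateCellUtil.copyRowTo(cell, dest, 0);
    pos = PrivateCellUtil.copyValueTo(cell, dest, pos);
    // dest now holds "row1" immediately followed by "value".
  }
}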
Example 11: put
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Copies from a src MBB to this MBB.
 * @param offset the position in this MBB to which the copy should happen
 * @param src the src MBB
 * @param srcOffset the offset in the src MBB from where the elements should be read
 * @param length the length up to which the copy should happen
 */
@Override
public MultiByteBuff put(int offset, ByteBuff src, int srcOffset, int length) {
  int destItemIndex = getItemIndex(offset);
  int srcItemIndex = getItemIndex(srcOffset);
  ByteBuffer destItem = this.items[destItemIndex];
  offset = offset - this.itemBeginPos[destItemIndex];
  ByteBuffer srcItem = getItemByteBuffer(src, srcItemIndex);
  srcOffset = srcOffset - this.itemBeginPos[srcItemIndex];
  int toRead, toWrite, toMove;
  while (length > 0) {
    toWrite = destItem.limit() - offset;
    toRead = srcItem.limit() - srcOffset;
    toMove = Math.min(length, Math.min(toRead, toWrite));
    ByteBufferUtils.copyFromBufferToBuffer(srcItem, destItem, srcOffset, offset, toMove);
    length -= toMove;
    if (length == 0) break;
    if (toRead < toWrite) {
      srcItem = getItemByteBuffer(src, ++srcItemIndex);
      srcOffset = 0;
      offset += toMove;
    } else if (toRead > toWrite) {
      destItem = this.items[++destItemIndex];
      offset = 0;
      srcOffset += toMove;
    } else {
      // toRead = toWrite case
      srcItem = getItemByteBuffer(src, ++srcItemIndex);
      srcOffset = 0;
      destItem = this.items[++destItemIndex];
      offset = 0;
    }
  }
  return this;
}
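To make the fragment-walking loop concrete, a small hedged sketch. MultiByteBuff and SingleByteBuff are IA.Private classes in hbase-common, and the varargs constructor shown matches HBase 2.x; constructor signatures may differ in other release lines:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.MultiByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class MultiByteBuffPutDemo {
  public static void main(String[] args) {
    // Destination fragmented into a 4-byte and an 8-byte buffer.
    MultiByteBuff dest = new MultiByteBuff(ByteBuffer.allocate(4), ByteBuffer.allocate(8));
    SingleByteBuff src = new SingleByteBuff(ByteBuffer.wrap(new byte[] { 1, 2, 3, 4, 5, 6 }));
    // Copy 6 bytes to offset 2: the copy straddles the boundary between
    // the first and second fragments, exercising the loop above.
    dest.put(2, src, 0, 6);
  }
}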
Example 12: write
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public void write(ByteBuffer b, int off, int len) throws IOException {
  int toWrite = 0;
  while (len > 0) {
    toWrite = Math.min(len, this.curBuf.remaining());
    ByteBufferUtils.copyFromBufferToBuffer(b, this.curBuf, off, toWrite);
    off += toWrite;
    len -= toWrite;
    if (len > 0) {
      allocateNewBuffer(); // curBuf is full; move on to the next one
    }
  }
}
Example 13: unpack
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
/**
 * Retrieves the decompressed/decrypted view of this block. An encoded block remains in its
 * encoded structure. Internal structures are shared between instances where applicable.
 */
HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException {
  if (!fileContext.isCompressedOrEncrypted()) {
    // TODO: cannot use our own fileContext here because HFileBlock(ByteBuffer, boolean),
    // which is used for block serialization to L2 cache, does not preserve encoding and
    // encryption details.
    return this;
  }
  HFileBlock unpacked = new HFileBlock(this);
  unpacked.allocateBuffer(); // allocates space for the decompressed block
  HFileBlockDecodingContext ctx = blockType == BlockType.ENCODED_DATA ?
    reader.getBlockDecodingContext() : reader.getDefaultBlockDecodingContext();
  ByteBuffer dup = this.buf.duplicate();
  dup.position(this.headerSize());
  dup = dup.slice();
  ctx.prepareDecoding(unpacked.getOnDiskSizeWithoutHeader(),
    unpacked.getUncompressedSizeWithoutHeader(), unpacked.getBufferWithoutHeader(),
    dup);
  // Preserve the next block's header bytes in the new block if we have them.
  if (unpacked.hasNextBlockHeader()) {
    // Both buffers' limits end at the checksum bytes and exclude the next block's header.
    // The copyFromBufferToBuffer() call below does positional reads/writes when either
    // buffer is a DBB, so we only raise the limit on duplicate buffers: no copying, just
    // new BB objects.
    ByteBuffer inDup = this.buf.duplicate();
    inDup.limit(inDup.limit() + headerSize());
    ByteBuffer outDup = unpacked.buf.duplicate();
    outDup.limit(outDup.limit() + unpacked.headerSize());
    ByteBufferUtils.copyFromBufferToBuffer(
      outDup,
      inDup,
      this.onDiskDataSizeWithHeader,
      unpacked.headerSize() + unpacked.uncompressedSizeWithoutHeader
        + unpacked.totalChecksumBytes(), unpacked.headerSize());
  }
  return unpacked;
}
Example 14: serialize
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public void serialize(ByteBuffer destination) {
  ByteBufferUtils.copyFromBufferToBuffer(destination, this.buf, 0, getSerializedLength()
    - EXTRA_SERIALIZATION_SPACE);
  serializeExtraInfo(destination);
}
Example 15: write
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class this method depends on
@Override
public void write(ByteBuffer buf, int offset) {
  ByteBufferUtils.copyFromBufferToBuffer(this.buf, buf, this.offset, offset, this.length);
}