This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.ByteBufferUtils.putInt. If you are wondering what exactly ByteBufferUtils.putInt does, how to use it, or where to find usage examples, the curated method examples below may help. You can also explore further usage examples of its declaring class, org.apache.hadoop.hbase.util.ByteBufferUtils.
The following presents 15 code examples of the ByteBufferUtils.putInt method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
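Before the examples, a minimal sketch of the pattern almost all of them share: ByteBufferUtils.putInt(OutputStream, int) writes a single int to a stream, and the examples use it to emit a 4-byte length prefix in front of serialized data. The snippet below is illustrative only; it assumes the four bytes are written in big-endian order (compatible with DataInputStream.readInt()) — treat that as an assumption of the sketch rather than a documented guarantee.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class PutIntLengthPrefixSketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

    // Write a 4-byte length prefix followed by the payload it describes.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    ByteBufferUtils.putInt(out, payload.length);
    out.write(payload);
    out.flush();

    // Read it back; assumes the prefix is big-endian, matching DataInputStream.readInt().
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    int length = in.readInt();
    byte[] restored = new byte[length];
    in.readFully(restored);
    System.out.println(new String(restored, StandardCharsets.UTF_8)); // prints "hello"
  }
}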
Example 1: compressKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void compressKeyValues(DataOutputStream out,
    ByteBuffer in, boolean includesMemstoreTS) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  DiffCompressionState previousState = new DiffCompressionState();
  DiffCompressionState currentState = new DiffCompressionState();
  while (in.hasRemaining()) {
    compressSingleKeyValue(previousState, currentState, out, in);
    afterEncodingKeyValue(in, out, includesMemstoreTS);
    // swap previousState <-> currentState
    DiffCompressionState tmp = previousState;
    previousState = currentState;
    currentState = tmp;
  }
}
Example 2: compressKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void compressKeyValues(DataOutputStream out,
    ByteBuffer in, boolean includesMemstoreTS) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  FastDiffCompressionState previousState = new FastDiffCompressionState();
  FastDiffCompressionState currentState = new FastDiffCompressionState();
  while (in.hasRemaining()) {
    compressSingleKeyValue(previousState, currentState, out, in);
    afterEncodingKeyValue(in, out, includesMemstoreTS);
    // swap previousState <-> currentState
    FastDiffCompressionState tmp = previousState;
    previousState = currentState;
    currentState = tmp;
  }
}
Example 3: internalEncodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void internalEncodeKeyValues(DataOutputStream out,
    ByteBuffer in, HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  DiffCompressionState previousState = new DiffCompressionState();
  DiffCompressionState currentState = new DiffCompressionState();
  while (in.hasRemaining()) {
    compressSingleKeyValue(previousState, currentState, out, in);
    afterEncodingKeyValue(in, out, encodingCtx);
    // swap previousState <-> currentState
    DiffCompressionState tmp = previousState;
    previousState = currentState;
    currentState = tmp;
  }
}
Example 4: internalEncodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void internalEncodeKeyValues(DataOutputStream out, ByteBuffer in,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  FastDiffCompressionState previousState = new FastDiffCompressionState();
  FastDiffCompressionState currentState = new FastDiffCompressionState();
  while (in.hasRemaining()) {
    compressSingleKeyValue(previousState, currentState, out, in);
    afterEncodingKeyValue(in, out, encodingCtx);
    // swap previousState <-> currentState
    FastDiffCompressionState tmp = previousState;
    previousState = currentState;
    currentState = tmp;
  }
}
Example 5: write
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public int write(OutputStream out, boolean withTags) throws IOException {
  int lenToWrite = getSerializedSize(withTags);
  ByteBufferUtils.putInt(out, keyBuffer.capacity());
  ByteBufferUtils.putInt(out, valueLength);
  // Write key
  out.write(keyBuffer.array());
  // Write value
  ByteBufferUtils.copyBufferToStream(out, this.valueBuffer, this.valueOffset, this.valueLength);
  if (withTags && this.tagsLength > 0) {
    // 2-byte tags length followed by the tag bytes.
    // The tags length is serialized with only 2 bytes (as a short) even though the field is an int.
    // Since these are non-negative values, the sign bit is not needed. See HBASE-11437.
    out.write((byte) (0xff & (this.tagsLength >> 8)));
    out.write((byte) (0xff & this.tagsLength));
    ByteBufferUtils.copyBufferToStream(out, this.tagsBuffer, this.tagsOffset, this.tagsLength);
  }
  return lenToWrite;
}
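The two out.write calls above pack the tags length into two bytes. A tiny encode/decode pair that mirrors that shift-and-mask logic (plain Java; the helper names are made up for illustration):

// Encode a non-negative tags length that fits in 16 bits as two bytes, most
// significant byte first, then decode it back; mirrors the logic in Example 5.
static byte[] encodeTagsLength(int tagsLength) {
  return new byte[] { (byte) (0xff & (tagsLength >> 8)), (byte) (0xff & tagsLength) };
}

static int decodeTagsLength(byte[] twoBytes) {
  return ((twoBytes[0] & 0xff) << 8) | (twoBytes[1] & 0xff);
}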
Example 6: write
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
/**
 * Made into a static method so as to reuse the logic within
 * ValueAndTagRewriteByteBufferExtendedCell
 */
static int write(OutputStream out, boolean withTags, Cell cell, byte[] value, byte[] tags)
    throws IOException {
  int valLen = value == null ? 0 : value.length;
  ByteBufferUtils.putInt(out, KeyValueUtil.keyLength(cell)); // Key length
  ByteBufferUtils.putInt(out, valLen); // Value length
  int len = 2 * Bytes.SIZEOF_INT;
  len += writeFlatKey(cell, out); // Key
  if (valLen > 0) {
    out.write(value); // Value
  }
  len += valLen;
  if (withTags && tags != null) {
    // Write the tags length as 2 bytes
    out.write((byte) (0xff & (tags.length >> 8)));
    out.write((byte) (0xff & tags.length));
    out.write(tags);
    len += KeyValue.TAGS_LENGTH_SIZE + tags.length;
  }
  return len;
}
Example 7: writeCell
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
/**
 * Writes the cell to the given OutputStream.
 * @param cell the cell to be written
 * @param out the output stream
 * @param withTags whether tags are to be written
 * @return the total bytes written
 * @throws IOException
 */
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
  if (cell instanceof ExtendedCell) {
    return ((ExtendedCell) cell).write(out, withTags);
  } else {
    ByteBufferUtils.putInt(out, estimatedSerializedSizeOfKey(cell));
    ByteBufferUtils.putInt(out, cell.getValueLength());
    writeFlatKey(cell, out);
    writeValue(out, cell, cell.getValueLength());
    int tagsLength = cell.getTagsLength();
    if (withTags) {
      byte[] len = new byte[Bytes.SIZEOF_SHORT];
      Bytes.putAsShort(len, 0, tagsLength);
      out.write(len);
      if (tagsLength > 0) {
        writeTags(out, cell, tagsLength);
      }
    }
    int lenWritten = (2 * Bytes.SIZEOF_INT) + estimatedSerializedSizeOfKey(cell)
        + cell.getValueLength();
    if (withTags) {
      lenWritten += Bytes.SIZEOF_SHORT + tagsLength;
    }
    return lenWritten;
  }
}
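The return value is bookkeeping rather than a measurement: two int prefixes plus the key, the value, and, when tags are written, a 2-byte tags-length prefix plus the tags themselves. A hypothetical helper capturing the same arithmetic, handy for sanity-checking buffer sizes (the name is illustrative, not HBase API):

// Mirrors the length accounting in writeCell: two 4-byte length prefixes,
// key and value bytes, and optionally a 2-byte tags length plus the tags.
static int expectedWrittenLength(int keyLength, int valueLength, int tagsLength, boolean withTags) {
  int len = 2 * Integer.BYTES + keyLength + valueLength;
  if (withTags) {
    len += Short.BYTES + tagsLength; // the 2-byte prefix is written even when tagsLength == 0
  }
  return len;
}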
Example 8: testKeyValueSerialization
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
public void testKeyValueSerialization() throws Exception {
  KeyValue kvA1 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
      Bytes.toBytes("1"));
  KeyValue kvA2 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
      Bytes.toBytes("2"));
  MockKeyValue mkvA1 = new MockKeyValue(kvA1);
  MockKeyValue mkvA2 = new MockKeyValue(kvA2);
  ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
  DataOutputStream os = new DataOutputStream(byteArrayOutputStream);
  ByteBufferUtils.putInt(os, KeyValueUtil.getSerializedSize(mkvA1, true));
  KeyValueUtil.oswrite(mkvA1, os, true);
  ByteBufferUtils.putInt(os, KeyValueUtil.getSerializedSize(mkvA2, true));
  KeyValueUtil.oswrite(mkvA2, os, true);
  DataInputStream is = new DataInputStream(new ByteArrayInputStream(
      byteArrayOutputStream.toByteArray()));
  KeyValue deSerKV1 = KeyValueUtil.iscreate(is, true);
  assertTrue(kvA1.equals(deSerKV1));
  KeyValue deSerKV2 = KeyValueUtil.iscreate(is, true);
  assertTrue(kvA2.equals(deSerKV2));
}
Example 9: startBlockEncoding
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out)
    throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx =
      (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);
  if (encodingCtx.getHFileContext().isIncludesTags()
      && encodingCtx.getHFileContext().isCompressTags()) {
    if (encodingCtx.getTagCompressionContext() != null) {
      // It would be overhead to create the TagCompressionContext again and again for every
      // block encoding.
      encodingCtx.getTagCompressionContext().clear();
    } else {
      try {
        TagCompressionContext tagCompressionContext = new TagCompressionContext(
            LRUDictionary.class, Byte.MAX_VALUE);
        encodingCtx.setTagCompressionContext(tagCompressionContext);
      } catch (Exception e) {
        throw new IOException("Failed to initialize TagCompressionContext", e);
      }
    }
  }
  ByteBufferUtils.putInt(out, 0); // DUMMY length. This will be updated in endBlockEncoding()
  blkEncodingCtx.setEncodingState(new BufferedDataBlockEncodingState());
}
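The final putInt(out, 0) reserves four bytes that endBlockEncoding() later overwrites with the real encoded size once it is known. Stripped of all HBase types, the reserve-then-patch idea looks roughly like the sketch below (illustrative only; the class name is made up, and the actual patching in HBase happens through the encoding state rather than on a raw array):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class LengthPlaceholderSketch {
  static byte[] encode(byte[] payload) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(0);     // reserve 4 bytes for the length, like the DUMMY above
    out.write(payload);  // stand-in for the real block encoding
    out.flush();

    byte[] block = baos.toByteArray();
    int encodedLength = block.length - Integer.BYTES;
    // Patch the placeholder in place (big-endian) once the real size is known.
    block[0] = (byte) (encodedLength >>> 24);
    block[1] = (byte) (encodedLength >>> 16);
    block[2] = (byte) (encodedLength >>> 8);
    block[3] = (byte) encodedLength;
    return block;
  }
}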
Example 10: compressKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void compressKeyValues(DataOutputStream out,
    ByteBuffer in, boolean includesMemstoreTS) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  ByteBufferUtils.moveBufferToStream(out, in, in.limit());
}
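This example (like Examples 12 and 14 below) is the "copy" flavour of encoding: a 4-byte length prefix followed by the buffer contents verbatim. A hedged sketch of how a reader could consume that framing (illustrative only, again assuming a big-endian prefix; this is not HBase's actual decoder):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

class CopyEncodingReadSketch {
  // Reads back what Example 10 wrote: a 4-byte length followed by that many raw bytes.
  static ByteBuffer decode(byte[] encoded) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(encoded));
    int length = in.readInt();
    byte[] raw = new byte[length];
    in.readFully(raw);
    return ByteBuffer.wrap(raw);
  }
}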
Example 11: compressKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void compressKeyValues(DataOutputStream writeHere,
    ByteBuffer in, boolean includesMemstoreTS) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(writeHere, in.limit());
  int prevOffset = -1;
  int offset = 0;
  int keyLength = 0;
  while (in.hasRemaining()) {
    offset = in.position();
    keyLength = addKV(prevOffset, writeHere, in, keyLength);
    afterEncodingKeyValue(in, writeHere, includesMemstoreTS);
    prevOffset = offset;
  }
}
Example 12: internalEncodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void internalEncodeKeyValues(DataOutputStream out,
    ByteBuffer in, HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  ByteBufferUtils.moveBufferToStream(out, in, in.limit());
}
Example 13: internalEncodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void internalEncodeKeyValues(DataOutputStream writeHere, ByteBuffer in,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(writeHere, in.limit());
  int prevOffset = -1;
  int offset = 0;
  int keyLength = 0;
  while (in.hasRemaining()) {
    offset = in.position();
    keyLength = addKV(prevOffset, writeHere, in, keyLength);
    afterEncodingKeyValue(in, writeHere, encodingCtx);
    prevOffset = offset;
  }
}
Example 14: internalEncodeKeyValues
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
@Override
public void internalEncodeKeyValues(DataOutputStream out,
    ByteBuffer in, boolean includesMemstoreTS) throws IOException {
  in.rewind();
  ByteBufferUtils.putInt(out, in.limit());
  ByteBufferUtils.moveBufferToStream(out, in, in.limit());
}
Example 15: createCellReference
import org.apache.hadoop.hbase.util.ByteBufferUtils; // import the package/class the method depends on
private int createCellReference(ByteBufferKeyValue cell, ByteBuffer idxBuffer, int idxOffset) {
  int offset = idxOffset;
  int dataChunkID = cell.getChunkId();
  offset = ByteBufferUtils.putInt(idxBuffer, offset, dataChunkID);               // write data chunk id
  offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getOffset());          // offset
  offset = ByteBufferUtils.putInt(idxBuffer, offset, KeyValueUtil.length(cell)); // length
  offset = ByteBufferUtils.putLong(idxBuffer, offset, cell.getSequenceId());     // seqId
  return offset;
}
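Unlike the stream overloads in the earlier examples, this putInt variant writes into a ByteBuffer at an explicit offset and returns the position just past what it wrote, so the calls chain naturally. A small sketch of that offset arithmetic, grounded in the calls above (the class and constant names are made up; sizes assume 4-byte ints and an 8-byte long):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

class CellReferenceLayoutSketch {
  // One cell reference as laid out in Example 15: chunk id, cell offset and
  // cell length as ints, followed by the sequence id as a long.
  static final int CELL_REF_SIZE = 3 * Integer.BYTES + Long.BYTES; // 20 bytes

  static int writeReference(ByteBuffer idxBuffer, int idxOffset, int chunkId,
      int cellOffset, int cellLength, long seqId) {
    int offset = idxOffset;
    offset = ByteBufferUtils.putInt(idxBuffer, offset, chunkId);
    offset = ByteBufferUtils.putInt(idxBuffer, offset, cellOffset);
    offset = ByteBufferUtils.putInt(idxBuffer, offset, cellLength);
    offset = ByteBufferUtils.putLong(idxBuffer, offset, seqId);
    return offset; // == idxOffset + CELL_REF_SIZE
  }
}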