This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getKeyValueDataStructureSize. If you are unsure what KeyValue.getKeyValueDataStructureSize does, or how and when to use it, the curated examples below may help; you can also explore the enclosing class org.apache.hadoop.hbase.KeyValue for more context.
The sections below present 4 code examples of KeyValue.getKeyValueDataStructureSize, sorted by popularity by default.
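Before diving into the examples, it helps to know what the method computes: a KeyValue is serialized as a 4-byte key length, a 4-byte value length, the key bytes, and the value bytes, optionally followed by a 2-byte tags length and the tag bytes. The sketch below restates that arithmetic for illustration only; it is not the HBase implementation itself, and the field widths are assumptions based on the KeyValue wire format.

import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: the flat byte-layout size of a KeyValue for the given
// key/value/tags lengths (field widths assumed from the wire format).
static long keyValueDataStructureSize(int keyLength, int valueLength, int tagsLength) {
  long size = 2 * Bytes.SIZEOF_INT          // key-length and value-length prefixes
      + (long) keyLength + valueLength;     // key and value payloads
  if (tagsLength > 0) {
    size += Bytes.SIZEOF_SHORT + tagsLength; // tags-length prefix plus tag bytes
  }
  return size;
}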
Example 1: readKV
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Uncompresses a KeyValue from a DataInput and returns it.
 *
 * @param in the DataInput
 * @param readContext the CompressionContext to use
 * @return an uncompressed KeyValue
 * @throws IOException
 */
public static KeyValue readKV(DataInput in, CompressionContext readContext)
    throws IOException {
  int keylength = WritableUtils.readVInt(in);
  int vlength = WritableUtils.readVInt(in);
  int tagsLength = WritableUtils.readVInt(in);
  int length = (int) KeyValue.getKeyValueDataStructureSize(keylength, vlength, tagsLength);

  byte[] backingArray = new byte[length];
  int pos = 0;
  pos = Bytes.putInt(backingArray, pos, keylength);
  pos = Bytes.putInt(backingArray, pos, vlength);

  // the row
  int elemLen = Compressor.uncompressIntoArray(backingArray,
      pos + Bytes.SIZEOF_SHORT, in, readContext.rowDict);
  checkLength(elemLen, Short.MAX_VALUE);
  pos = Bytes.putShort(backingArray, pos, (short) elemLen);
  pos += elemLen;

  // family
  elemLen = Compressor.uncompressIntoArray(backingArray,
      pos + Bytes.SIZEOF_BYTE, in, readContext.familyDict);
  checkLength(elemLen, Byte.MAX_VALUE);
  pos = Bytes.putByte(backingArray, pos, (byte) elemLen);
  pos += elemLen;

  // qualifier
  elemLen = Compressor.uncompressIntoArray(backingArray, pos, in,
      readContext.qualifierDict);
  pos += elemLen;

  // the rest
  in.readFully(backingArray, pos, length - pos);

  return new KeyValue(backingArray, 0, length);
}
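As a quick sanity check, one could compare the predicted size against a real KeyValue. The snippet below is a hypothetical demo, not part of the example above:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical check: a freshly built, tag-free KeyValue should occupy exactly
// the number of bytes the static size method predicts for its key/value lengths.
static void demoSizeMatchesLength() {
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("value"));
  long expected = KeyValue.getKeyValueDataStructureSize(
      kv.getKeyLength(), kv.getValueLength(), 0); // this cell has no tags
  assert expected == kv.getLength();
}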
Example 2: binarySearch
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Searches for the latest value for the specified column.
 *
 * @param kvs the array to search
 * @param family family name
 * @param foffset family offset
 * @param flength family length
 * @param qualifier column qualifier
 * @param qoffset qualifier offset
 * @param qlength qualifier length
 *
 * @return the index where the value was found, or -1 otherwise
 */
protected int binarySearch(final Cell[] kvs,
    final byte[] family, final int foffset, final int flength,
    final byte[] qualifier, final int qoffset, final int qlength) {
  double keyValueSize = (double)
      KeyValue.getKeyValueDataStructureSize(kvs[0].getRowLength(), flength, qlength, 0);

  byte[] buffer = localBuffer.get();
  if (buffer == null || keyValueSize > buffer.length) {
    // pad to the smallest multiple of the pad width
    buffer = new byte[(int) Math.ceil(keyValueSize / PAD_WIDTH) * PAD_WIDTH];
    localBuffer.set(buffer);
  }

  Cell searchTerm = KeyValueUtil.createFirstOnRow(buffer, 0,
      kvs[0].getRowArray(), kvs[0].getRowOffset(), kvs[0].getRowLength(),
      family, foffset, flength,
      qualifier, qoffset, qlength);

  // pos === ( -(insertion point) - 1)
  int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
  // never will exact match
  if (pos < 0) {
    pos = (pos + 1) * -1;
    // pos is now insertion point
  }
  if (pos == kvs.length) {
    return -1; // doesn't exist
  }
  return pos;
}
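This protected helper appears to back Result#getColumnLatestCell in the HBase client, so callers usually reach it indirectly. A minimal usage sketch follows; the connection setup and the table/column names are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

// Fetch a row, then let Result locate the newest cell for one column
// (internally via a binary search like the one above).
static void printLatestValue() throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
       Table table = conn.getTable(TableName.valueOf("my_table"))) {
    Result result = table.get(new Get(Bytes.toBytes("row1")));
    Cell latest = result.getColumnLatestCell(Bytes.toBytes("f"), Bytes.toBytes("q"));
    if (latest != null) {
      System.out.println(Bytes.toString(CellUtil.cloneValue(latest)));
    }
  }
}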
Example 3: readCell
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
static KeyValue readCell(PositionedByteRange pbr) throws Exception {
  int kvStartPos = pbr.getPosition();
  int keyLen = pbr.getInt();
  int valLen = pbr.getInt();
  pbr.setPosition(pbr.getPosition() + keyLen + valLen); // Skip the key and value section
  int tagsLen = ((pbr.get() & 0xff) << 8) ^ (pbr.get() & 0xff); // 2-byte big-endian tags length
  pbr.setPosition(pbr.getPosition() + tagsLen); // Skip the tags section
  long mvcc = pbr.getVLong();
  KeyValue kv = new KeyValue(pbr.getBytes(), kvStartPos,
      (int) KeyValue.getKeyValueDataStructureSize(keyLen, valLen, tagsLen));
  kv.setSequenceId(mvcc);
  return kv;
}
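The two pbr.get() calls above decode the tags length as a two-byte big-endian unsigned short; because the shifted high byte and the low byte occupy disjoint bit ranges, the ^ behaves exactly like |. A standalone restatement (helper name hypothetical):

// Equivalent decode of the 2-byte big-endian tags length used above;
// hi supplies bits 8-15 and lo bits 0-7, so XOR and OR give the same result.
static int readUnsignedShortBE(byte hi, byte lo) {
  return ((hi & 0xff) << 8) | (lo & 0xff);
}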
Example 4: getIterator
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Provides access to the compressed value.
 * @param headerSize header size of the block.
 * @return Forwards sequential iterator.
 */
public Iterator<Cell> getIterator(int headerSize) {
  final int rawSize = rawKVs.length;
  byte[] encodedDataWithHeader = getEncodedData();
  int bytesToSkip = headerSize + Bytes.SIZEOF_SHORT;
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedDataWithHeader,
      bytesToSkip, encodedDataWithHeader.length - bytesToSkip);
  final DataInputStream dis = new DataInputStream(bais);

  return new Iterator<Cell>() {
    private ByteBuffer decompressedData = null;

    @Override
    public boolean hasNext() {
      if (decompressedData == null) {
        return rawSize > 0;
      }
      return decompressedData.hasRemaining();
    }

    @Override
    public Cell next() {
      if (decompressedData == null) {
        try {
          decompressedData = dataBlockEncoder.decodeKeyValues(dis, dataBlockEncoder
              .newDataBlockDecodingContext(meta));
        } catch (IOException e) {
          throw new RuntimeException("Problem with data block encoder, " +
              "most likely it requested more bytes than are available.", e);
        }
        decompressedData.rewind();
      }
      int offset = decompressedData.position();
      int klen = decompressedData.getInt();
      int vlen = decompressedData.getInt();
      int tagsLen = 0;
      ByteBufferUtils.skip(decompressedData, klen + vlen);
      // Read the tags length in case the stream contains tags
      if (meta.isIncludesTags()) {
        tagsLen = ((decompressedData.get() & 0xff) << 8) ^ (decompressedData.get() & 0xff);
        ByteBufferUtils.skip(decompressedData, tagsLen);
      }
      KeyValue kv = new KeyValue(decompressedData.array(), offset,
          (int) KeyValue.getKeyValueDataStructureSize(klen, vlen, tagsLen));
      if (meta.isIncludesMvcc()) {
        long mvccVersion = ByteBufferUtils.readVLong(decompressedData);
        kv.setSequenceId(mvccVersion);
      }
      return kv;
    }

    @Override
    public void remove() {
      throw new NotImplementedException("remove() is not supported!");
    }

    @Override
    public String toString() {
      return "Iterator of: " + dataBlockEncoder.getClass().getName();
    }
  };
}
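A minimal consumption sketch; the block variable is a hypothetical instance of the class declaring getIterator, and the header size is assumed to come from HConstants.HFILEBLOCK_HEADER_SIZE as is typical for HFile blocks:

// Walk every Cell decoded from the encoded block (names here are assumptions).
Iterator<Cell> it = block.getIterator(HConstants.HFILEBLOCK_HEADER_SIZE);
while (it.hasNext()) {
  Cell cell = it.next();
  System.out.println(cell); // inspect or otherwise process each decoded cell
}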