This page collects typical usage examples of the Java method org.apache.hadoop.io.WritableUtils.getVIntSize. If you have been wondering what WritableUtils.getVIntSize does, how to use it, or where to find examples of it, the curated method examples below may help. You can also explore further usage examples of its containing class, org.apache.hadoop.io.WritableUtils.
The following shows 15 code examples of the WritableUtils.getVIntSize method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
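Before diving into the examples, a quick note on the method itself: WritableUtils.getVIntSize(long) returns the number of bytes that WritableUtils.writeVInt/writeVLong would use to encode a value, without writing anything. Values in the range [-112, 127] occupy a single byte; anything else takes one length byte plus 1 to 8 payload bytes, so an int costs at most 5 bytes and a long at most 9. The following standalone sketch (VIntSizeDemo is a hypothetical class name, not taken from the examples below) checks the predicted size against the actual encoding:

import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VIntSizeDemo {
  public static void main(String[] args) throws IOException {
    long[] samples = { 0L, 127L, 128L, -113L, Integer.MAX_VALUE, Long.MAX_VALUE };
    for (long v : samples) {
      DataOutputBuffer out = new DataOutputBuffer();
      WritableUtils.writeVLong(out, v);
      // getVIntSize predicts the encoded length without serializing anything
      System.out.println(v + " -> predicted " + WritableUtils.getVIntSize(v)
          + " bytes, actual " + out.getLength());
    }
  }
}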
Example 1: writeRecords
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
private long writeRecords(int count, boolean knownKeyLength,
    boolean knownValueLength, boolean close) throws IOException {
  long rawDataSize = 0;
  for (int nx = 0; nx < count; nx++) {
    String key = TestTFileByteArrays.composeSortedKey("key", nx);
    DataOutputStream outKey =
        writer.prepareAppendKey(knownKeyLength ? key.length() : -1);
    outKey.write(key.getBytes());
    outKey.close();
    String value = "value" + nx;
    DataOutputStream outValue =
        writer.prepareAppendValue(knownValueLength ? value.length() : -1);
    outValue.write(value.getBytes());
    outValue.close();
    rawDataSize +=
        WritableUtils.getVIntSize(key.getBytes().length)
            + key.getBytes().length
            + WritableUtils.getVIntSize(value.getBytes().length)
            + value.getBytes().length;
  }
  if (close) {
    closeOutput();
  }
  return rawDataSize;
}
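The rawDataSize accounting above mirrors TFile's logical record shape: each key and each value is counted as a vint length prefix plus the raw bytes. For a value such as "value0" (6 bytes), getVIntSize returns 1 because 6 fits in a single-byte vint, so that value contributes 7 bytes in total.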
Example 2: internalEncode
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();
  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tags into the stream
  if (encodingContext.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingContext.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    size += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return size;
}
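Note how the returned size mirrors exactly what was written: klength + vlength for the flat key and value, KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE for the two 4-byte length fields emitted by writeInt, the 2-byte tags length plus the tag bytes when tags are enabled, and a variable number of bytes for the sequence id, which is why WritableUtils.getVIntSize is used rather than a constant.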
Example 3: mark
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void mark() throws IOException {
  if (getBackupStore() == null) {
    backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
  }
  isMarked = true;
  if (!inReset) {
    backupStore.reinitialize();
    if (currentKeyLength == -1) {
      // The user has not called next() for this iterator yet, so
      // there is no current record to mark and copy to backup store.
      return;
    }
    assert (currentValueLength != -1);
    int requestedSize = currentKeyLength + currentValueLength +
        WritableUtils.getVIntSize(currentKeyLength) +
        WritableUtils.getVIntSize(currentValueLength);
    DataOutputStream out = backupStore.getOutputStream(requestedSize);
    writeFirstKeyValueBytes(out);
    backupStore.updateCounters(requestedSize);
  } else {
    backupStore.mark();
  }
}
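This mark() is the machinery behind the public mark/reset support that reducers get through org.apache.hadoop.mapreduce.MarkableIterator. A minimal usage sketch inside a reduce() method (MyValue and process() are hypothetical names) might look like:

MarkableIterator<MyValue> mitr = new MarkableIterator<MyValue>(values.iterator());
mitr.mark();                 // remember the current position
while (mitr.hasNext()) {
  process(mitr.next());      // first pass over the values
}
mitr.reset();                // rewind to the mark for a second pass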
Example 4: writeRecords
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
    rawDataSize +=
        WritableUtils.getVIntSize(key.length) + key.length
            + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
Example 5: appendToByteBuffer
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
    final boolean includeMvccVersion) {
  // keep pushing the limit out. assume enough capacity
  bb.limit(bb.position() + kv.getLength());
  bb.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  if (includeMvccVersion) {
    int numMvccVersionBytes = WritableUtils.getVIntSize(kv.getMvccVersion());
    ByteBufferUtils.extendLimit(bb, numMvccVersionBytes);
    ByteBufferUtils.writeVLong(bb, kv.getMvccVersion());
  }
}
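If the caller cannot simply "assume enough capacity", the mvcc contribution can be budgeted the same way before allocating. A hedged sketch, assuming a KeyValue kv and an includeMvccVersion flag as above:

int needed = kv.getLength()
    + (includeMvccVersion ? WritableUtils.getVIntSize(kv.getMvccVersion()) : 0);
ByteBuffer bb = ByteBuffer.allocate(needed); // capacity now covers the optional vlong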
Example 6: init
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Open the file and write its header.
 */
private void init() throws IOException {
  FileSystem fs = this.path.getFileSystem(conf);
  FSDataOutputStream fsOut = fs.create(this.path);
  this.countingOut = new CountingOutputStream(
      new BufferedOutputStream(fsOut));
  this.out = new DataOutputStream(this.countingOut);
  // put any necessary config strings into the header.
  MetaBlock m = this.header.getMetaBlock();
  if (isCharData) {
    m.put(MetaBlock.ENTRY_ENCODING_KEY, MetaBlock.CLOB_ENCODING);
  } else {
    m.put(MetaBlock.ENTRY_ENCODING_KEY, MetaBlock.BLOB_ENCODING);
  }
  if (null != codec) {
    m.put(MetaBlock.COMPRESSION_CODEC_KEY, this.codecName);
  }
  // Serialize the value of maxEntriesPerSegment as a VInt in a byte array
  // and put that into the metablock as ENTRIES_PER_SEGMENT_KEY.
  int segmentBufLen = WritableUtils.getVIntSize(this.maxEntriesPerSegment);
  DataOutputBuffer entriesPerSegBuf = new DataOutputBuffer(segmentBufLen);
  WritableUtils.writeVInt(entriesPerSegBuf, this.maxEntriesPerSegment);
  byte[] entriesPerSegArray =
      Arrays.copyOf(entriesPerSegBuf.getData(), segmentBufLen);
  m.put(MetaBlock.ENTRIES_PER_SEGMENT_KEY,
      new BytesWritable(entriesPerSegArray));
  // Write the file header to the file.
  this.header.write(out);
  // Now we're ready to accept record data from the user.
}
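On the read side, the same vint can be decoded from the stored bytes. A minimal sketch, assuming a hypothetical accessor that returns the BytesWritable stored under ENTRIES_PER_SEGMENT_KEY:

BytesWritable raw = m.get(MetaBlock.ENTRIES_PER_SEGMENT_KEY); // hypothetical accessor
DataInputBuffer in = new DataInputBuffer();
in.reset(raw.getBytes(), raw.getLength());
int entriesPerSegment = WritableUtils.readVInt(in);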
Example 7: append
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void append(K key, V value) throws IOException {
  if (key.getClass() != keyClass)
    throw new IOException("wrong key class: " + key.getClass()
        + " is not " + keyClass);
  if (value.getClass() != valueClass)
    throw new IOException("wrong value class: " + value.getClass()
        + " is not " + valueClass);
  // Append the 'key'
  keySerializer.serialize(key);
  int keyLength = buffer.getLength();
  if (keyLength < 0) {
    throw new IOException("Negative key-length not allowed: " + keyLength +
        " for " + key);
  }
  // Append the 'value'
  valueSerializer.serialize(value);
  int valueLength = buffer.getLength() - keyLength;
  if (valueLength < 0) {
    throw new IOException("Negative value-length not allowed: " +
        valueLength + " for " + value);
  }
  // Write the record out
  WritableUtils.writeVInt(out, keyLength);              // key length
  WritableUtils.writeVInt(out, valueLength);            // value length
  out.write(buffer.getData(), 0, buffer.getLength());   // data
  // Reset
  buffer.reset();
  // Update bytes written
  decompressedBytesWritten += keyLength + valueLength +
      WritableUtils.getVIntSize(keyLength) +
      WritableUtils.getVIntSize(valueLength);
  ++numRecordsWritten;
}
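The matching stream terminator explains the EOF handling in Example 9 below: in Hadoop's IFile.Writer, close() ends the record stream by writing the EOF marker (-1) in place of both lengths, roughly along these lines:

// Sketch of the corresponding bookkeeping in IFile.Writer#close()
WritableUtils.writeVInt(out, EOF_MARKER); // EOF marker in place of a key length
WritableUtils.writeVInt(out, EOF_MARKER); // EOF marker in place of a value length
decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);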
Example 8: write
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(Cell c) throws IOException {
  KeyValue kv = KeyValueUtil.ensureKeyValue(c);
  expectState(State.WRITING);
  this.dataBlockEncoder.encode(kv, dataBlockEncodingCtx, this.userDataStream);
  this.unencodedDataSizeWritten += kv.getLength();
  if (dataBlockEncodingCtx.getHFileContext().isIncludesMvcc()) {
    this.unencodedDataSizeWritten += WritableUtils.getVIntSize(kv.getMvccVersion());
  }
}
Example 9: positionToNextRecord
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
protected boolean positionToNextRecord(DataInput dIn) throws IOException {
  // Sanity check
  if (eof) {
    throw new EOFException("Completed reading " + bytesRead);
  }
  // Read key and value lengths
  currentKeyLength = WritableUtils.readVInt(dIn);
  currentValueLength = WritableUtils.readVInt(dIn);
  bytesRead += WritableUtils.getVIntSize(currentKeyLength) +
      WritableUtils.getVIntSize(currentValueLength);
  // Check for EOF
  if (currentKeyLength == EOF_MARKER && currentValueLength == EOF_MARKER) {
    eof = true;
    return false;
  }
  // Sanity check
  if (currentKeyLength < 0) {
    throw new IOException("Rec# " + recNo + ": Negative key-length: " +
        currentKeyLength);
  }
  if (currentValueLength < 0) {
    throw new IOException("Rec# " + recNo + ": Negative value-length: " +
        currentValueLength);
  }
  return true;
}
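Note that the two lengths are read, and counted into bytesRead, before the EOF check: the terminator is simply a pair of vint-encoded -1 values, each a single byte since -1 falls in the one-byte range [-112, 127].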
Example 10: reserveSpace
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
boolean reserveSpace(DataInputBuffer key, DataInputBuffer value)
    throws IOException {
  int keyLength = key.getLength() - key.getPosition();
  int valueLength = value.getLength() - value.getPosition();
  int requestedSize = keyLength + valueLength +
      WritableUtils.getVIntSize(keyLength) +
      WritableUtils.getVIntSize(valueLength);
  return reserveSpace(requestedSize);
}
Example 11: write
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Write the key and value to the cache in the IFile format.
 * @param key
 * @param value
 * @throws IOException
 */
public void write(DataInputBuffer key, DataInputBuffer value)
    throws IOException {
  int keyLength = key.getLength() - key.getPosition();
  int valueLength = value.getLength() - value.getPosition();
  WritableUtils.writeVInt(dataOut, keyLength);
  WritableUtils.writeVInt(dataOut, valueLength);
  dataOut.write(key.getData(), key.getPosition(), keyLength);
  dataOut.write(value.getData(), value.getPosition(), valueLength);
  usedSize += keyLength + valueLength +
      WritableUtils.getVIntSize(keyLength) +
      WritableUtils.getVIntSize(valueLength);
  LOG.debug("ID: " + segmentList.size() + " WRITE TO MEM");
}
Example 12: vintToBytes
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * @param vint Integer to make a vint of.
 * @return Vint as bytes array.
 */
public static byte[] vintToBytes(final long vint) {
  long i = vint;
  int size = WritableUtils.getVIntSize(i);
  byte[] result = new byte[size];
  int offset = 0;
  if (i >= -112 && i <= 127) {
    result[offset] = (byte) i;
    return result;
  }
  int len = -112;
  if (i < 0) {
    i ^= -1L; // take one's complement
    len = -120;
  }
  long tmp = i;
  while (tmp != 0) {
    tmp = tmp >> 8;
    len--;
  }
  result[offset++] = (byte) len;
  len = (len < -120) ? -(len + 120) : -(len + 112);
  for (int idx = len; idx != 0; idx--) {
    int shiftbits = (idx - 1) * 8;
    long mask = 0xFFL << shiftbits;
    result[offset++] = (byte) ((i & mask) >> shiftbits);
  }
  return result;
}
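A quick sanity check, as a sketch, is to compare this hand-rolled encoder against Hadoop's own writer; the two should produce identical bytes:

// Sketch: vintToBytes should match what WritableUtils.writeVLong emits
DataOutputBuffer out = new DataOutputBuffer();
WritableUtils.writeVLong(out, 300L);
byte[] expected = Arrays.copyOf(out.getData(), out.getLength());
assert Arrays.equals(expected, vintToBytes(300L));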
Example 13: addAfterRowFamilyQualifier
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/***************** internal add methods ************************/

private void addAfterRowFamilyQualifier(Cell cell) {
  // timestamps
  timestamps[totalCells] = cell.getTimestamp();
  timestampEncoder.add(cell.getTimestamp());
  // memstore timestamps
  if (includeMvccVersion) {
    mvccVersions[totalCells] = cell.getMvccVersion();
    mvccVersionEncoder.add(cell.getMvccVersion());
    totalUnencodedBytes += WritableUtils.getVIntSize(cell.getMvccVersion());
  } else {
    // must overwrite in case there was a previous version in this array slot
    mvccVersions[totalCells] = 0L;
    if (totalCells == 0) { // only need to do this for the first cell added
      mvccVersionEncoder.add(0L);
    }
    // totalUncompressedBytes += 0; // mvccVersion takes zero bytes when disabled
  }
  // types
  typeBytes[totalCells] = cell.getTypeByte();
  cellTypeEncoder.add(cell.getTypeByte());
  // values
  totalValueBytes += cell.getValueLength();
  // double the array each time we run out of space
  values = ArrayUtils.growIfNecessary(values, totalValueBytes, 2 * totalValueBytes);
  CellUtil.copyValueTo(cell, values, valueOffsets[totalCells]);
  if (cell.getValueLength() > maxValueLength) {
    maxValueLength = cell.getValueLength();
  }
  valueOffsets[totalCells + 1] = totalValueBytes;
  // general
  totalUnencodedBytes += KeyValueUtil.length(cell);
  ++totalCells;
}
Example 14: compare
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  int n1 = WritableUtils.decodeVIntSize(b1[s1]);
  int n2 = WritableUtils.decodeVIntSize(b2[s2]);
  n1 -= WritableUtils.getVIntSize(n1);
  n2 -= WritableUtils.getVIntSize(n2);
  return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
}
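WritableUtils.decodeVIntSize looks only at the first byte to determine how many bytes the leading vint occupies, which lets a raw comparator skip the length prefix without deserializing either key. For reference, the standard pattern in Hadoop's Text.Comparator skips exactly that prefix on both sides:

public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  int n1 = WritableUtils.decodeVIntSize(b1[s1]); // bytes used by the length vint
  int n2 = WritableUtils.decodeVIntSize(b2[s2]);
  return compareBytes(b1, s1 + n1, l1 - n1, b2, s2 + n2, l2 - n2);
}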
Example 15: checkSpec
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
static void checkSpec(GridmixKey a, GridmixKey b) throws Exception {
  final Random r = new Random();
  final long s = r.nextLong();
  r.setSeed(s);
  LOG.info("spec: " + s);
  final DataInputBuffer in = new DataInputBuffer();
  final DataOutputBuffer out = new DataOutputBuffer();
  a.setType(GridmixKey.REDUCE_SPEC);
  b.setType(GridmixKey.REDUCE_SPEC);
  for (int i = 0; i < 100; ++i) {
    final int in_rec = r.nextInt(Integer.MAX_VALUE);
    a.setReduceInputRecords(in_rec);
    final int out_rec = r.nextInt(Integer.MAX_VALUE);
    a.setReduceOutputRecords(out_rec);
    final int out_bytes = r.nextInt(Integer.MAX_VALUE);
    a.setReduceOutputBytes(out_bytes);
    final int min = WritableUtils.getVIntSize(in_rec)
        + WritableUtils.getVIntSize(out_rec)
        + WritableUtils.getVIntSize(out_bytes)
        + WritableUtils.getVIntSize(0);
    assertEquals(min + 2, a.fixedBytes()); // meta + vint min
    final int size = r.nextInt(1024) + a.fixedBytes() + 1;
    setSerialize(a, r.nextLong(), size, out);
    assertEquals(size, out.getLength());
    assertTrue(a.equals(a));
    assertEquals(0, a.compareTo(a));
    in.reset(out.getData(), 0, out.getLength());
    b.readFields(in);
    assertEquals(size, b.getSize());
    assertEquals(in_rec, b.getReduceInputRecords());
    assertEquals(out_rec, b.getReduceOutputRecords());
    assertEquals(out_bytes, b.getReduceOutputBytes());
    assertTrue(a.equals(b));
    assertEquals(0, a.compareTo(b));
    assertEquals(a.hashCode(), b.hashCode());
  }
}