This article collects typical usage examples of the WritableUtils.readVInt method from the Java class org.apache.hadoop.io.WritableUtils. If you are unsure what WritableUtils.readVInt does, how to call it, or where to find real-world examples, the curated method code examples below may help. You can also explore further how the enclosing class org.apache.hadoop.io.WritableUtils is used.
The following presents 15 code examples of the WritableUtils.readVInt method, sorted by popularity by default.
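Before the examples, here is a minimal, self-contained sketch of the basic pattern: WritableUtils.writeVInt writes an int in a variable-length encoding, and WritableUtils.readVInt reads it back from the matching DataInput. The class name and stream setup below are chosen purely for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
  public static void main(String[] args) throws IOException {
    // Write a few ints with the variable-length encoding.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    WritableUtils.writeVInt(out, 42);
    WritableUtils.writeVInt(out, 123456);

    // Read them back, in the same order, with readVInt.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(WritableUtils.readVInt(in)); // 42
    System.out.println(WritableUtils.readVInt(in)); // 123456
  }
}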
Example 1: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  char[] chars = new char[3];
  for (int i = 0; i < 3; i++) {
    chars[i] = (char) in.readByte();
  }
  // Check that these match what we expect. Throws IOE if not.
  checkHeaderChars(chars);
  this.version = WritableUtils.readVInt(in);
  if (this.version != LATEST_LOB_VERSION) {
    // Right now we only have one version we can handle.
    throw new IOException("Unexpected LobFile version " + this.version);
  }
  this.startMark = new RecordStartMark(in);
  this.metaBlock = new MetaBlock(in);
}
Example 2: readCompressed
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Reads the next compressed entry and returns it as a byte array.
 *
 * @param in the DataInput to read from
 * @param dict the dictionary we use for our read.
 * @return the uncompressed array.
 */
@Deprecated
static byte[] readCompressed(DataInput in, Dictionary dict)
    throws IOException {
  byte status = in.readByte();
  if (status == Dictionary.NOT_IN_DICTIONARY) {
    int length = WritableUtils.readVInt(in);
    // If this isn't in the dictionary, we need to add it to the dictionary.
    byte[] arr = new byte[length];
    in.readFully(arr);
    if (dict != null) dict.addEntry(arr, 0, length);
    return arr;
  } else {
    // Status here is the higher-order byte of the index of the dictionary entry
    // (when it's not Dictionary.NOT_IN_DICTIONARY -- dictionary indices are
    // shorts).
    short dictIdx = toShort(status, in.readByte());
    byte[] entry = dict.getEntry(dictIdx);
    if (entry == null) {
      throw new IOException("Missing dictionary entry for index " + dictIdx);
    }
    return entry;
  }
}
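For context, a hedged sketch of what the matching write side could look like. It assumes the Dictionary exposes a findEntry(byte[], int, int) lookup returning the entry's short index; that method name, and the writeCompressed name itself, are assumptions for illustration, not confirmed by the example above.

static void writeCompressed(DataOutput out, byte[] data, Dictionary dict)
    throws IOException {
  // findEntry(...) is assumed to return the short index of the entry, or
  // Dictionary.NOT_IN_DICTIONARY when the bytes are not yet interned.
  short dictIdx = (dict == null)
      ? Dictionary.NOT_IN_DICTIONARY
      : dict.findEntry(data, 0, data.length);
  if (dictIdx == Dictionary.NOT_IN_DICTIONARY) {
    // Not interned: emit the marker byte, a vint length, then the raw bytes.
    // A real implementation must also intern the missed entry so the writer's
    // dictionary stays in sync with the reader's addEntry call above.
    out.writeByte(Dictionary.NOT_IN_DICTIONARY);
    WritableUtils.writeVInt(out, data.length);
    out.write(data, 0, data.length);
  } else {
    // Interned: emit the two bytes of the short index, high byte first,
    // mirroring toShort(status, in.readByte()) on the read side.
    out.writeShort(dictIdx);
  }
}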
Example 3: compare
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  try {
    di.reset(b1, s1, l1);
    final int x1 = WritableUtils.readVInt(di);
    di.reset(b2, s2, l2);
    final int x2 = WritableUtils.readVInt(di);
    final int ret = (b1[s1 + x1] != b2[s2 + x2])
        ? b1[s1 + x1] - b2[s2 + x2]
        : super.compare(b1, s1, x1, b2, s2, x2);
    di.reset(reset, 0, 0);
    return ret;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Example 4: readCompactBlockArray
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
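The sizes and generation stamps above are delta-encoded against the previous block, which keeps the vlongs small. Below is a hedged sketch of the write side that this read path implies; the method name writeCompactBlockArray and the getBlockId accessor are assumptions for illustration.

public static void writeCompactBlockArray(Block[] blocks, DataOutput out)
    throws IOException {
  // Length first, as a vint, so the reader knows how many blocks follow.
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    out.writeLong(b.getBlockId());
    // Delta-encode size and generation stamp against the previous block;
    // small deltas keep the variable-length encoding compact.
    WritableUtils.writeVLong(out, b.getNumBytes() -
        ((prev != null) ? prev.getNumBytes() : 0));
    WritableUtils.writeVLong(out, b.getGenerationStamp() -
        ((prev != null) ? prev.getGenerationStamp() : 0));
    prev = b;
  }
}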
Example 5: readObject
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length.
  // This makes the "obvious" translations work, so that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
Example 6: getKey
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Retrieve a field encryption key to use in <strong>decrypting</strong> the field.
 * <p>
 * Metadata can be read from the DataInput object. All metadata that was written to the stream should be read out, regardless of whether it is used.
 *
 * @param visibility
 *          Visibility expression for the field.
 * @param in
 *          Stream from which metadata is read.
 * @return Field encryption key.
 * @throws IOException
 *           Not actually thrown.
 */
private byte[] getKey(ColumnVisibility visibility, DataInput in) throws IOException {
  if (config.encryptUsingVisibility) {
    if (visibility.getParseTree().getType() != NodeType.EMPTY) {
      // Rebuild the key from the shares created based on the visibility expression.
      byte[] key = readVisibilityShare(visibility.getParseTree(), visibility.getExpression(), in, false);
      if (key == null) {
        throw new IllegalKeyRequestException();
      }
      return key;
    } else {
      return new byte[config.keyLength];
    }
  } else {
    int version = WritableUtils.readVInt(in);
    return keys.getKey(config.keyId, version, config.keyLength);
  }
}
Example 7: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Read the fields; more than just the index attributes are included.
 *
 * @param in
 * @throws IOException
 */
@Override
public void readFields(DataInput in) throws IOException {
  indexType = IndexType.valueOf(WritableUtils.readString(in));
  if (IndexType.isUserDefinedIndex(indexType)) {
    int size = WritableUtils.readVInt(in);
    indexRelations = new ArrayList<>(size);
    for (int i = 0; i < size; ++i) {
      IndexRelationship relationship = new IndexRelationship();
      relationship.readFields(in);
      indexRelations.add(relationship);
    }
  } else {
    indexFamilyMap = readTreeMap(in);
    familyMap = readTreeMap(in);
  }
}
Example 8: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * {@inheritDoc}
 * @throws IOException If the child InputSplit cannot be read, typically
 *         for failing access checks.
 */
@SuppressWarnings("unchecked") // Generic array assignment
public void readFields(DataInput in) throws IOException {
  int card = WritableUtils.readVInt(in);
  if (splits == null || splits.length != card) {
    splits = new InputSplit[card];
  }
  Class<? extends InputSplit>[] cls = new Class[card];
  try {
    for (int i = 0; i < card; ++i) {
      cls[i] =
          Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
    }
    for (int i = 0; i < card; ++i) {
      splits[i] = ReflectionUtils.newInstance(cls[i], null);
      splits[i].readFields(in);
    }
  } catch (ClassNotFoundException e) {
    throw (IOException) new IOException("Failed split init").initCause(e);
  }
}
Example 9: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Reads the values of each field.
 *
 * @param in The input to read from.
 * @throws IOException When reading the input fails.
 */
@Override
public void readFields(DataInput in) throws IOException {
  Version version = Version.UNVERSIONED;
  // TableSplit was not versioned in the beginning.
  // In order to introduce it now, we make use of the fact
  // that tableName was written with Bytes.writeByteArray,
  // which encodes the array length as a vint which is >= 0.
  // Hence if the vint is >= 0 we have an old version and the vint
  // encodes the length of tableName.
  // If < 0 we just read the version and the next vint is the length.
  // @see Bytes#readByteArray(DataInput)
  int len = WritableUtils.readVInt(in);
  if (len < 0) {
    // What we just read was the version.
    version = Version.fromCode(len);
    len = WritableUtils.readVInt(in);
  }
  byte[] tableNameBytes = new byte[len];
  in.readFully(tableNameBytes);
  tableName = TableName.valueOf(tableNameBytes);
  startRow = Bytes.readByteArray(in);
  endRow = Bytes.readByteArray(in);
  regionLocation = Bytes.toString(Bytes.readByteArray(in));
  if (version.atLeast(Version.INITIAL)) {
    scan = Bytes.toString(Bytes.readByteArray(in));
  }
  length = WritableUtils.readVLong(in);
}
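The negative-vint trick above is a general way to retrofit versioning onto a format that previously started with a non-negative vint length. A minimal, hypothetical sketch of the pattern follows; the method names and the version constant are illustrative, not taken from TableSplit.

// Writer: emit a negative version code first, then the payload.
void write(DataOutput out, byte[] name) throws IOException {
  WritableUtils.writeVInt(out, -2);            // hypothetical version code, always < 0
  WritableUtils.writeVInt(out, name.length);   // then the length, always >= 0
  out.write(name);
}

// Reader: a non-negative first vint means "old, unversioned format",
// and it already is the length; a negative one is the version code.
byte[] read(DataInput in) throws IOException {
  int first = WritableUtils.readVInt(in);
  int len = (first < 0) ? WritableUtils.readVInt(in) : first;
  byte[] name = new byte[len];
  in.readFully(name);
  return name;
}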
Example 10: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  int len = WritableUtils.readVInt(in);
  locations = new String[len];
  for (int i = 0; i < locations.length; i++) {
    locations[i] = Text.readString(in);
  }
  startOffset = WritableUtils.readVLong(in);
  inputDataLength = WritableUtils.readVLong(in);
}
Example 11: decrypt
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Decrypt the given entry.
 *
 * @param entry
 *          {@link MutableEntry} to decrypt.
 * @param result
 *          {@link MutableEntry} to write the result to.
 * @param columnVisibility
 *          The parsed column visibility.
 *
 * @throws IOException
 *           Not actually thrown.
 */
void decrypt(MutableEntry entry, MutableEntry result, ColumnVisibility columnVisibility) throws IOException {
  ByteArrayInputStream ciphertextStream = new ByteArrayInputStream(entry.getBytes(config.destination));
  DataInput ciphertextIn = new DataInputStream(ciphertextStream);
  byte[] key = getKey(columnVisibility, ciphertextIn);
  byte[] ciphertext = new byte[ciphertextStream.available()];
  ciphertextIn.readFully(ciphertext);
  byte[] decryptedData = encryptor.decrypt(key, ciphertext);
  // Break apart the decrypted data.
  ByteArrayInputStream dataStream = new ByteArrayInputStream(decryptedData);
  DataInput dataIn = new DataInputStream(dataStream);
  for (EntryField source : config.sources) {
    switch (source) {
      case ROW:
      case COLUMN_FAMILY:
      case COLUMN_QUALIFIER:
      case COLUMN_VISIBILITY:
      case VALUE:
        int length = WritableUtils.readVInt(dataIn);
        byte[] bytes = new byte[length];
        dataIn.readFully(bytes);
        result.setBytes(source, bytes);
        break;
      // case TIMESTAMP:
      //   result.timestamp = WritableUtils.readVLong(dataIn);
      //   break;
      // case DELETE:
      //   result.delete = dataIn.readBoolean();
      //   break;
      default:
        throw new UnsupportedOperationException();
    }
  }
}
Example 12: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  keyId = WritableUtils.readVInt(in);
  expiryDate = WritableUtils.readVLong(in);
  int len = WritableUtils.readVIntInRange(in, -1, MAX_KEY_LEN);
  if (len == -1) {
    keyBytes = null;
  } else {
    keyBytes = new byte[len];
    in.readFully(keyBytes);
  }
}
Example 13: positionToNextRecord
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
protected boolean positionToNextRecord(DataInput dIn) throws IOException {
  // Sanity check
  if (eof) {
    throw new EOFException("Completed reading " + bytesRead);
  }
  // Read key and value lengths
  currentKeyLength = WritableUtils.readVInt(dIn);
  currentValueLength = WritableUtils.readVInt(dIn);
  bytesRead += WritableUtils.getVIntSize(currentKeyLength) +
      WritableUtils.getVIntSize(currentValueLength);
  // Check for EOF
  if (currentKeyLength == EOF_MARKER && currentValueLength == EOF_MARKER) {
    eof = true;
    return false;
  }
  // Sanity check
  if (currentKeyLength < 0) {
    throw new IOException("Rec# " + recNo + ": Negative key-length: " +
        currentKeyLength);
  }
  if (currentValueLength < 0) {
    throw new IOException("Rec# " + recNo + ": Negative value-length: " +
        currentValueLength);
  }
  return true;
}
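The corresponding writer for this record format emits each record as a vint key length, a vint value length, then the key and value bytes, and terminates the stream with a sentinel pair in the length positions. A hedged sketch of that write path is below; the value of EOF_MARKER (assumed here to be a negative sentinel such as -1) and the helper method names are assumptions.

static final int EOF_MARKER = -1; // assumed sentinel; never a valid length

static void writeRecord(DataOutput out, byte[] key, byte[] value) throws IOException {
  // Lengths first, as vints, mirroring positionToNextRecord above.
  WritableUtils.writeVInt(out, key.length);
  WritableUtils.writeVInt(out, value.length);
  out.write(key);
  out.write(value);
}

static void writeEOF(DataOutput out) throws IOException {
  // Two markers in the length positions tell the reader to stop.
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
}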
Example 14: readKV
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Uncompresses a KeyValue from a DataInput and returns it.
 *
 * @param in the DataInput
 * @param readContext the compressionContext to use.
 * @return an uncompressed KeyValue
 * @throws IOException
 */
public static KeyValue readKV(DataInput in, CompressionContext readContext)
    throws IOException {
  int keylength = WritableUtils.readVInt(in);
  int vlength = WritableUtils.readVInt(in);
  int tagsLength = WritableUtils.readVInt(in);
  int length = (int) KeyValue.getKeyValueDataStructureSize(keylength, vlength, tagsLength);
  byte[] backingArray = new byte[length];
  int pos = 0;
  pos = Bytes.putInt(backingArray, pos, keylength);
  pos = Bytes.putInt(backingArray, pos, vlength);
  // the row
  int elemLen = Compressor.uncompressIntoArray(backingArray,
      pos + Bytes.SIZEOF_SHORT, in, readContext.rowDict);
  checkLength(elemLen, Short.MAX_VALUE);
  pos = Bytes.putShort(backingArray, pos, (short) elemLen);
  pos += elemLen;
  // family
  elemLen = Compressor.uncompressIntoArray(backingArray,
      pos + Bytes.SIZEOF_BYTE, in, readContext.familyDict);
  checkLength(elemLen, Byte.MAX_VALUE);
  pos = Bytes.putByte(backingArray, pos, (byte) elemLen);
  pos += elemLen;
  // qualifier
  elemLen = Compressor.uncompressIntoArray(backingArray, pos, in,
      readContext.qualifierDict);
  pos += elemLen;
  // the rest
  in.readFully(backingArray, pos, length - pos);
  return new KeyValue(backingArray, 0, length);
}
Example 15: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  clear();
  int len = WritableUtils.readVInt(in);
  T[] enums = enumClass.getEnumConstants();
  for (int i = 0; i < len; ++i) {
    int ord = WritableUtils.readVInt(in);
    Counter counter = newCounter(enums[ord]);
    counter.setValue(WritableUtils.readVLong(in));
    counters[ord] = counter;
  }
}