

Java WritableUtils.readVLong Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.io.WritableUtils.readVLong. If you are unsure how WritableUtils.readVLong works or how to use it in practice, the curated code examples below may help. You can also browse further usage examples of org.apache.hadoop.io.WritableUtils, the class this method belongs to.


A total of 15 code examples of the WritableUtils.readVLong method are shown below, sorted by popularity by default.

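Before the per-project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) showing the basic contract: readVLong decodes a variable-length long previously written with WritableUtils.writeVLong. The class name VLongRoundTrip is just an illustrative placeholder.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VLongRoundTrip {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    // writeVLong uses 1 to 9 bytes depending on the magnitude of the value.
    WritableUtils.writeVLong(out, 300L);
    WritableUtils.writeVLong(out, -42L);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    // readVLong returns the values in the order they were written.
    System.out.println(WritableUtils.readVLong(in)); // 300
    System.out.println(WritableUtils.readVLong(in)); // -42
  }
}
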
Example 1: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  // After the RecordStartMark, we expect to get a SEGMENT_HEADER_ID (-1).
  long segmentId = WritableUtils.readVLong(in);
  if (SEGMENT_HEADER_ID != segmentId) {
    throw new IOException("Expected segment header id " + SEGMENT_HEADER_ID
        + "; got " + segmentId);
  }

  // Get the length of the rest of the segment, in bytes.
  long length = WritableUtils.readVLong(in);

  // Now read the actual main byte array.
  if (length > Integer.MAX_VALUE) {
    throw new IOException("Unexpected oversize data array length: "
        + length);
  } else if (length < 0) {
    throw new IOException("Unexpected undersize data array length: "
        + length);
  }
  byte [] segmentData = new byte[(int) length];
  in.readFully(segmentData);
  recordLenBytes = new BytesWritable(segmentData);

  reset(); // Reset the iterator allowing the user to yield offset/lengths.
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 26, Source: LobFile.java

Example 2: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  id = WritableUtils.readVInt(in);
  sleepDuration = WritableUtils.readVLong(in);
  nMaps = WritableUtils.readVInt(in);
  nSpec = WritableUtils.readVInt(in);
  if (reduceDurations.length < nSpec) {
    reduceDurations = new long[nSpec];
  }
  for (int i = 0; i < nSpec; ++i) {
    reduceDurations[i] = WritableUtils.readVLong(in);
  }
  final int nLoc = WritableUtils.readVInt(in);
  if (nLoc != locations.length) {
    locations = new String[nLoc];
  }
  for (int i = 0; i < nLoc; ++i) {
    locations[i] = Text.readString(in);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: SleepJob.java

Example 3: readCompactBlockArray

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: FSImageSerialization.java

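For context on Example 3 above: the block sizes and generation stamps are stored as vlong deltas against the previous block, which is why the reader adds prev.getNumBytes() and prev.getGenerationStamp() back. The sketch below is a hypothetical counterpart writer (not the actual FSImageSerialization code) showing how such a compact array could be produced.

import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.WritableUtils;

public static void writeCompactBlockArray(
    Block[] blocks, DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    out.writeLong(b.getBlockId());
    // Sizes and generation stamps are written as deltas against the
    // previous block, so readCompactBlockArray can undo the subtraction.
    long prevSize = (prev != null) ? prev.getNumBytes() : 0;
    long prevGs = (prev != null) ? prev.getGenerationStamp() : 0;
    WritableUtils.writeVLong(out, b.getNumBytes() - prevSize);
    WritableUtils.writeVLong(out, b.getGenerationStamp() - prevGs);
    prev = b;
  }
}
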
Example 4: afterDecodingKeyValue

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
protected final void afterDecodingKeyValue(DataInputStream source,
    ByteBuffer dest, HFileBlockDefaultDecodingContext decodingCtx) throws IOException {
  if (decodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = ByteBufferUtils.readCompressedInt(source);
    // Put as unsigned short
    dest.put((byte) ((tagsLength >> 8) & 0xff));
    dest.put((byte) (tagsLength & 0xff));
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = decodingCtx.getTagCompressionContext();
      // When tag compression is in use for this file, tagCompressionContext
      // will be non-null.
      if (tagCompressionContext != null) {
        tagCompressionContext.uncompressTags(source, dest, tagsLength);
      } else {
        ByteBufferUtils.copyFromStreamToBuffer(dest, source, tagsLength);
      }
    }
  }
  if (decodingCtx.getHFileContext().isIncludesMvcc()) {
    long memstoreTS = -1;
    try {
      // Copy memstore timestamp from the data input stream to the byte
      // buffer.
      memstoreTS = WritableUtils.readVLong(source);
      ByteBufferUtils.writeVLong(dest, memstoreTS);
    } catch (IOException ex) {
      throw new RuntimeException("Unable to copy memstore timestamp " +
          memstoreTS + " after decoding a key/value");
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: BufferedDataBlockEncoder.java

Example 5: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  keyId = WritableUtils.readVInt(in);
  expiryDate = WritableUtils.readVLong(in);
  int len = WritableUtils.readVIntInRange(in, -1, MAX_KEY_LEN);
  if (len == -1) {
    keyBytes = null;
  } else {
    keyBytes = new byte[len];
    in.readFully(keyBytes);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 15, Source: DelegationKey.java

Example 6: next

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Advances the iterator to describe the next record in the IndexSegment.
 * Must be called before accessing the first record.
 * @return true if there is another record described in this IndexSegment.
 */
public boolean next() {
  this.prevOffset = this.curOffset;
  if (null == dataInputBuf) {
    // We need to set up the iterator; this is the first use.
    if (null == recordLenBytes) {
      return false; // We don't have any records?
    }

    this.dataInputBuf = new DataInputBuffer();
    this.dataInputBuf.reset(recordLenBytes.getBytes(),
        0, recordLenBytes.getLength());

    this.curOffset = this.tableEntry.getFirstIndexOffset();
    this.prevOffset = 0;
  } else {
    this.curOffset += this.curLen;
  }

  boolean available = dataInputBuf.getPosition() < dataInputBuf.getLength();
  if (available) {
    this.prevInputBufPos = dataInputBuf.getPosition();
    // Then read out the next record length.
    try {
      this.prevLen = this.curLen;
      this.curLen = WritableUtils.readVLong(dataInputBuf);
    } catch (IOException ioe) {
      // Shouldn't happen; data in DataInputBuffer is materialized.
      throw new RuntimeException(ioe);
    }
  }

  return available;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 40, Source: LobFile.java

Example 7: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Reads the values of each field.
 *
 * @param in  The input to read from.
 * @throws IOException When reading the input fails.
 */
@Override
public void readFields(DataInput in) throws IOException {
  Version version = Version.UNVERSIONED;
  // TableSplit was not versioned in the beginning.
  // In order to introduce it now, we make use of the fact
  // that tableName was written with Bytes.writeByteArray,
  // which encodes the array length as a vint which is >= 0.
  // Hence if the vint is >= 0 we have an old version and the vint
  // encodes the length of tableName.
  // If < 0 we just read the version and the next vint is the length.
  // @see Bytes#readByteArray(DataInput)
  int len = WritableUtils.readVInt(in);
  if (len < 0) {
    // what we just read was the version
    version = Version.fromCode(len);
    len = WritableUtils.readVInt(in);
  }
  byte[] tableNameBytes = new byte[len];
  in.readFully(tableNameBytes);
  tableName = TableName.valueOf(tableNameBytes);
  startRow = Bytes.readByteArray(in);
  endRow = Bytes.readByteArray(in);
  regionLocation = Bytes.toString(Bytes.readByteArray(in));
  if (version.atLeast(Version.INITIAL)) {
    scan = Bytes.toString(Bytes.readByteArray(in));
  }
  length = WritableUtils.readVLong(in);
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: TableSplit.java

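The versioning trick that Example 7 reads back only works if the writer emits a negative version code before the table name, because Bytes.writeByteArray always starts with a non-negative vint length. Below is a simplified, hypothetical sketch of such a write side; the field names and the -1 version code are illustrative, not the exact HBase TableSplit implementation.

import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

public void write(DataOutput out) throws IOException {
  // A negative vint can never be a Bytes.writeByteArray length, so a reader
  // can tell a versioned split apart from a legacy unversioned one.
  WritableUtils.writeVInt(out, -1); // hypothetical version code
  Bytes.writeByteArray(out, tableName.getName());
  Bytes.writeByteArray(out, startRow);
  Bytes.writeByteArray(out, endRow);
  Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
  Bytes.writeByteArray(out, Bytes.toBytes(scan));
  WritableUtils.writeVLong(out, length);
}
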
Example 8: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  rec_in = WritableUtils.readVLong(in);
  rec_out = WritableUtils.readVLong(in);
  bytes_out = WritableUtils.readVLong(in);
  sizeOfResourceUsageMetrics =  WritableUtils.readVInt(in);
  if (sizeOfResourceUsageMetrics > 0) {
    metrics = new ResourceUsageMetrics();
    metrics.readFields(in);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: GridmixKey.java

Example 9: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public synchronized void readFields(DataInput in) throws IOException {
  name = StringInterner.weakIntern(Text.readString(in));
  displayName = in.readBoolean() ? 
      StringInterner.weakIntern(Text.readString(in)) : name;
  value = WritableUtils.readVLong(in);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: GenericCounter.java

Example 10: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  int len = WritableUtils.readVInt(in);
  locations = new String[len];
  for (int i = 0; i < locations.length; i++) {
    locations[i] = Text.readString(in);
  }
  startOffset = WritableUtils.readVLong(in);
  inputDataLength = WritableUtils.readVLong(in);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: JobSplit.java

Example 11: isTokenExpired

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Check if a token is expired. For unit tests only. Returns true when the
 * token is expired, false otherwise.
 */
static boolean isTokenExpired(Token<BlockTokenIdentifier> token)
    throws IOException {
  ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  long expiryDate = WritableUtils.readVLong(in);
  return isExpired(expiryDate);
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: BlockTokenSecretManager.java

Example 12: openIndex

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
/**
 * Get the first index segment out of the file; determine
 * where that is by loading the index locator at the end of
 * the file.
 */
private void openIndex() throws IOException {
  // Jump to the end of the file.
  // At the end of the file is a RSM followed by two VLongs;
  // the first of these is the value -2 (one byte) and the
  // second of these is the offset of the beginning of the index (up to
  // 9 bytes).
  internalSeek(fileLen - RecordStartMark.START_MARK_LENGTH - 10);

  byte [] finaleBuffer = new byte[RecordStartMark.START_MARK_LENGTH + 10];
  this.dataIn.readFully(finaleBuffer);

  // Figure out where in the finaleBuffer the RSM actually starts,
  // as the finale might not fully fill the finaleBuffer.
  int rsmStart = findRecordStartMark(finaleBuffer);
  if (-1 == rsmStart) {
    throw new IOException(
        "Corrupt file index; could not find index start offset.");
  }

  // Wrap a buffer around those two vlongs.
  int vlongStart = rsmStart + RecordStartMark.START_MARK_LENGTH;
  DataInputBuffer inBuf = new DataInputBuffer();
  inBuf.reset(finaleBuffer, vlongStart, finaleBuffer.length - vlongStart);

  long offsetMarker = WritableUtils.readVLong(inBuf);
  if (SEGMENT_OFFSET_ID != offsetMarker) {
    // This isn't the correct signature; we got an RSM ahead of some
    // other data.
    throw new IOException("Invalid segment offset id: " + offsetMarker);
  }

  // This will contain the position of the IndexTable.
  long indexTableStart = WritableUtils.readVLong(inBuf);
  LOG.debug("IndexTable begins at " + indexTableStart);

  readIndexTable(indexTableStart);

  // Set up to read records from the beginning of the file. This
  // starts with the first IndexSegment.
  curIndexSegmentId = 0;
  loadIndexSegment();

  // This has moved the file pointer all over but we don't need to
  // worry about resetting it now. The next() method will seek the
  // file pointer to the first record when the user is ready to
  // consume it.
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 53, Source: LobFile.java

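Example 12 seeks back RecordStartMark.START_MARK_LENGTH + 10 bytes because a vlong never occupies more than 9 bytes on disk (plus one byte for the -2 marker). A small standalone check of that assumption, using WritableUtils.getVIntSize (illustrative, not part of LobFile):

import org.apache.hadoop.io.WritableUtils;

public class VLongSizes {
  public static void main(String[] args) {
    // getVIntSize reports how many bytes a value occupies when vlong-encoded.
    System.out.println(WritableUtils.getVIntSize(-2L));            // 1
    System.out.println(WritableUtils.getVIntSize(127L));           // 1
    System.out.println(WritableUtils.getVIntSize(1000000L));       // 4
    System.out.println(WritableUtils.getVIntSize(Long.MAX_VALUE)); // 9
  }
}
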
Example 13: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
@Override
public void readFields(DataInput dataInput) throws IOException {
  rk = WritableUtils.readVLong(dataInput);
  next = WritableUtils.readVLong(dataInput);
}
 
Developer: fengchen8086, Project: ditb, Lines: 6, Source: IntegrationTestBulkLoad.java

Example 14: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  file = new Path(WritableUtils.readString(in));
  len = WritableUtils.readVLong(in);
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: GenericMRLoadGenerator.java

Example 15: readFields

import org.apache.hadoop.io.WritableUtils; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  firstRow = WritableUtils.readVLong(in);
  rowCount = WritableUtils.readVLong(in);
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: TeraGen.java


Note: The org.apache.hadoop.io.WritableUtils.readVLong method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open source code and documentation platforms. The code snippets were selected from open source projects contributed by many developers; copyright of the source code belongs to its original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.