Java WritableUtils.writeVLong Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.io.WritableUtils.writeVLong. If you have been wondering exactly what WritableUtils.writeVLong does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also browse further usage examples of org.apache.hadoop.io.WritableUtils, the class this method belongs to.


Fifteen code examples of WritableUtils.writeVLong are shown below, sorted by popularity.
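Before the collected examples, here is a minimal, self-contained round-trip sketch (the class name and the test value are illustrative): writeVLong emits a variable-length encoding of a long (1 to 9 bytes, shorter for small magnitudes), readVLong decodes it, and getVIntSize reports how many bytes a given value occupies.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class VLongRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);

    long value = 1234567L;
    WritableUtils.writeVLong(out, value);   // variable-length encoding, 1-9 bytes
    out.flush();

    // getVIntSize reports the encoded width without re-serializing.
    System.out.println("encoded bytes: " + WritableUtils.getVIntSize(value));

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(baos.toByteArray()));
    System.out.println("round trip ok: " + (WritableUtils.readVLong(in) == value));
  }
}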

Example 1: encode

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out)
    throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int encodedKvSize = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    encodedKvSize += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    encodedKvSize += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return encodedKvSize;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: NoOpDataBlockEncoder.java

Example 2: internalEncode

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public int internalEncode(Cell cell, HFileBlockDefaultEncodingContext encodingContext,
    DataOutputStream out) throws IOException {
  int klength = KeyValueUtil.keyLength(cell);
  int vlength = cell.getValueLength();

  out.writeInt(klength);
  out.writeInt(vlength);
  CellUtil.writeFlatKey(cell, out);
  out.write(cell.getValueArray(), cell.getValueOffset(), vlength);
  int size = klength + vlength + KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE;
  // Write the additional tag into the stream
  if (encodingContext.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    out.writeShort(tagsLength);
    if (tagsLength > 0) {
      out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingContext.getHFileContext().isIncludesMvcc()) {
    WritableUtils.writeVLong(out, cell.getSequenceId());
    size += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return size;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: CopyKeyDataBlockEncoder.java

Example 3: writeCompactBlockArray

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: FSImageSerialization.java
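Since Example 3 delta-encodes the size and generation stamp, the read side must accumulate those deltas back into absolute values. Hadoop's FSImageSerialization provides a matching reader; the sketch below is a simplified, hypothetical reconstruction of that logic (the one-argument signature and the Block constructor usage are assumptions), not the library's exact method.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.WritableUtils;

public final class CompactBlockArrayReader {
  // Hypothetical reader mirroring writeCompactBlockArray: absolute size
  // and generation stamp are rebuilt by adding each stored delta to the
  // previous block's values.
  public static Block[] readCompactBlockArray(DataInput in) throws IOException {
    int num = WritableUtils.readVInt(in);
    Block[] blocks = new Block[num];
    Block prev = null;
    for (int i = 0; i < num; i++) {
      long blockId = in.readLong();          // block id was written raw
      long numBytes = WritableUtils.readVLong(in)
          + (prev != null ? prev.getNumBytes() : 0);
      long genStamp = WritableUtils.readVLong(in)
          + (prev != null ? prev.getGenerationStamp() : 0);
      blocks[i] = new Block(blockId, numBytes, genStamp); // assumed constructor
      prev = blocks[i];
    }
    return blocks;
  }
}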

Example 4: writeImpl

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@VisibleForTesting
void writeImpl(DataOutput out) throws IOException {
  out.writeByte(VERSION);
  owner.write(out);
  renewer.write(out);
  realUser.write(out);
  WritableUtils.writeVLong(out, issueDate);
  WritableUtils.writeVLong(out, maxDate);
  WritableUtils.writeVInt(out, sequenceNumber);
  WritableUtils.writeVInt(out, masterKeyId);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: AbstractDelegationTokenIdentifier.java

Example 5: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Serialize the key id, expiry date and (possibly null) key bytes.
 */
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, keyId);
  WritableUtils.writeVLong(out, expiryDate);
  if (keyBytes == null) {
    WritableUtils.writeVInt(out, -1);
  } else {
    WritableUtils.writeVInt(out, keyBytes.length);
    out.write(keyBytes);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 14, Source: DelegationKey.java
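The -1 written above when keyBytes is null is a sentinel that the read side must honour. Below is a hedged sketch of the matching deserialization method (the field names simply mirror those used in the write method above):

// Sketch of the symmetric read: -1 as the length marks a null key.
public void readFields(DataInput in) throws IOException {
  keyId = WritableUtils.readVInt(in);
  expiryDate = WritableUtils.readVLong(in);
  int len = WritableUtils.readVInt(in);
  if (len == -1) {
    keyBytes = null;                 // -1 is the null-key sentinel
  } else {
    keyBytes = new byte[len];
    in.readFully(keyBytes);
  }
}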

Example 6: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(DataOutput out) throws IOException {
  // Write the SEGMENT_HEADER_ID to distinguish this from a LobRecord.
  WritableUtils.writeVLong(out, SEGMENT_HEADER_ID);

  // The length of the main body of the segment is the length of the
  // data byte array.
  int segmentBytesLen = recordLenBytes.getLength();
  WritableUtils.writeVLong(out, segmentBytesLen);

  // Write the body of the segment.
  out.write(recordLenBytes.getBytes(), 0, segmentBytesLen);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 13, Source: LobFile.java

Example 7: writeBlobRecord

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * {@inheritDoc}
 */
@Override
public OutputStream writeBlobRecord(long claimedLen) throws IOException {
  finishRecord(); // finish any previous record.
  checkForNull(this.out);
  startRecordIndex();
  this.header.getStartMark().write(out);
  LOG.debug("Starting new record; id=" + curEntryId
      + "; claimedLen=" + claimedLen);
  WritableUtils.writeVLong(out, curEntryId);
  WritableUtils.writeVLong(out, claimedLen);
  this.curClaimedLen = claimedLen;
  this.userCountingOutputStream = new CountingOutputStream(
      new CloseShieldOutputStream(out));
  if (null == this.codec) {
    // No codec; pass thru the same OutputStream to the user.
    this.userOutputStream = this.userCountingOutputStream;
  } else {
    // Wrap our CountingOutputStream in a compressing OutputStream to
    // give to the user.
    this.compressor.reset();
    this.userOutputStream = new CompressorStream(
        this.userCountingOutputStream, compressor);
  }

  return this.userOutputStream;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 30, Source: LobFile.java

Example 8: afterEncodingKeyValue

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * @param cell the cell whose trailing tags and mvcc data should be written
 * @param out the stream to write to
 * @param encodingCtx the encoding context holding the HFile metadata
 * @return unencoded size added
 * @throws IOException if writing to the stream fails
 */
protected final int afterEncodingKeyValue(Cell cell, DataOutputStream out,
    HFileBlockDefaultEncodingContext encodingCtx) throws IOException {
  int size = 0;
  if (encodingCtx.getHFileContext().isIncludesTags()) {
    int tagsLength = cell.getTagsLength();
    ByteBufferUtils.putCompressedInt(out, tagsLength);
    // There are some tags to be written
    if (tagsLength > 0) {
      TagCompressionContext tagCompressionContext = encodingCtx.getTagCompressionContext();
      // When tag compression is enabled, tagCompressionContext will have a not null value. Write
      // the tags using Dictionary compression in such a case
      if (tagCompressionContext != null) {
        tagCompressionContext
            .compressTags(out, cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      } else {
        out.write(cell.getTagsArray(), cell.getTagsOffset(), tagsLength);
      }
    }
    size += tagsLength + KeyValue.TAGS_LENGTH_SIZE;
  }
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    // Copy memstore timestamp from the byte buffer to the output stream.
    long memstoreTS = cell.getSequenceId();
    WritableUtils.writeVLong(out, memstoreTS);
    // TODO use a writeVLong which returns the #bytes written so that 2 time parsing can be
    // avoided.
    size += WritableUtils.getVIntSize(memstoreTS);
  }
  return size;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 38, Source: BufferedDataBlockEncoder.java
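The TODO in Example 8 asks for a writeVLong variant that reports how many bytes it wrote. The sketch below is a hypothetical single-pass helper: it reimplements the vlong wire format used by WritableUtils (one byte for values in [-112, 127], otherwise a length-marker byte followed by the value's big-endian payload bytes) and returns the total byte count, so the separate getVIntSize call can be dropped.

import java.io.DataOutput;
import java.io.IOException;

public final class VLongOutput {
  // Hypothetical single-pass variant of WritableUtils.writeVLong that
  // returns the number of bytes written.
  public static int writeVLongCounted(DataOutput out, long i) throws IOException {
    if (i >= -112 && i <= 127) {
      out.writeByte((byte) i);               // fast path: a single byte
      return 1;
    }
    int len = -112;
    if (i < 0) {
      i ^= -1L;                              // one's complement for negatives
      len = -120;
    }
    for (long tmp = i; tmp != 0; tmp >>= 8) {
      len--;                                 // count payload bytes
    }
    out.writeByte((byte) len);               // length-marker byte
    len = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = len; idx != 0; idx--) {
      int shift = (idx - 1) * 8;
      out.writeByte((byte) ((i & (0xFFL << shift)) >> shift));
    }
    return 1 + len;                          // marker byte plus payload
  }
}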

Example 9: testConsistencyWithHadoopVLong

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Test
public void testConsistencyWithHadoopVLong() throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(baos);
  for (long l : testNumbers) {
    baos.reset();
    ByteBuffer b = ByteBuffer.allocate(MAX_VLONG_LENGTH);
    ByteBufferUtils.writeVLong(b, l);
    String bufStr = Bytes.toStringBinary(b.array(),
        b.arrayOffset(), b.position());
    WritableUtils.writeVLong(dos, l);
    String baosStr = Bytes.toStringBinary(baos.toByteArray());
    assertEquals(baosStr, bufStr);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source: TestByteBufferUtils.java

Example 10: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVLong(out, expiryDate);
  WritableUtils.writeVInt(out, keyId);
  WritableUtils.writeString(out, userId);
  WritableUtils.writeString(out, blockPoolId);
  WritableUtils.writeVLong(out, blockId);
  WritableUtils.writeVInt(out, modes.size());
  for (AccessMode aMode : modes) {
    WritableUtils.writeEnum(out, aMode);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: BlockTokenIdentifier.java

Example 11: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, locations.length);
  for (int i = 0; i < locations.length; i++) {
    Text.writeString(out, locations[i]);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: JobSplit.java

Example 12: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput out) throws IOException {
  //TODO Write resources version no too
  WritableUtils.writeVLong(out, cumulativeCpuUsage);  // long #1
  WritableUtils.writeVLong(out, cumulativeGpuUsage);
  WritableUtils.writeVLong(out, virtualMemoryUsage);  // long #2
  WritableUtils.writeVLong(out, physicalMemoryUsage); // long #3
  WritableUtils.writeVLong(out, heapUsage);           // long #4
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: ResourceUsageMetrics.java

Example 13: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(DataOutput out) throws IOException {
  WritableUtils.writeString(out, file.toString());
  WritableUtils.writeVLong(out, len);
}
 
Developer ID: naver, Project: hadoop, Lines: 5, Source: GenericMRLoadGenerator.java

Example 14: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVLong(out, firstRow);
  WritableUtils.writeVLong(out, rowCount);
}
 
Developer ID: naver, Project: hadoop, Lines: 5, Source: TeraGen.java

Example 15: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput dataOutput) throws IOException {
  WritableUtils.writeVLong(dataOutput, chainId);
  WritableUtils.writeVLong(dataOutput, order);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 6, Source: IntegrationTestBulkLoad.java


Note: The org.apache.hadoop.io.WritableUtils.writeVLong examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Consult each project's License before distributing or reusing the code. Do not reproduce without permission.