

Java WritableUtils Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.io.WritableUtils. If you are wondering what the WritableUtils class is for, how to use it, or what real-world usage looks like, the hand-picked code examples below should help.


The WritableUtils class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
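Before diving into the examples, here is a minimal, self-contained round-trip sketch (written for this article, not taken from any of the projects below) showing the WritableUtils calls that recur throughout this page: variable-length int/long encoding, string serialization, and probing the encoded size.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class WritableUtilsRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);

    // Variable-length encodings: small values occupy a single byte.
    WritableUtils.writeVInt(out, 42);
    WritableUtils.writeVLong(out, 1000000L);
    WritableUtils.writeString(out, "hello");
    out.close();

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
    int i = WritableUtils.readVInt(in);       // 42
    long l = WritableUtils.readVLong(in);     // 1000000
    String s = WritableUtils.readString(in);  // "hello"

    // getVIntSize reports the encoded length without writing anything.
    System.out.println(i + " " + l + " " + s
        + ", vint(42) takes " + WritableUtils.getVIntSize(42) + " byte(s)");
  }
}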

Example 1: writeCompactBlockArray

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source: FSImageSerialization.java
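For context, the read path simply reverses the deltas. The sketch below is a simplified reconstruction written for this article (not the upstream FSImageSerialization.readCompactBlockArray verbatim); it assumes the Block class from org.apache.hadoop.hdfs.protocol and its (blockId, numBytes, generationStamp) constructor.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.io.WritableUtils;

public static Block[] readCompactBlockArraySketch(DataInput in)
    throws IOException {
  int num = WritableUtils.readVInt(in);
  Block[] blocks = new Block[num];
  Block prev = null;
  for (int i = 0; i < num; i++) {
    long id = in.readLong();                // block id was written raw
    long sz = WritableUtils.readVLong(in)   // undo the size delta
        + (prev != null ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in)   // undo the genstamp delta
        + (prev != null ? prev.getGenerationStamp() : 0);
    blocks[i] = new Block(id, sz, gs);
    prev = blocks[i];
  }
  return blocks;
}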

Example 2: writeRecords

import org.apache.hadoop.io.WritableUtils; // import the required package/class
private long writeRecords(int count, boolean knownKeyLength,
    boolean knownValueLength, boolean close) throws IOException {
  long rawDataSize = 0;
  for (int nx = 0; nx < count; nx++) {
    String key = TestTFileByteArrays.composeSortedKey("key", nx);
    DataOutputStream outKey =
        writer.prepareAppendKey(knownKeyLength ? key.length() : -1);
    outKey.write(key.getBytes());
    outKey.close();
    String value = "value" + nx;
    DataOutputStream outValue =
        writer.prepareAppendValue(knownValueLength ? value.length() : -1);
    outValue.write(value.getBytes());
    outValue.close();
    rawDataSize +=
        WritableUtils.getVIntSize(key.getBytes().length)
            + key.getBytes().length
            + WritableUtils.getVIntSize(value.getBytes().length)
            + value.getBytes().length;
  }
  if (close) {
    closeOutput();
  }
  return rawDataSize;
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 26, Source: TestTFileStreams.java
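As a quick illustration of the rawDataSize arithmetic above: WritableUtils.getVIntSize reports how many bytes a value occupies in Hadoop's variable-length encoding, where values in [-112, 127] take one byte and larger magnitudes take a length byte plus the payload. The figures below are a sketch derived from those rules, not from the test itself.

WritableUtils.getVIntSize(100);      // 1 byte  (fits the single-byte range)
WritableUtils.getVIntSize(1000);     // 3 bytes (1 length byte + 2 data bytes)
WritableUtils.getVIntSize(1000000);  // 4 bytes (1 length byte + 3 data bytes)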

Example 3: setupResponseOldVersionFatal

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Setup response for the IPC Call on Fatal Error from a 
 * client that is using an old version of Hadoop.
 * The response is serialized using the previous protocol's response
 * layout.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponseOldVersionFatal(ByteArrayOutputStream response, 
                           Call call,
                           Writable rv, String errorClass, String error) 
throws IOException {
  final int OLD_VERSION_FATAL_STATUS = -1;
  response.reset();
  DataOutputStream out = new DataOutputStream(response);
  out.writeInt(call.callId);                // write call id
  out.writeInt(OLD_VERSION_FATAL_STATUS);   // write FATAL_STATUS
  WritableUtils.writeString(out, errorClass);
  WritableUtils.writeString(out, error);

  if (call.connection.useWrap) {
    wrapWithSasl(response, call);
  }
  call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
 
Developer: naver, Project: hadoop, Lines of code: 31, Source: Server.java

Example 4: _readMvccVersion

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Actually do the mvcc read. Does no checks.
 * @param position offset in the block buffer at which the vint-encoded mvcc version starts
 */
private void _readMvccVersion(final int position) {
  // This is Bytes#bytesToVint inlined to save a few instructions in this hot method;
  // previously, for a one-byte vint, we would redo the vint call just to find its size.
  // The method is also kept small so it can be inlined.
  byte firstByte = blockBuffer.array()[position];
  int len = WritableUtils.decodeVIntSize(firstByte);
  if (len == 1) {
    this.currMemstoreTS = firstByte;
  } else {
    long i = 0;
    for (int idx = 0; idx < len - 1; idx++) {
      byte b = blockBuffer.array()[position + 1 + idx];
      i = i << 8;
      i = i | (b & 0xFF);
    }
    currMemstoreTS = (WritableUtils.isNegativeVInt(firstByte) ? ~i : i);
  }
  this.currMemstoreTSLen = len;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 24, Source: HFileReaderV2.java
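For comparison, the same vlong wire format can be decoded without the hand-inlining by delegating to WritableUtils directly. The helper below is a hypothetical sketch (it assumes a plain byte[] plus offset rather than the reader's blockBuffer) and trades the micro-optimization for readability.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

// Decode one vlong-encoded mvcc version starting at 'position' in 'bytes'.
static long readMvccVersionViaWritableUtils(byte[] bytes, int position)
    throws IOException {
  DataInputStream in = new DataInputStream(
      new ByteArrayInputStream(bytes, position, bytes.length - position));
  return WritableUtils.readVLong(in); // same format the inlined loop decodes
}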

Example 5: setupResponse

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Setup response for the IPC Call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, 
                           Call call, Status status, 
                           Writable rv, String errorClass, String error) 
throws IOException {
  response.reset();
  DataOutputStream out = new DataOutputStream(response);
  out.writeInt(call.id);                // write call id
  out.writeInt(status.state);           // write status

  if (status == Status.SUCCESS) {
    rv.write(out);
  } else {
    WritableUtils.writeString(out, errorClass);
    WritableUtils.writeString(out, error);
  }
  /*if (call.connection.useWrap) {
    wrapWithSasl(response, call);
  }*/
  call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
 
Developer: spafka, Project: spark_deep, Lines of code: 32, Source: Server.java

Example 6: readFields

import org.apache.hadoop.io.WritableUtils; // import the required package/class
@Override public void readFields(DataInput in) throws IOException {
  family = WritableUtils.readCompressedByteArray(in);
  qualifier = WritableUtils.readCompressedByteArray(in);
  dataType = DataType.valueOf(WritableUtils.readString(in));
  isIndex = WritableUtils.readVInt(in) == 1;
  hashCode = calHashCode();
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 8, Source: ColumnInfo.java

Example 7: write

import org.apache.hadoop.io.WritableUtils; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  WritableUtils.writeVInt(out, id);
  WritableUtils.writeVInt(out, maps);
  WritableUtils.writeVLong(out, inputRecords);
  WritableUtils.writeVLong(out, outputBytes);
  WritableUtils.writeVLong(out, outputRecords);
  WritableUtils.writeVLong(out, maxMemory);
  WritableUtils.writeVInt(out, reduces);
  for (int i = 0; i < reduces; ++i) {
    out.writeDouble(reduceBytes[i]);
    out.writeDouble(reduceRecords[i]);
  }
  WritableUtils.writeVInt(out, nSpec);
  for (int i = 0; i < nSpec; ++i) {
    WritableUtils.writeVLong(out, reduceOutputBytes[i]);
    WritableUtils.writeVLong(out, reduceOutputRecords[i]);
  }
  mapMetrics.write(out);
  int numReduceMetrics = (reduceMetrics == null) ? 0 : reduceMetrics.length;
  WritableUtils.writeVInt(out, numReduceMetrics);
  for (int i = 0; i < numReduceMetrics; ++i) {
    reduceMetrics[i].write(out);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 27, Source: LoadSplit.java

Example 8: add

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Adds a new entry to this block index chunk.
 *
 * @param firstKey the first key in the block pointed to by this entry
 * @param blockOffset the offset of the next-level block pointed to by this
 *          entry
 * @param onDiskDataSize the on-disk data size of the block pointed to by this
 *          entry, including header size
 * @param curTotalNumSubEntries if this chunk is the root index chunk under
 *          construction, this specifies the current total number of
 *          sub-entries in all leaf-level chunks, including the one
 *          corresponding to the second-level entry being added.
 */
void add(byte[] firstKey, long blockOffset, int onDiskDataSize,
    long curTotalNumSubEntries) {
  // Record the offset for the secondary index
  secondaryIndexOffsetMarks.add(curTotalNonRootEntrySize);
  curTotalNonRootEntrySize += SECONDARY_INDEX_ENTRY_OVERHEAD
      + firstKey.length;

  curTotalRootSize += Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT
      + WritableUtils.getVIntSize(firstKey.length) + firstKey.length;

  blockKeys.add(firstKey);
  blockOffsets.add(blockOffset);
  onDiskDataSizes.add(onDiskDataSize);

  if (curTotalNumSubEntries != -1) {
    numSubEntriesAt.add(curTotalNumSubEntries);

    // Make sure the parallel arrays are in sync.
    if (numSubEntriesAt.size() != blockKeys.size()) {
      throw new IllegalStateException("Only have key/value count " +
          "stats for " + numSubEntriesAt.size() + " block index " +
          "entries out of " + blockKeys.size());
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 39, Source: HFileBlockIndex.java

Example 9: readFields

import org.apache.hadoop.io.WritableUtils; // import the required package/class
public void readFields(DataInput in) throws IOException {
  // After the RecordStartMark, we expect to get a SEGMENT_HEADER_ID (-1).
  long segmentId = WritableUtils.readVLong(in);
  if (SEGMENT_HEADER_ID != segmentId) {
    throw new IOException("Expected segment header id " + SEGMENT_HEADER_ID
        + "; got " + segmentId);
  }

  // Get the length of the rest of the segment, in bytes.
  long length = WritableUtils.readVLong(in);

  // Now read the actual main byte array.
  if (length > Integer.MAX_VALUE) {
    throw new IOException("Unexpected oversize data array length: "
        + length);
  } else if (length < 0) {
    throw new IOException("Unexpected undersize data array length: "
        + length);
  }
  byte [] segmentData = new byte[(int) length];
  in.readFully(segmentData);
  recordLenBytes = new BytesWritable(segmentData);

  reset(); // Reset the iterator allowing the user to yield offset/lengths.
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 26, Source: LobFile.java

Example 10: readClass

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/** Reads and returns the class as written by {@link #writeClass(DataOutput, Class)} */
static Class<?> readClass(Configuration conf, DataInput in) throws IOException {
  Class<?> instanceClass = null;
  int b = (byte)WritableUtils.readVInt(in);
  if (b == NOT_ENCODED) {
    String className = Text.readString(in);
    try {
      instanceClass = getClassByName(conf, className);
    } catch (ClassNotFoundException e) {
      LOG.error("Can't find class " + className, e);
      throw new IOException("Can't find class " + className, e);
    }
  } else {
    instanceClass = CODE_TO_CLASS.get(b);
  }
  return instanceClass;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 18, Source: HbaseObjectWritableFor96Migration.java

Example 11: append

import org.apache.hadoop.io.WritableUtils; // import the required package/class
public void append(DataInputBuffer key, DataInputBuffer value)
throws IOException {
  int keyLength = key.getLength() - key.getPosition();
  if (keyLength < 0) {
    throw new IOException("Negative key-length not allowed: " + keyLength + 
                          " for " + key);
  }
  
  int valueLength = value.getLength() - value.getPosition();
  if (valueLength < 0) {
    throw new IOException("Negative value-length not allowed: " + 
                          valueLength + " for " + value);
  }

  WritableUtils.writeVInt(out, keyLength);
  WritableUtils.writeVInt(out, valueLength);
  out.write(key.getData(), key.getPosition(), keyLength); 
  out.write(value.getData(), value.getPosition(), valueLength); 
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source: InMemoryWriter.java
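Reading such a record back mirrors the two vint length prefixes. The helper below is a hypothetical sketch written for this article, not part of InMemoryWriter; it assumes the stream is positioned at the start of a record.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

// Read one record written as <vint keyLen><vint valueLen><key bytes><value bytes>.
static void readRecordSketch(DataInputStream in) throws IOException {
  int keyLength = WritableUtils.readVInt(in);
  int valueLength = WritableUtils.readVInt(in);
  byte[] key = new byte[keyLength];
  byte[] value = new byte[valueLength];
  in.readFully(key);
  in.readFully(value);
  // hand key/value off to whatever consumes the record
}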

Example 12: readFields

import org.apache.hadoop.io.WritableUtils; // import the required package/class
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class); 
  this.startTime = in.readLong(); 
  this.finishTime = in.readLong(); 
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
 
Developer: naver, Project: hadoop, Lines of code: 17, Source: TaskStatus.java

Example 13: writeCompressed

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * Compresses and writes an array to a DataOutput
 * 
 * @param data the array to write.
 * @param offset the start offset in {@code data}
 * @param length the number of bytes of {@code data} to write
 * @param out the DataOutput to write into
 * @param dict the dictionary to use for compression
 */
@Deprecated
static void writeCompressed(byte[] data, int offset, int length,
    DataOutput out, Dictionary dict)
    throws IOException {
  short dictIdx = Dictionary.NOT_IN_DICTIONARY;
  if (dict != null) {
    dictIdx = dict.findEntry(data, offset, length);
  }
  if (dictIdx == Dictionary.NOT_IN_DICTIONARY) {
    // not in dict
    out.writeByte(Dictionary.NOT_IN_DICTIONARY);
    WritableUtils.writeVInt(out, length);
    out.write(data, offset, length);
  } else {
    out.writeShort(dictIdx);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 25, Source: Compressor.java

Example 14: compare

import org.apache.hadoop.io.WritableUtils; // import the required package/class
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  try {
    di.reset(b1, s1, l1);
    final int x1 = WritableUtils.readVInt(di);
    di.reset(b2, s2, l2);
    final int x2 = WritableUtils.readVInt(di);
    final int ret = (b1[s1 + x1] != b2[s2 + x2])
      ? b1[s1 + x1] - b2[s2 + x2]
      : super.compare(b1, s1, x1, b2, s2, x2);
    di.reset(reset, 0, 0);
    return ret;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 17, Source: GridmixKey.java

Example 15: readFields

import org.apache.hadoop.io.WritableUtils; // import the required package/class
/**
 * {@inheritDoc}
 * @throws IOException If the child InputSplit cannot be read, typically
 *                     for failing access checks.
 */
@SuppressWarnings("unchecked")  // Generic array assignment
public void readFields(DataInput in) throws IOException {
  int card = WritableUtils.readVInt(in);
  if (splits == null || splits.length != card) {
    splits = new InputSplit[card];
  }
  Class<? extends InputSplit>[] cls = new Class[card];
  try {
    for (int i = 0; i < card; ++i) {
      cls[i] =
        Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
    }
    for (int i = 0; i < card; ++i) {
      splits[i] = ReflectionUtils.newInstance(cls[i], null);
      SerializationFactory factory = new SerializationFactory(conf);
      Deserializer deserializer = factory.getDeserializer(cls[i]);
      deserializer.open((DataInputStream)in);
      splits[i] = (InputSplit)deserializer.deserialize(splits[i]);
    }
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed split init", e);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source: CompositeInputSplit.java


Note: The org.apache.hadoop.io.WritableUtils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; the source code copyright belongs to the original authors, and any use or redistribution should follow the corresponding project's License. Please do not reproduce without permission.