

Java WritableUtils.writeVInt Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.WritableUtils.writeVInt. If you are wondering what WritableUtils.writeVInt does, how to call it, or what working code looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.io.WritableUtils.


Fifteen code examples of the WritableUtils.writeVInt method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
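Before diving in: writeVInt encodes an int in Hadoop's variable-length format, using a single byte for values between -112 and 127 and up to five bytes otherwise, which makes it a compact choice for length prefixes and counts. A minimal round-trip sketch (the class name is ours, not from any example below):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);

    WritableUtils.writeVInt(out, 42);        // small value: 1 byte on the wire
    WritableUtils.writeVInt(out, 1000000);   // larger value: 4 bytes on the wire

    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(WritableUtils.readVInt(in));          // 42
    System.out.println(WritableUtils.readVInt(in));          // 1000000
    System.out.println(WritableUtils.getVIntSize(42));       // 1
    System.out.println(WritableUtils.getVIntSize(1000000));  // 4
  }
}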

Example 1: concatData

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Concat the data in preparation for it to be encrypted.
 *
 * @param entry
 *          Entry to pull data from.
 * @return Concatenated data.
 * @throws IOException
 *           Not actually thrown.
 */
private byte[] concatData(MutableEntry entry) throws IOException {
  ByteArrayOutputStream dataStream = new ByteArrayOutputStream();
  DataOutput dataOut = new DataOutputStream(dataStream);

  for (EntryField source : config.sources) {
    switch (source) {
      case ROW:
      case COLUMN_FAMILY:
      case COLUMN_QUALIFIER:
      case COLUMN_VISIBILITY:
      case VALUE:
        byte[] bytes = entry.getBytes(source);
        WritableUtils.writeVInt(dataOut, bytes.length);
        dataOut.write(bytes);
        break;

      default:
        throw new UnsupportedOperationException();
    }
  }
  return dataStream.toByteArray();
}
 
Developer: mit-ll, Project: PACE, Lines: 32, Source: FieldEncryptor.java
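Because each field is written as a vint length followed by the raw bytes, the decrypt side can recover the fields symmetrically. A hypothetical counterpart sketch (the method name and return shape are ours, not from the PACE source):

// Split the concatenated data back into per-field byte arrays.
private List<byte[]> splitData(byte[] data) throws IOException {
  DataInput in = new DataInputStream(new ByteArrayInputStream(data));
  List<byte[]> fields = new ArrayList<>();
  for (EntryField source : config.sources) {
    byte[] bytes = new byte[WritableUtils.readVInt(in)]; // read the vint length prefix
    in.readFully(bytes);                                 // then the payload
    fields.add(bytes);
  }
  return fields;
}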

Example 2: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  WritableUtils.writeVInt(out, id);
  WritableUtils.writeVInt(out, maps);
  WritableUtils.writeVLong(out, inputRecords);
  WritableUtils.writeVLong(out, outputBytes);
  WritableUtils.writeVLong(out, outputRecords);
  WritableUtils.writeVLong(out, maxMemory);
  WritableUtils.writeVInt(out, reduces);
  for (int i = 0; i < reduces; ++i) {
    out.writeDouble(reduceBytes[i]);
    out.writeDouble(reduceRecords[i]);
  }
  WritableUtils.writeVInt(out, nSpec);
  for (int i = 0; i < nSpec; ++i) {
    WritableUtils.writeVLong(out, reduceOutputBytes[i]);
    WritableUtils.writeVLong(out, reduceOutputRecords[i]);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: GridmixSplit.java

Example 3: getKey

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Get a field encryption key for use in <strong>encrypting</strong> the field.
 * <p>
 * Any metadata needed to retrieve this key later should be written to the DataOutput object.
 *
 * @param visibility
 *          Visibility expression for the field.
 * @param out
 *          DataOutput object to write metadata to.
 * @return Field encryption key.
 * @throws IOException
 *           Not actually thrown.
 */
private byte[] getKey(ColumnVisibility visibility, DataOutput out) throws IOException {
  if (config.encryptUsingVisibility) {
    byte[] key = new byte[config.keyLength];

    if (visibility.getParseTree().getType() != NodeType.EMPTY) {
      random.nextBytes(key);
      writeVisibilityShare(key, visibility.getParseTree(), visibility.getExpression(), out);
    }

    return key;
  } else {
    KeyWithVersion keyData = keys.getKey(config.keyId, config.keyLength);
    WritableUtils.writeVInt(out, keyData.version); // Write the version of the key being used as meta-data.
    return keyData.key;
  }
}
 
Developer: mit-ll, Project: PACE, Lines: 30, Source: FieldEncryptor.java

Example 4: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override public void write(DataOutput out) throws IOException {
  mainColumn.write(out);
  WritableUtils.writeVInt(out, additionalColumns.size());
  for (ColumnInfo ci : additionalColumns) {
    ci.write(out);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: IndexRelationship.java

Example 5: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, keyId);
  WritableUtils.writeVLong(out, expiryDate);
  if (keyBytes == null) {
    WritableUtils.writeVInt(out, -1);
  } else {
    WritableUtils.writeVInt(out, keyBytes.length);
    out.write(keyBytes);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 14, Source: DelegationKey.java
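Writing -1 for a null keyBytes (rather than 0) lets the reader distinguish a missing key from an empty one. A sketch of the matching readFields logic, assuming the same field order as the writer above:

public void readFields(DataInput in) throws IOException {
  keyId = WritableUtils.readVInt(in);
  expiryDate = WritableUtils.readVLong(in);
  int len = WritableUtils.readVInt(in);
  if (len == -1) {
    keyBytes = null;          // the -1 sentinel written above
  } else {
    keyBytes = new byte[len];
    in.readFully(keyBytes);
  }
}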

Example 6: writeByteArray

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Write a byte-array with a WritableUtils.vint prefix.
 * @param out output stream to be written to
 * @param b array to write
 * @throws IOException e
 */
public static void writeByteArray(final DataOutput out, final byte [] b)
throws IOException {
  if(b == null) {
    WritableUtils.writeVInt(out, 0);
  } else {
    writeByteArray(out, b, 0, b.length);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: Bytes.java
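Note that, unlike the previous example, null is written here as a zero length, so null and empty arrays are indistinguishable after deserialization. A sketch of the matching read under that convention:

public static byte[] readByteArray(final DataInput in) throws IOException {
  int len = WritableUtils.readVInt(in);
  byte[] result = new byte[len]; // a null input was written as 0, so this yields an empty array
  in.readFully(result);
  return result;
}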

Example 7: append

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void append(K key, V value) throws IOException {
  if (key.getClass() != keyClass)
    throw new IOException("wrong key class: "+ key.getClass()
                          +" is not "+ keyClass);
  if (value.getClass() != valueClass)
    throw new IOException("wrong value class: "+ value.getClass()
                          +" is not "+ valueClass);

  // Append the 'key'
  keySerializer.serialize(key);
  int keyLength = buffer.getLength();
  if (keyLength < 0) {
    throw new IOException("Negative key-length not allowed: " + keyLength + 
                          " for " + key);
  }

  // Append the 'value'
  valueSerializer.serialize(value);
  int valueLength = buffer.getLength() - keyLength;
  if (valueLength < 0) {
    throw new IOException("Negative value-length not allowed: " + 
                          valueLength + " for " + value);
  }
  
  // Write the record out
  WritableUtils.writeVInt(out, keyLength);                  // key length
  WritableUtils.writeVInt(out, valueLength);                // value length
  out.write(buffer.getData(), 0, buffer.getLength());       // data

  // Reset
  buffer.reset();
  
  // Update bytes written
  decompressedBytesWritten += keyLength + valueLength + 
                              WritableUtils.getVIntSize(keyLength) + 
                              WritableUtils.getVIntSize(valueLength);
  ++numRecordsWritten;
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: IFile.java
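The record framing is a keyLength vint, a valueLength vint, then the serialized key and value back to back; decompressedBytesWritten therefore accounts for the vint prefixes via getVIntSize, not just the payload. A sketch of reading one record back under the same framing (names are ours, not from IFile):

static void readRecord(DataInput in) throws IOException {
  int keyLength = WritableUtils.readVInt(in);   // key length prefix
  int valueLength = WritableUtils.readVInt(in); // value length prefix
  byte[] key = new byte[keyLength];
  byte[] value = new byte[valueLength];
  in.readFully(key);
  in.readFully(value);
  // hand key and value to the matching deserializers...
}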

Example 8: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(DataOutput out) throws IOException {
  // Start with the record type id.
  WritableUtils.writeVLong(out, INDEX_TABLE_ID);

  // Then the count of the records.
  WritableUtils.writeVInt(out, tableEntries.size());

  // Followed by the table itself.
  for (IndexTableEntry entry : tableEntries) {
    entry.write(out);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 13, Source: LobFile.java

Example 9: init

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Open the file and write its header.
 */
private void init() throws IOException {
  FileSystem fs = this.path.getFileSystem(conf);
  FSDataOutputStream fsOut = fs.create(this.path);
  this.countingOut = new CountingOutputStream(
      new BufferedOutputStream(fsOut));
  this.out = new DataOutputStream(this.countingOut);

  // put any necessary config strings into the header.
  MetaBlock m = this.header.getMetaBlock();
  if (isCharData) {
    m.put(MetaBlock.ENTRY_ENCODING_KEY, MetaBlock.CLOB_ENCODING);
  } else {
    m.put(MetaBlock.ENTRY_ENCODING_KEY, MetaBlock.BLOB_ENCODING);
  }

  if (null != codec) {
    m.put(MetaBlock.COMPRESSION_CODEC_KEY, this.codecName);
  }

  // Serialize the value of maxEntriesPerSegment as a VInt in a byte array
  // and put that into the metablock as ENTRIES_PER_SEGMENT_KEY.
  int segmentBufLen = WritableUtils.getVIntSize(this.maxEntriesPerSegment);
  DataOutputBuffer entriesPerSegBuf = new DataOutputBuffer(segmentBufLen);
  WritableUtils.writeVInt(entriesPerSegBuf, this.maxEntriesPerSegment);
  byte [] entriesPerSegArray =
      Arrays.copyOf(entriesPerSegBuf.getData(), segmentBufLen);
  m.put(MetaBlock.ENTRIES_PER_SEGMENT_KEY,
      new BytesWritable(entriesPerSegArray));

  // Write the file header to the file.
  this.header.write(out);

  // Now we're ready to accept record data from the user.
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 38, Source: LobFile.java
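The notable trick here is serializing a single vint into a standalone byte array: getVIntSize sizes the buffer exactly, and Arrays.copyOf trims the DataOutputBuffer's backing array down to the bytes actually written. Decoding is symmetric via org.apache.hadoop.io.DataInputBuffer; a sketch, assuming the metablock value holds exactly the vint bytes stored above:

DataInputBuffer inBuf = new DataInputBuffer();
inBuf.reset(entriesPerSegArray, entriesPerSegArray.length);
int maxEntriesPerSegment = WritableUtils.readVInt(inBuf);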

Example 10: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void write(K key, V value) throws IOException {
  if (key.getClass() != keyClass) {
    throw new IOException("wrong key class: "+ key.getClass()
                          +" is not "+ keyClass);
  }
  if (value.getClass() != valueClass) {
    throw new IOException("wrong value class: "+ value.getClass()
                          +" is not "+ valueClass);
  }
  // Append the 'key'
  keySerializer.serialize(key);
  int keyLength = dataBuffer.getLength();
  if (keyLength < 0) {
    throw new IOException("Negative key-length not allowed: " + keyLength +
                          " for " + key);
  }
  // Append the 'value'
  valueSerializer.serialize(value);
  int valueLength = dataBuffer.getLength() - keyLength;
  if (valueLength < 0) {
    throw new IOException("Negative value-length not allowed: " +
                          valueLength + " for " + value);
  }
  // Write the record out
  WritableUtils.writeVInt(outputStream, keyLength);
  WritableUtils.writeVInt(outputStream, valueLength);
  outputStream.write(dataBuffer.getData(), 0, dataBuffer.getLength());
  // Reset
  dataBuffer.reset();
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestMerge.java

Example 11: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Writes the field values to the output.
 *
 * @param out  The output to write to.
 * @throws IOException When writing the values to the output fails.
 */
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, VERSION.code);
  Bytes.writeByteArray(out, tableName.getName());
  Bytes.writeByteArray(out, startRow);
  Bytes.writeByteArray(out, endRow);
  Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
  Bytes.writeByteArray(out, Bytes.toBytes(scan));
  WritableUtils.writeVLong(out, length);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TableSplit.java
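Writing a version vint first lets the deserializer reject (or adapt to) splits serialized by other versions of the class. A sketch of the matching read, assuming the same field order and a strict version check (the real readFields may handle legacy versions differently):

@Override
public void readFields(DataInput in) throws IOException {
  int version = WritableUtils.readVInt(in);
  if (version != VERSION.code) {
    throw new IOException("Unexpected TableSplit version: " + version);
  }
  tableName = TableName.valueOf(Bytes.readByteArray(in));
  startRow = Bytes.readByteArray(in);
  endRow = Bytes.readByteArray(in);
  regionLocation = Bytes.toString(Bytes.readByteArray(in));
  scan = Bytes.toString(Bytes.readByteArray(in));
  length = WritableUtils.readVLong(in);
}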

Example 12: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVLong(out, expiryDate);
  WritableUtils.writeVInt(out, keyId);
  WritableUtils.writeString(out, userId);
  WritableUtils.writeString(out, blockPoolId);
  WritableUtils.writeVLong(out, blockId);
  WritableUtils.writeVInt(out, modes.size());
  for (AccessMode aMode : modes) {
    WritableUtils.writeEnum(out, aMode);
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: BlockTokenIdentifier.java
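The set of access modes is written as a vint count followed by one writeEnum per element (WritableUtils serializes an enum as its name string). A sketch of the matching read for that portion:

int modeCount = WritableUtils.readVInt(in);
EnumSet<AccessMode> modes = EnumSet.noneOf(AccessMode.class);
for (int i = 0; i < modeCount; i++) {
  modes.add(WritableUtils.readEnum(in, AccessMode.class));
}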

Example 13: testGridmixJobSpecGroupingComparator

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Test (timeout=3000)
public void testGridmixJobSpecGroupingComparator() throws Exception {
  GridmixJob.SpecGroupingComparator test = new GridmixJob.SpecGroupingComparator();

  ByteArrayOutputStream data = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(data);
  WritableUtils.writeVInt(dos, 2);
  WritableUtils.writeVInt(dos, 1);
  // 0: REDUCE SPEC
  WritableUtils.writeVInt(dos, 0);
  WritableUtils.writeVInt(dos, 7);
  WritableUtils.writeVInt(dos, 4);

  byte[] b1 = data.toByteArray();

  byte[] b2 = data.toByteArray();

  // the same object should compare as equal
  assertEquals(0, test.compare(b1, 0, 1, b2, 0, 1));
  b2[2] = 1;
  // for Reduce
  assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
  // by Reduce spec
  b2[2] = 1; // 1: DATA SPEC
  assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
  // comparing equal GridmixKey objects should return 0
  assertEquals(0, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
          new GridmixKey(GridmixKey.DATA, 100, 2)));
  // REDUCE SPEC
  assertEquals(-1, test.compare(
          new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2), new GridmixKey(
          GridmixKey.DATA, 100, 2)));
  assertEquals(1, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
          new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2)));
  // only DATA
  assertEquals(2, test.compare(new GridmixKey(GridmixKey.DATA, 102, 2),
          new GridmixKey(GridmixKey.DATA, 100, 2)));

}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestGridMixClasses.java

Example 14: write

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * FrameworkGroup ::= #counter (key value)*
 */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, size());
  for (int i = 0; i < counters.length; ++i) {
    Counter counter = (C) counters[i];
    if (counter != null) {
      WritableUtils.writeVInt(out, i);
      WritableUtils.writeVLong(out, counter.getValue());
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: FrameworkCounterGroup.java
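Only non-null counters are written, each as an (index, value) pair, so absent slots cost nothing on the wire. A sketch of the sparse decode (the counter lookup is left abstract):

int n = WritableUtils.readVInt(in);        // number of stored counters
for (int j = 0; j < n; j++) {
  int index = WritableUtils.readVInt(in);  // slot in the counters array
  long value = WritableUtils.readVLong(in);
  // find or create the counter for `index`, then set its value...
}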

Example 15: writeJobSplitMetaInfo

import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
private static void writeJobSplitMetaInfo(FileSystem fs, Path filename, 
    FsPermission p, int splitMetaInfoVersion, 
    JobSplit.SplitMetaInfo[] allSplitMetaInfo) 
throws IOException {
  // write the splits meta-info to a file for the job tracker
  FSDataOutputStream out = 
    FileSystem.create(fs, filename, p);
  out.write(JobSplit.META_SPLIT_FILE_HEADER);
  WritableUtils.writeVInt(out, splitMetaInfoVersion);
  WritableUtils.writeVInt(out, allSplitMetaInfo.length);
  for (JobSplit.SplitMetaInfo splitMetaInfo : allSplitMetaInfo) {
    splitMetaInfo.write(out);
  }
  out.close();
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: JobSplitWriter.java
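The layout is a fixed header constant, a version vint, a split-count vint, then the per-split records, so a reader can validate the file before trusting the rest. A sketch of reading the preamble back (field order assumed from the writer above):

FSDataInputStream in = fs.open(filename);
byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
in.readFully(header);                      // must match META_SPLIT_FILE_HEADER
int version = WritableUtils.readVInt(in);  // splitMetaInfoVersion
int numSplits = WritableUtils.readVInt(in); // allSplitMetaInfo.length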


Note: The org.apache.hadoop.io.WritableUtils.writeVInt examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.