當前位置: 首頁>>代碼示例>>Java>>正文


Java Text.writeString方法代碼示例

本文整理匯總了Java中org.apache.hadoop.io.Text.writeString方法的典型用法代碼示例。如果您正苦於以下問題:Java Text.writeString方法的具體用法?Java Text.writeString怎麽用?Java Text.writeString使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.io.Text的用法示例。


在下文中一共展示了Text.writeString方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
public synchronized void write(DataOutput out) throws IOException {
  // Serializes this job status. Field order must mirror the matching
  // readFields() implementation exactly — do not reorder these writes.
  jobid.write(out);
  out.writeFloat(setupProgress);
  out.writeFloat(mapProgress);
  out.writeFloat(reduceProgress);
  out.writeFloat(cleanupProgress);
  WritableUtils.writeEnum(out, runState);
  out.writeLong(startTime);
  Text.writeString(out, user);
  WritableUtils.writeEnum(out, priority);
  Text.writeString(out, schedulingInfo);
  out.writeLong(finishTime);
  out.writeBoolean(isRetired);
  Text.writeString(out, historyFile);
  Text.writeString(out, jobName);
  Text.writeString(out, trackingUrl);
  Text.writeString(out, jobFile);
  out.writeBoolean(isUber);

  // Serialize the job's ACLs: a count followed by (acl-type, acl-list) pairs.
  out.writeInt(jobACLs.size());
  for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
    WritableUtils.writeEnum(out, entry.getKey());
    entry.getValue().write(out);
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:27,代碼來源:JobStatus.java

示例2: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
public void write(DataOutput out) throws IOException {
  // Serializes this file status; write order must match readFields().
  // All strings are length-capped at Text.DEFAULT_MAX_LEN.
  Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
  out.writeLong(getLen());
  out.writeBoolean(isDirectory());
  out.writeShort(getReplication());
  out.writeLong(getBlockSize());
  out.writeLong(getModificationTime());
  out.writeLong(getAccessTime());
  getPermission().write(out);
  Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
  Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
  out.writeBoolean(isSymlink());
  // The symlink target is only present on the wire when this is a symlink.
  if (isSymlink()) {
    Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
  }
}
 
開發者ID:nucypher,項目名稱:hadoop-oss,代碼行數:18,代碼來源:FileStatus.java

示例3: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {
  // Split id first, then the data-chunk list as a count followed by
  // (concrete-class-name, chunk payload) pairs.
  output.writeInt(splitId);

  if (this.oracleDataChunks != null) {
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk chunk : this.oracleDataChunks) {
      // The class name lets the reader instantiate the correct subtype.
      Text.writeString(output, chunk.getClass().getName());
      chunk.write(output);
    }
  } else {
    // No chunk list at all: an explicit zero count.
    output.writeInt(0);
  }
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:17,代碼來源:OraOopDBInputSplit.java

示例4: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
public void write(DataOutput out) throws IOException {
  // Serializes this task's wire state; order must mirror readFields().
  Text.writeString(out, jobFile);
  taskId.write(out);
  out.writeInt(partition);
  out.writeInt(numSlotsRequired);
  taskStatus.write(out);
  skipRanges.write(out);
  out.writeBoolean(skipping);
  out.writeBoolean(jobCleanup);
  // The cleanup run-state is only written for job-cleanup tasks.
  if (jobCleanup) {
    WritableUtils.writeEnum(out, jobRunStateForCleanup);
  }
  out.writeBoolean(jobSetup);
  out.writeBoolean(writeSkipRecs);
  out.writeBoolean(taskCleanup);
  Text.writeString(out, user);
  // Length-prefixed spill key; note extraData is written between the
  // length and the key bytes — readers must follow the same order.
  out.writeInt(encryptedSpillKey.length);
  extraData.write(out);
  out.write(encryptedSpillKey);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:21,代碼來源:Task.java

示例5: writeHeaderAndWrapStream

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
/**
 * Writes a header recording whether compression is enabled (and, if so,
 * which codec class), then returns the stream wrapped accordingly.
 *
 * @param os the unbuffered stream to write the header to and wrap
 * @return the stream wrapped with the codec's compressor, or a plain
 *         buffered stream when no codec is configured
 * @throws IOException if writing the header or creating the compressing
 *         stream fails
 */
DataOutputStream writeHeaderAndWrapStream(OutputStream os)
throws IOException {
  final boolean compressed = imageCodec != null;
  DataOutputStream header = new DataOutputStream(os);
  header.writeBoolean(compressed);

  if (!compressed) {
    // No codec configured: just add buffering so the returned stream
    // is always buffered.
    return new DataOutputStream(new BufferedOutputStream(os));
  }

  // Record the codec class name so the reader knows how to decompress.
  Text.writeString(header, imageCodec.getClass().getCanonicalName());
  return new DataOutputStream(imageCodec.createOutputStream(os));
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:30,代碼來源:FSImageCompression.java

示例6: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
/**
 * {@inheritDoc}
 */
public void write(DataOutput out) throws IOException {
  // New/old flag first, then the record's concrete class name so the
  // reader can re-instantiate it, then the record fields themselves.
  out.writeBoolean(this.isNew);
  final String recordClassName = this.sqoopRecord.getClass().getName();
  Text.writeString(out, recordClassName);
  this.sqoopRecord.write(out);
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:10,代碼來源:MergeRecord.java

示例7: writeClass

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
/**
 * Writes the encoded class code as defined in CLASS_TO_CODE, or the
 * whole class name (preceded by the NOT_ENCODED sentinel) if the class
 * has no entry in the mapping.
 */
static void writeClass(DataOutput out, Class<?> c) throws IOException {
  final Integer code = CLASS_TO_CODE.get(c);
  if (code != null) {
    WritableUtils.writeVInt(out, code);
  } else {
    // Unknown class: sentinel first, then the fully-qualified name.
    WritableUtils.writeVInt(out, NOT_ENCODED);
    Text.writeString(out, c.getName());
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:13,代碼來源:HbaseObjectWritableFor96Migration.java

示例8: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
public void write(DataOutput out) throws IOException {
  // Location count, then each host name, then the split's offset/length.
  WritableUtils.writeVInt(out, locations.length);
  for (String location : locations) {
    Text.writeString(out, location);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:9,代碼來源:JobSplit.java

示例9: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
public void write(DataOutput out) throws IOException {
  // Total byte count for this split, then its preferred locations.
  out.writeLong(bytes);
  // NOTE(review): loop is bounded by nLoc, not locations.length —
  // assumes nLoc <= locations.length; verify that invariant holds.
  out.writeInt(nLoc);
  for (int i = 0; i < nLoc; ++i) {
    Text.writeString(out, locations[i]);
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:9,代碼來源:GenerateData.java

示例10: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
private void write(DataOutput out) throws IOException {
  // Header: slot count and the "written" bitmask.
  WritableUtils.writeVInt(out, values.length);
  WritableUtils.writeVLong(out, written);
  // First pass: the concrete class name of every slot.
  for (Object value : values) {
    Text.writeString(out, value.getClass().getName());
  }
  // Second pass: payloads, but only for slots actually populated.
  for (int slot = 0; slot < values.length; ++slot) {
    if (has(slot)) {
      values[slot].write(out);
    }
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:13,代碼來源:TestTupleWritable.java

示例11: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
public void write(DataOutput out) throws IOException {
  // Path first, then a directory flag; only directories carry children.
  Text.writeString(out, path);

  final boolean directory = isDir();
  out.writeBoolean(directory);
  if (!directory) {
    return;
  }
  out.writeInt(children.length);
  for (String child : children) {
    Text.writeString(out, child);
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:14,代碼來源:HadoopArchives.java

示例12: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
// Serializes this WAL entry as a single length-prefixed string (its name).
public void write(DataOutput out) throws IOException {
  Text.writeString(out, name);
}
 
開發者ID:jiangxiluning,項目名稱:kafka-connect-hdfs,代碼行數:5,代碼來源:WALEntry.java

示例13: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
public void write(DataOutput out) throws IOException {
  // A null protocol is serialized as the empty string so the reader
  // never has to handle a missing field.
  final String protocolName;
  if (protocol == null) {
    protocolName = "";
  } else {
    protocolName = protocol;
  }
  Text.writeString(out, protocolName);
}
 
開發者ID:spafka,項目名稱:spark_deep,代碼行數:5,代碼來源:ConnectionHeader.java

示例14: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
@Override
public void write(DataOutput output) throws IOException {
  // Chunk id, the sub-partition flag, then the block count.
  final String chunkId = this.getId();
  Text.writeString(output, chunkId);
  output.writeBoolean(this.isSubPartition);
  output.writeLong(this.blocks);
}
 
開發者ID:aliyun,項目名稱:aliyun-maxcompute-data-collectors,代碼行數:7,代碼來源:OraOopOracleDataChunkPartition.java

示例15: write

import org.apache.hadoop.io.Text; //導入方法依賴的package包/類
public void write(DataOutput out) throws IOException {
  // Map attempt id, then the compressed and uncompressed byte counts,
  // then the reduce partition this map output belongs to.
  Text.writeString(out, mapId);
  WritableUtils.writeVLong(out, compressedLength);
  WritableUtils.writeVLong(out, uncompressedLength);
  WritableUtils.writeVInt(out, forReduce);
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:7,代碼來源:ShuffleHeader.java


注:本文中的org.apache.hadoop.io.Text.writeString方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。