当前位置: 首页>>代码示例>>Java>>正文


Java Text.writeString方法代码示例

本文整理汇总了Java中org.apache.hadoop.io.Text.writeString方法的典型用法代码示例。如果您正苦于以下问题：Java Text.writeString方法的具体用法？Java Text.writeString怎么用？Java Text.writeString使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.io.Text的用法示例。


在下文中一共展示了Text.writeString方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/**
 * Serializes this job status to the given stream.
 * NOTE(review): the field order here presumably mirrors the matching
 * readFields()/deserialization logic -- confirm before reordering any write.
 * Synchronized so concurrent mutators cannot produce a torn snapshot.
 */
public synchronized void write(DataOutput out) throws IOException {
  jobid.write(out);
  out.writeFloat(setupProgress);
  out.writeFloat(mapProgress);
  out.writeFloat(reduceProgress);
  out.writeFloat(cleanupProgress);
  WritableUtils.writeEnum(out, runState);
  out.writeLong(startTime);
  Text.writeString(out, user);
  WritableUtils.writeEnum(out, priority);
  Text.writeString(out, schedulingInfo);
  out.writeLong(finishTime);
  out.writeBoolean(isRetired);
  Text.writeString(out, historyFile);
  Text.writeString(out, jobName);
  Text.writeString(out, trackingUrl);
  Text.writeString(out, jobFile);
  out.writeBoolean(isUber);

  // Serialize the job's ACLs: count, then (enum key, ACL value) pairs.
  out.writeInt(jobACLs.size());
  for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
    WritableUtils.writeEnum(out, entry.getKey());
    entry.getValue().write(out);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:JobStatus.java

示例2: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
@Override
public void write(DataOutput out) throws IOException {
  // Serializes this file status. NOTE(review): write order presumably
  // mirrors readFields() -- confirm before reordering. String fields are
  // bounded by Text.DEFAULT_MAX_LEN to cap on-the-wire size.
  Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
  out.writeLong(getLen());
  out.writeBoolean(isDirectory());
  out.writeShort(getReplication());
  out.writeLong(getBlockSize());
  out.writeLong(getModificationTime());
  out.writeLong(getAccessTime());
  getPermission().write(out);
  Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
  Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
  out.writeBoolean(isSymlink());
  if (isSymlink()) {
    // The symlink target is written only when one is present.
    Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:18,代码来源:FileStatus.java

示例3: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** {@inheritDoc} */
@Override
public void write(DataOutput output) throws IOException {

  output.writeInt(splitId);

  if (this.oracleDataChunks != null) {
    // Chunk count, then each chunk's concrete class name followed by its
    // own serialized payload (the class name drives deserialization).
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk chunk : this.oracleDataChunks) {
      Text.writeString(output, chunk.getClass().getName());
      chunk.write(output);
    }
  } else {
    // No chunks assigned to this split: record an explicit zero count.
    output.writeInt(0);
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:17,代码来源:OraOopDBInputSplit.java

示例4: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
// Serializes this task for transmission/persistence.
// NOTE(review): field order presumably mirrors readFields() -- confirm
// before reordering any write.
public void write(DataOutput out) throws IOException {
  Text.writeString(out, jobFile);
  taskId.write(out);
  out.writeInt(partition);
  out.writeInt(numSlotsRequired);
  taskStatus.write(out);
  skipRanges.write(out);
  out.writeBoolean(skipping);
  out.writeBoolean(jobCleanup);
  if (jobCleanup) {
    // The target run-state is only meaningful for cleanup tasks.
    WritableUtils.writeEnum(out, jobRunStateForCleanup);
  }
  out.writeBoolean(jobSetup);
  out.writeBoolean(writeSkipRecs);
  out.writeBoolean(taskCleanup);
  Text.writeString(out, user);
  // Length-prefix the key so the reader knows how many bytes to consume.
  out.writeInt(encryptedSpillKey.length);
  extraData.write(out);
  out.write(encryptedSpillKey);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:Task.java

示例5: writeHeaderAndWrapStream

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/**
 * Writes a compression header to the given stream and returns that stream
 * wrapped with the configured codec. When no codec is configured, the
 * stream is simply buffered so the returned stream is always buffered.
 *
 * @param os the unbuffered stream to receive the header and be wrapped
 * @return a stream wrapped with the configured compressor, or with
 * buffering if compression is not enabled
 * @throws IOException if an IO error occurs or the compressor cannot be
 * instantiated
 */
DataOutputStream writeHeaderAndWrapStream(OutputStream os)
throws IOException {
  final DataOutputStream dos = new DataOutputStream(os);
  final boolean compressed = (imageCodec != null);

  dos.writeBoolean(compressed);

  if (!compressed) {
    // No codec configured: just give the caller a buffered stream.
    return new DataOutputStream(new BufferedOutputStream(os));
  }

  // Record which codec was chosen so the reader can pick the matching
  // decompressor, then hand back the codec-wrapped stream.
  Text.writeString(dos, imageCodec.getClass().getCanonicalName());
  return new DataOutputStream(imageCodec.createOutputStream(os));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:FSImageCompression.java

示例6: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
  out.writeBoolean(this.isNew);
  // Record the concrete record class so deserialization can re-create the
  // right type before delegating to the record's own serialization.
  final String recordClassName = this.sqoopRecord.getClass().getName();
  Text.writeString(out, recordClassName);
  this.sqoopRecord.write(out);
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:10,代码来源:MergeRecord.java

示例7: writeClass

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/**
 * Writes the class to the stream as its compact code from CLASS_TO_CODE
 * when one is registered, otherwise as NOT_ENCODED followed by the fully
 * qualified class name.
 */
static void writeClass(DataOutput out, Class<?> c) throws IOException {
  final Integer code = CLASS_TO_CODE.get(c);
  if (code != null) {
    // Known class: the vint code alone is enough for the reader.
    WritableUtils.writeVInt(out, code);
  } else {
    // Unmapped class: sentinel code, then the name so it can be loaded.
    WritableUtils.writeVInt(out, NOT_ENCODED);
    Text.writeString(out, c.getName());
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:13,代码来源:HbaseObjectWritableFor96Migration.java

示例8: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** Serializes split metadata: locations, start offset, and data length. */
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, locations.length);
  for (String location : locations) {
    Text.writeString(out, location);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:JobSplit.java

示例9: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
@Override
public void write(DataOutput out) throws IOException {
  // Serializes the byte count followed by the host locations.
  out.writeLong(bytes);
  // NOTE(review): the loop below is bounded by nLoc rather than
  // locations.length -- presumably the two agree; confirm at call sites.
  out.writeInt(nLoc);
  for (int i = 0; i < nLoc; ++i) {
    Text.writeString(out, locations[i]);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:GenerateData.java

示例10: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/**
 * Serializes the tuple: element count, the "written" presence mask, every
 * element's class name, then the payload of each present element.
 */
private void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, values.length);
  WritableUtils.writeVLong(out, written);
  // Class names are emitted for every slot, even absent ones, so the
  // reader can reconstruct the full element-type layout.
  for (Object value : values) {
    Text.writeString(out, value.getClass().getName());
  }
  // Only slots flagged as present carry a serialized payload.
  for (int slot = 0; slot < values.length; ++slot) {
    if (has(slot)) {
      values[slot].write(out);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:TestTupleWritable.java

示例11: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** Serializes this entry: path, directory flag, then children if a dir. */
@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, path);

  final boolean directory = isDir();
  out.writeBoolean(directory);
  if (!directory) {
    return;
  }
  // Directories additionally carry their child count and child names.
  out.writeInt(children.length);
  for (String child : children) {
    Text.writeString(out, child);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:14,代码来源:HadoopArchives.java

示例12: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** Serializes this WAL entry by writing its name. */
@Override
public void write(DataOutput out) throws IOException {
  final String entryName = name;
  Text.writeString(out, entryName);
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:5,代码来源:WALEntry.java

示例13: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
/** Serializes the header; a null protocol is written as the empty string. */
@Override
public void write(DataOutput out) throws IOException {
  String protocolName = protocol;
  if (protocolName == null) {
    protocolName = "";
  }
  Text.writeString(out, protocolName);
}
 
开发者ID:spafka,项目名称:spark_deep,代码行数:5,代码来源:ConnectionHeader.java

示例14: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
@Override
public void write(DataOutput output) throws IOException {
  // Serializes this partition chunk: id, sub-partition flag, block count.
  // NOTE(review): order presumably mirrors readFields() -- confirm before
  // reordering.
  Text.writeString(output, this.getId());
  output.writeBoolean(this.isSubPartition);
  output.writeLong(this.blocks);
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:7,代码来源:OraOopOracleDataChunkPartition.java

示例15: write

import org.apache.hadoop.io.Text; //导入方法依赖的package包/类
// Serializes this shuffle header: map id, compressed and uncompressed
// byte lengths, and the target reduce id. Variable-length vint/vlong
// encodings keep the header small on the wire.
public void write(DataOutput out) throws IOException {
  Text.writeString(out, mapId);
  WritableUtils.writeVLong(out, compressedLength);
  WritableUtils.writeVLong(out, uncompressedLength);
  WritableUtils.writeVInt(out, forReduce);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:7,代码来源:ShuffleHeader.java


注:本文中的org.apache.hadoop.io.Text.writeString方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。