This article collects typical usage examples of the Java method org.apache.hadoop.io.Text.writeString. If you are unsure what Text.writeString does, how to call it, or what real-world usage looks like, the curated code examples here should help. You can also explore the enclosing class, org.apache.hadoop.io.Text, for further usage examples.
The following presents 15 code examples of the Text.writeString method, ordered by popularity by default.
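Before diving into the examples, a quick orientation: Text.writeString(DataOutput, String) serializes a string as its UTF-8 byte length followed by the bytes, and Text.readString(DataInput) is the symmetric reader. Below is a minimal, self-contained round-trip sketch; the class and variable names are illustrative only and do not come from the examples that follow.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class WriteStringRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize a string as length-prefixed UTF-8.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    Text.writeString(out, "hello, writable world");
    out.flush();

    // Deserialize with the symmetric reader.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    String restored = Text.readString(in);
    System.out.println(restored); // prints: hello, writable world
  }
}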
Example 1: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

public synchronized void write(DataOutput out) throws IOException {
  jobid.write(out);
  out.writeFloat(setupProgress);
  out.writeFloat(mapProgress);
  out.writeFloat(reduceProgress);
  out.writeFloat(cleanupProgress);
  WritableUtils.writeEnum(out, runState);
  out.writeLong(startTime);
  Text.writeString(out, user);
  WritableUtils.writeEnum(out, priority);
  Text.writeString(out, schedulingInfo);
  out.writeLong(finishTime);
  out.writeBoolean(isRetired);
  Text.writeString(out, historyFile);
  Text.writeString(out, jobName);
  Text.writeString(out, trackingUrl);
  Text.writeString(out, jobFile);
  out.writeBoolean(isUber);
  // Serialize the job's ACLs
  out.writeInt(jobACLs.size());
  for (Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
    WritableUtils.writeEnum(out, entry.getKey());
    entry.getValue().write(out);
  }
}
Example 2: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
  out.writeLong(getLen());
  out.writeBoolean(isDirectory());
  out.writeShort(getReplication());
  out.writeLong(getBlockSize());
  out.writeLong(getModificationTime());
  out.writeLong(getAccessTime());
  getPermission().write(out);
  Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
  Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
  out.writeBoolean(isSymlink());
  if (isSymlink()) {
    Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
  }
}
Example 3: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

/** {@inheritDoc} */
@Override
public void write(DataOutput output) throws IOException {
  output.writeInt(splitId);
  if (this.oracleDataChunks == null) {
    output.writeInt(0);
  } else {
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk dataChunk : this.oracleDataChunks) {
      Text.writeString(output, dataChunk.getClass().getName());
      dataChunk.write(output);
    }
  }
}
Example 4: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

public void write(DataOutput out) throws IOException {
  Text.writeString(out, jobFile);
  taskId.write(out);
  out.writeInt(partition);
  out.writeInt(numSlotsRequired);
  taskStatus.write(out);
  skipRanges.write(out);
  out.writeBoolean(skipping);
  out.writeBoolean(jobCleanup);
  if (jobCleanup) {
    WritableUtils.writeEnum(out, jobRunStateForCleanup);
  }
  out.writeBoolean(jobSetup);
  out.writeBoolean(writeSkipRecs);
  out.writeBoolean(taskCleanup);
  Text.writeString(out, user);
  out.writeInt(encryptedSpillKey.length);
  extraData.write(out);
  out.write(encryptedSpillKey);
}
Example 5: writeHeaderAndWrapStream

import org.apache.hadoop.io.Text; // import the package/class this method depends on

/**
 * Write out a header to the given stream that indicates the chosen
 * compression codec, and return the same stream wrapped with that codec.
 * If no codec is specified, simply adds buffering to the stream, so that
 * the returned stream is always buffered.
 *
 * @param os The stream to write the header to and wrap. This stream should
 *           be unbuffered.
 * @return A stream wrapped with the specified compressor, or buffering
 *         if compression is not enabled.
 * @throws IOException if an IO error occurs or the compressor cannot be
 *         instantiated
 */
DataOutputStream writeHeaderAndWrapStream(OutputStream os)
    throws IOException {
  DataOutputStream dos = new DataOutputStream(os);
  dos.writeBoolean(imageCodec != null);
  if (imageCodec != null) {
    String codecClassName = imageCodec.getClass().getCanonicalName();
    Text.writeString(dos, codecClassName);
    return new DataOutputStream(imageCodec.createOutputStream(os));
  } else {
    // use a buffered output stream
    return new DataOutputStream(new BufferedOutputStream(os));
  }
}
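For context, whatever reads this image header back has to mirror the write order: check the boolean flag, read the codec class name with Text.readString, then wrap the input stream. The sketch below is an assumed, illustrative counterpart; the method name and the ReflectionUtils-based codec instantiation are assumptions for illustration, not the verbatim Hadoop source.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.ReflectionUtils;

// Illustrative counterpart (assumed, not the verbatim Hadoop method):
// read the header written by writeHeaderAndWrapStream and wrap the input.
DataInputStream readHeaderAndWrapStream(InputStream is, Configuration conf)
    throws IOException {
  DataInputStream dis = new DataInputStream(is);
  boolean isCompressed = dis.readBoolean();
  if (isCompressed) {
    // Read the codec class name exactly as Text.writeString wrote it.
    String codecClassName = Text.readString(dis);
    try {
      CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(
          conf.getClassByName(codecClassName), conf);
      return new DataInputStream(codec.createInputStream(is));
    } catch (ClassNotFoundException e) {
      throw new IOException("Unknown codec: " + codecClassName, e);
    }
  } else {
    // Mirror the write side: plain buffering when no codec was used.
    return new DataInputStream(new BufferedInputStream(is));
  }
}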
Example 6: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
  out.writeBoolean(this.isNew);
  Text.writeString(out, this.sqoopRecord.getClass().getName());
  this.sqoopRecord.write(out);
}
Example 7: writeClass

import org.apache.hadoop.io.Text; // import the package/class this method depends on

/**
 * Writes the encoded class code as defined in CLASS_TO_CODE, or
 * the whole class name if not defined in the mapping.
 */
static void writeClass(DataOutput out, Class<?> c) throws IOException {
  Integer code = CLASS_TO_CODE.get(c);
  if (code == null) {
    WritableUtils.writeVInt(out, NOT_ENCODED);
    Text.writeString(out, c.getName());
  } else {
    WritableUtils.writeVInt(out, code);
  }
}
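The matching decoder reads the vint first, then either resolves the class name written by Text.writeString or consults the reverse lookup table. A hedged sketch follows; CODE_TO_CLASS is an assumed reverse mapping of the CLASS_TO_CODE table in the example above, not code from the source.

// Illustrative counterpart (assumed): decode what writeClass produced.
static Class<?> readClass(DataInput in) throws IOException {
  int code = WritableUtils.readVInt(in);
  if (code == NOT_ENCODED) {
    // Fall back to the full class name written by Text.writeString.
    String className = Text.readString(in);
    try {
      return Class.forName(className);
    } catch (ClassNotFoundException e) {
      throw new IOException("Cannot load class " + className, e);
    }
  }
  return CODE_TO_CLASS.get(code); // assumed reverse of CLASS_TO_CODE
}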
Example 8: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, locations.length);
  for (int i = 0; i < locations.length; i++) {
    Text.writeString(out, locations[i]);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
Example 9: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput out) throws IOException {
  out.writeLong(bytes);
  out.writeInt(nLoc);
  for (int i = 0; i < nLoc; ++i) {
    Text.writeString(out, locations[i]);
  }
}
Example 10: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

private void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, values.length);
  WritableUtils.writeVLong(out, written);
  for (int i = 0; i < values.length; ++i) {
    Text.writeString(out, values[i].getClass().getName());
  }
  for (int i = 0; i < values.length; ++i) {
    if (has(i)) {
      values[i].write(out);
    }
  }
}
Example 11: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, path);
  final boolean dir = isDir();
  out.writeBoolean(dir);
  if (dir) {
    out.writeInt(children.length);
    for (String c : children) {
      Text.writeString(out, c);
    }
  }
}
Example 12: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, name);
}
Example 13: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, (protocol == null) ? "" : protocol);
}
Example 14: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

@Override
public void write(DataOutput output) throws IOException {
  Text.writeString(output, this.getId());
  output.writeBoolean(this.isSubPartition);
  output.writeLong(this.blocks);
}
Example 15: write

import org.apache.hadoop.io.Text; // import the package/class this method depends on

public void write(DataOutput out) throws IOException {
  Text.writeString(out, mapId);
  WritableUtils.writeVLong(out, compressedLength);
  WritableUtils.writeVLong(out, uncompressedLength);
  WritableUtils.writeVInt(out, forReduce);
}
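Every write method shown above implies a readFields counterpart that consumes the same fields in the same order. As a closing illustration, here is a hedged sketch of what the read side of Example 15 would look like; the field names mirror the write side, and this is the symmetric read, not necessarily the verbatim source.

// Illustrative counterpart (assumed): read back the fields in write order.
public void readFields(DataInput in) throws IOException {
  mapId = Text.readString(in);                    // written with Text.writeString
  compressedLength = WritableUtils.readVLong(in); // matches writeVLong
  uncompressedLength = WritableUtils.readVLong(in);
  forReduce = WritableUtils.readVInt(in);         // matches writeVInt
}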