This article collects typical usage examples of the Java method org.apache.hadoop.io.WritableUtils.readEnum. If you are wondering what WritableUtils.readEnum does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.io.WritableUtils.
The following 13 code examples of WritableUtils.readEnum are listed in order of popularity.
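Before the examples, the core round trip may be easier to see in isolation: WritableUtils.writeEnum serializes an enum constant by name, and readEnum restores it by resolving that name against the enum class. The self-contained sketch below is illustrative only; the Phase enum and the stream setup are invented for the demonstration and do not come from the examples that follow.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class ReadEnumRoundTrip {
  // Illustrative enum; any enum type works with writeEnum/readEnum.
  enum Phase { STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP }

  public static void main(String[] args) throws IOException {
    // Serialize: writeEnum stores the constant's name on the stream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    WritableUtils.writeEnum(out, Phase.SHUFFLE);
    out.flush();

    // Deserialize: readEnum reads the name back and resolves it against Phase.class.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    Phase phase = WritableUtils.readEnum(in, Phase.class);
    System.out.println(phase); // SHUFFLE
  }
}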
Example 1: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class);
  this.startTime = in.readLong();
  this.finishTime = in.readLong();
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
Example 2: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong();
  this.finishTime = in.readLong();
  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Example 3: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Example 4: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void readFields(DataInput in) throws IOException {
  column = Bytes.readByteArray(in);
  dataType = WritableUtils.readEnum(in, DataType.class);
  if (in.readBoolean()) {
    startType = WritableUtils.readEnum(in, CompareOp.class);
    startValue = Bytes.readByteArray(in);
  } else {
    startType = CompareOp.NO_OP;
    startValue = null;
  }
  if (in.readBoolean()) {
    stopType = WritableUtils.readEnum(in, CompareOp.class);
    stopValue = Bytes.readByteArray(in);
  } else {
    stopType = CompareOp.NO_OP;
    stopValue = null;
  }
}
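The read logic in Example 4 implies a write side that records a presence flag before each optional bound. A minimal sketch of that counterpart is shown below, assuming the field names mirror the read code; it illustrates the layout the read side expects rather than reproducing the class's actual write method.

// Hypothetical write counterpart to Example 4's readFields; the real class may differ.
// Mirrors the boolean-flag-then-enum layout consumed by the read side.
public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, column);
  WritableUtils.writeEnum(out, dataType);
  if (startType != CompareOp.NO_OP) {
    out.writeBoolean(true);                   // start bound present
    WritableUtils.writeEnum(out, startType);  // enum stored by name
    Bytes.writeByteArray(out, startValue);
  } else {
    out.writeBoolean(false);                  // start bound absent
  }
  if (stopType != CompareOp.NO_OP) {
    out.writeBoolean(true);                   // stop bound present
    WritableUtils.writeEnum(out, stopType);
    Bytes.writeByteArray(out, stopValue);
  } else {
    out.writeBoolean(false);                  // stop bound absent
  }
}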
Example 5: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
  blockSize = in.readLong();
  bytesPerChecksum = in.readInt();
  writePacketSize = in.readInt();
  replication = in.readShort();
  fileBufferSize = in.readInt();
  checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
}
Example 6: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup =
        WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  int len = in.readInt();
  encryptedSpillKey = new byte[len];
  extraData.readFields(in);
  in.readFully(encryptedSpillKey);
}
Example 7: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = StringInterner.weakIntern(Text.readString(in));
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if (blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
Example 8: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public synchronized void readFields(DataInput in) throws IOException {
  this.jobid = new JobID();
  this.jobid.readFields(in);
  this.setupProgress = in.readFloat();
  this.mapProgress = in.readFloat();
  this.reduceProgress = in.readFloat();
  this.cleanupProgress = in.readFloat();
  this.runState = WritableUtils.readEnum(in, State.class);
  this.startTime = in.readLong();
  this.user = StringInterner.weakIntern(Text.readString(in));
  this.priority = WritableUtils.readEnum(in, JobPriority.class);
  this.schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  this.finishTime = in.readLong();
  this.isRetired = in.readBoolean();
  this.historyFile = StringInterner.weakIntern(Text.readString(in));
  this.jobName = StringInterner.weakIntern(Text.readString(in));
  this.trackingUrl = StringInterner.weakIntern(Text.readString(in));
  this.jobFile = StringInterner.weakIntern(Text.readString(in));
  this.isUber = in.readBoolean();
  // De-serialize the job's ACLs
  int numACLs = in.readInt();
  for (int i = 0; i < numACLs; i++) {
    JobACL aclType = WritableUtils.readEnum(in, JobACL.class);
    AccessControlList acl = new AccessControlList(" ");
    acl.readFields(in);
    this.jobACLs.put(aclType, acl);
  }
}
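The ACL block at the end of Example 8 deserializes an enum-keyed map written as a count followed by (enum, value) pairs. A hedged sketch of a write side producing that layout follows; the helper method and its signature are invented for illustration and are not the actual JobStatus code.

// Illustrative write side for the ACL map in Example 8: a count, then one
// (JobACL enum, AccessControlList) pair per entry. Not the real JobStatus method.
private void writeJobACLs(DataOutput out, Map<JobACL, AccessControlList> jobACLs)
    throws IOException {
  out.writeInt(jobACLs.size());
  for (Map.Entry<JobACL, AccessControlList> entry : jobACLs.entrySet()) {
    WritableUtils.writeEnum(out, entry.getKey()); // ACL type stored by name
    entry.getValue().write(out);                  // AccessControlList is itself a Writable
  }
}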
Example 9: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
public void readFields(DataInput in) throws IOException {
  taskId.readFields(in);
  idWithinJob = WritableUtils.readVInt(in);
  isMap = in.readBoolean();
  status = WritableUtils.readEnum(in, Status.class);
  taskTrackerHttp = WritableUtils.readString(in);
  taskRunTime = WritableUtils.readVInt(in);
  eventId = WritableUtils.readVInt(in);
}
Example 10: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void readFields(DataInput dataInput) throws IOException {
  super.readFields(dataInput);
  qualifier = Bytes.readByteArray(dataInput);
  type = WritableUtils.readEnum(dataInput, DataType.class);
  this.offset = dataInput.readInt();
  this.length = dataInput.readInt();
}
Example 11: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void readFields(DataInput in) throws IOException {
  this.value.readFields(in);
  this.kpi = WritableUtils.readEnum(in, KpiType.class);
}
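Example 11 is the smallest form of the pattern: a Writable pairing a nested value with an enum tag. For reference, a complete, self-contained Writable along the same lines might look like the sketch below; the class name, enum, and fields are invented for illustration and are not taken from the examples above.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

// Illustrative Writable pairing a numeric value with an enum tag.
public class TaggedValueWritable implements Writable {
  public enum Kind { MINUTE, HOUR, DAY }

  private final LongWritable value = new LongWritable();
  private Kind kind = Kind.MINUTE;

  @Override
  public void write(DataOutput out) throws IOException {
    value.write(out);                    // delegate to the nested Writable
    WritableUtils.writeEnum(out, kind);  // enum serialized by name
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    value.readFields(in);                // read in the same order as write
    kind = WritableUtils.readEnum(in, Kind.class);
  }
}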
Example 12: readFields
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
@Override
public void readFields(DataInput in) throws IOException {
  super.readFields(in);
  jobId.readFields(in);
  type = WritableUtils.readEnum(in, TaskType.class);
}
Example 13: processINodesUC
import org.apache.hadoop.io.WritableUtils; // import the package/class the method depends on
/**
 * Process the INodes under construction section of the fsimage.
 *
 * @param in DataInputStream to process
 * @param v Visitor to walk over inodes
 * @param skipBlocks Walk over each block?
 */
private void processINodesUC(DataInputStream in, ImageVisitor v,
    boolean skipBlocks) throws IOException {
  int numINUC = in.readInt();
  v.visitEnclosingElement(ImageElement.INODES_UNDER_CONSTRUCTION,
      ImageElement.NUM_INODES_UNDER_CONSTRUCTION, numINUC);
  for (int i = 0; i < numINUC; i++) {
    v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION);
    byte[] name = FSImageSerialization.readBytes(in);
    String n = new String(name, "UTF8");
    v.visit(ImageElement.INODE_PATH, n);
    if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
      long inodeId = in.readLong();
      v.visit(ImageElement.INODE_ID, inodeId);
    }
    v.visit(ImageElement.REPLICATION, in.readShort());
    v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
    v.visit(ImageElement.PREFERRED_BLOCK_SIZE, in.readLong());
    int numBlocks = in.readInt();
    processBlocks(in, v, numBlocks, skipBlocks);
    processPermission(in, v);
    v.visit(ImageElement.CLIENT_NAME, FSImageSerialization.readString(in));
    v.visit(ImageElement.CLIENT_MACHINE, FSImageSerialization.readString(in));
    // Skip over the datanode descriptors, which are still stored in the
    // file but are not used by the datanode or loaded into memory
    int numLocs = in.readInt();
    for (int j = 0; j < numLocs; j++) {
      in.readShort();
      in.readLong();
      in.readLong();
      in.readLong();
      in.readInt();
      FSImageSerialization.readString(in);
      FSImageSerialization.readString(in);
      WritableUtils.readEnum(in, AdminStates.class); // result discarded; only advances the stream
    }
    v.leaveEnclosingElement(); // INodeUnderConstruction
  }
  v.leaveEnclosingElement(); // INodesUnderConstruction
}