This article collects typical usage examples of the Java method org.apache.hadoop.util.StringInterner.weakIntern. If you are wondering what StringInterner.weakIntern does, how to use it, or where to find examples of it in real code, the curated samples below may help. You can also read more about the enclosing class, org.apache.hadoop.util.StringInterner.
The following 15 code examples of StringInterner.weakIntern are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
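Before the examples, here is a minimal, self-contained sketch of what weakIntern does: it canonicalizes equal strings through a weak-reference intern pool, so heavily repeated values (hostnames, task states, rack names) share a single String instance while still remaining garbage-collectable. The class name WeakInternDemo and the sample hostname are illustrative, not taken from the examples that follow.

import org.apache.hadoop.util.StringInterner;

public class WeakInternDemo {
  public static void main(String[] args) {
    // new String(...) forces distinct objects so the interning effect is visible
    String a = StringInterner.weakIntern(new String("host-01.example.com"));
    String b = StringInterner.weakIntern(new String("host-01.example.com"));

    // weakIntern returns one canonical instance per distinct value, so
    // equal inputs come back as the same object reference.
    System.out.println(a == b);      // true
    System.out.println(a.equals(b)); // true

    // Unlike String.intern(), the pool holds entries weakly: once nothing
    // references the canonical string, it can be garbage-collected.
  }
}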
Example 1: handleReduceAttemptFinishedEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
private void handleReduceAttemptFinishedEvent(ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo =
      taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  // intern strings that repeat across many attempts (status, state, host, rack)
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Example 2: readFields
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong();
  this.finishTime = in.readLong();
  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Example 3: readFields
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Example 4: reportDiagnosticInfo
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
    throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);
  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.
  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Example 5: processFinishedContainer
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@VisibleForTesting
void processFinishedContainer(ContainerStatus container) {
LOG.info("Received completed container " + container.getContainerId());
TaskAttemptId attemptID = assignedRequests.get(container.getContainerId());
if (attemptID == null) {
LOG.error("Container complete event for unknown container "
+ container.getContainerId());
} else {
pendingRelease.remove(container.getContainerId());
assignedRequests.remove(attemptID);
// Send the diagnostics
String diagnostic = StringInterner.weakIntern(container.getDiagnostics());
eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
diagnostic));
// send the container completed event to Task attempt
eventHandler.handle(createContainerFinishedEvent(container, attemptID));
}
}
Example 6: getSplitDetails
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset, Configuration configuration)
throws IOException {
inFile.seek(offset);
String className = StringInterner.weakIntern(Text.readString(inFile));
Class<T> cls;
try {
cls = (Class<T>) configuration.getClassByName(className);
} catch (ClassNotFoundException ce) {
IOException wrap = new IOException("Split class " + className +
" not found");
wrap.initCause(ce);
throw wrap;
}
SerializationFactory factory = new SerializationFactory(configuration);
Deserializer<T> deserializer =
(Deserializer<T>) factory.getDeserializer(cls);
deserializer.open(inFile);
T split = deserializer.deserialize(null);
return split;
}
Example 7: readFields
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup =
        WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  extraData.readFields(in);
}
Example 8: handleFinishContainers
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private void handleFinishContainers(List<ContainerStatus> finishedContainers) {
for (ContainerStatus cont : finishedContainers) {
LOG.info("Received completed container:" + cont);
Id id = assignedContainerToIDMap.get(cont.getContainerId());
if (id == null) {
LOG.error("Container complete event for unknown container id " + cont.getContainerId());
} else {
assignedContainerToIDMap.remove(cont.getContainerId());
idToContainerMap.remove(id);
//dispatch container exit message to corresponding components
String diagnostics = StringInterner.weakIntern(cont.getDiagnostics());
if (id instanceof PSAttemptId) {
context.getEventHandler().handle(
new PSAttemptDiagnosticsUpdateEvent(diagnostics, (PSAttemptId) id));
context.getEventHandler().handle(createContainerFinishedEvent(cont, (PSAttemptId) id));
} else if (id instanceof PSAgentAttemptId) {
context.getEventHandler().handle(
new PSAgentAttemptDiagnosticsUpdateEvent((PSAgentAttemptId) id, diagnostics));
context.getEventHandler().handle(
createContainerFinishedEvent(cont, (PSAgentAttemptId) id));
} else if(id instanceof WorkerAttemptId){
context.getEventHandler().handle(
new WorkerAttemptDiagnosticsUpdateEvent((WorkerAttemptId) id, diagnostics));
context.getEventHandler().handle(
createContainerFinishedEvent(cont, (WorkerAttemptId) id));
}
}
}
}
Example 9: handleJobFailedEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
private void handleJobFailedEvent(JobUnsuccessfulCompletionEvent event) {
  info.finishTime = event.getFinishTime();
  info.finishedMaps = event.getFinishedMaps();
  info.finishedReduces = event.getFinishedReduces();
  info.jobStatus = StringInterner.weakIntern(event.getStatus());
  info.errorInfo = StringInterner.weakIntern(event.getDiagnostics());
}
Example 10: handleTaskAttemptFinishedEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
private void handleTaskAttemptFinishedEvent(TaskAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo =
      taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Example 11: handleAMStartedEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
private void handleAMStartedEvent(AMStartedEvent event) {
  AMInfo amInfo = new AMInfo();
  amInfo.appAttemptId = event.getAppAttemptId();
  amInfo.startTime = event.getStartTime();
  amInfo.containerId = event.getContainerId();
  amInfo.nodeManagerHost = StringInterner.weakIntern(event.getNodeManagerHost());
  amInfo.nodeManagerPort = event.getNodeManagerPort();
  amInfo.nodeManagerHttpPort = event.getNodeManagerHttpPort();
  if (info.amInfos == null) {
    info.amInfos = new LinkedList<AMInfo>();
  }
  info.amInfos.add(amInfo);
  info.latestAmInfo = amInfo;
}
Example 12: handleTaskFailedEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
private void handleTaskFailedEvent(TaskFailedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  taskInfo.status = TaskStatus.State.FAILED.toString();
  taskInfo.finishTime = event.getFinishTime();
  taskInfo.error = StringInterner.weakIntern(event.getError());
  taskInfo.failedDueToAttemptId = event.getFailedAttemptID();
  taskInfo.counters = event.getCounters();
}
Example 13: getAssignedContainerMgrAddress
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
@Override
public String getAssignedContainerMgrAddress() {
  readLock.lock();
  try {
    return container == null ? null : StringInterner.weakIntern(container
        .getNodeId().toString());
  } finally {
    readLock.unlock();
  }
}
Example 14: ContainerRemoteLaunchEvent
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
public ContainerRemoteLaunchEvent(TaskAttemptId taskAttemptID,
    ContainerLaunchContext containerLaunchContext,
    Container allocatedContainer, Task remoteTask) {
  super(taskAttemptID, allocatedContainer.getId(), StringInterner
      .weakIntern(allocatedContainer.getNodeId().toString()),
      allocatedContainer.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH);
  this.allocatedContainer = allocatedContainer;
  this.containerLaunchContext = containerLaunchContext;
  this.task = remoteTask;
}
Example 15: readFields
import org.apache.hadoop.util.StringInterner; // import the package/class this method depends on
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup =
        WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  int len = in.readInt();
  encryptedSpillKey = new byte[len];
  // the stream stores the key length, then extraData, then the key bytes,
  // so the reads follow that order
  extraData.readFields(in);
  in.readFully(encryptedSpillKey);
}