This article collects typical usage examples of the Java class org.apache.hadoop.util.StringInterner. If you are wondering what StringInterner is for and how to use it, the curated examples below should help.
The StringInterner class lives in the org.apache.hadoop.util package. Thirteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
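Before diving into the examples, here is a minimal, self-contained sketch of what StringInterner does, assuming the Hadoop 2.x API where weakIntern(String) returns a canonical, weakly referenced copy of its argument (the class name StringInternerDemo is hypothetical):

import org.apache.hadoop.util.StringInterner;

public class StringInternerDemo {
  public static void main(String[] args) {
    // Two equal but distinct String objects, as typically produced when
    // parsing RPC messages or history files.
    String a = new String("node-1.example.com:8042");
    String b = new String("node-1.example.com:8042");
    System.out.println(a == b);   // false: different objects

    // weakIntern returns a canonical copy backed by weak references, so
    // repeated values (host names, state strings, counter names) share one
    // String object yet remain eligible for garbage collection.
    String ia = StringInterner.weakIntern(a);
    String ib = StringInterner.weakIntern(b);
    System.out.println(ia == ib); // true: same interned instance
  }
}

This is why every example on this page interns node IDs, state strings, and counter names: they recur across thousands of events, and interning collapses the duplicates to a single instance each.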
Example 1: cleanContainer

import org.apache.hadoop.util.StringInterner; // import the required package/class

@SuppressWarnings("unchecked")
private void cleanContainer() {
  AngelDeployMode deployMode = context.getDeployMode();
  ContainerLauncherEvent launchEvent = null;
  if (deployMode == AngelDeployMode.LOCAL) {
    launchEvent =
        new LocalContainerLauncherEvent(ContainerLauncherEventType.CONTAINER_REMOTE_CLEANUP, id);
  } else {
    // Intern the node id so repeated host strings share one String instance.
    launchEvent =
        new YarnContainerLauncherEvent(id, container.getId(),
            StringInterner.weakIntern(container.getNodeId().toString()),
            container.getContainerToken(),
            ContainerLauncherEventType.CONTAINER_REMOTE_CLEANUP);
  }
  context.getEventHandler().handle(launchEvent);
}
Example 2: reportDiagnosticInfo

import org.apache.hadoop.util.StringInterner; // import the required package/class

@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
    throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);
  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.
  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
Example 3: sendLaunchedEvents

import org.apache.hadoop.util.StringInterner; // import the required package/class

@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);
  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
      new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
          TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
          launchTime, trackerName, httpPort, shufflePort, container.getId(),
          locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Example 4: transition

import org.apache.hadoop.util.StringInterner; // import the required package/class

@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
    TaskAttemptEvent event) {
  // unregister from the TaskAttemptListener so that it stops listening
  // for this attempt
  taskAttempt.taskAttemptListener.unregister(
      taskAttempt.attemptId, taskAttempt.jvmID);
  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }
  taskAttempt.reportedStatus.progress = 1.0f;
  taskAttempt.updateProgressSplits();
  // send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId,
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
Example 5: handleTaskAttemptCompletion

import org.apache.hadoop.util.StringInterner; // import the required package/class

private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  // raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
        + attempt.getNodeHttpAddress().split(":")[0] + ":"
        + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0)
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    tce.setAttemptRunTime(runTime);
    // raise the event to job so that it adds the completion event to its
    // data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
Example 6: readFields

import org.apache.hadoop.util.StringInterner; // import the required package/class

public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  setProgress(in.readFloat());
  this.numSlots = in.readInt();
  this.runState = WritableUtils.readEnum(in, State.class);
  // Intern diagnostic and state strings: they repeat heavily across statuses.
  setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
  setStateString(StringInterner.weakIntern(Text.readString(in)));
  this.phase = WritableUtils.readEnum(in, Phase.class);
  this.startTime = in.readLong();
  this.finishTime = in.readLong();
  counters = new Counters();
  this.includeAllCounters = in.readBoolean();
  this.outputSize = in.readLong();
  counters.readFields(in);
  nextRecordRange.readFields(in);
}
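Writable deserialization only works when the read order mirrors the write order field for field. As a hedged sketch, the matching write side of the readFields above would look roughly like this (field accessors are inferred from the read code; the actual Hadoop class may differ):

// Hedged sketch, not the verbatim Hadoop source: a write method that mirrors
// the read order of readFields above, field for field.
public void write(DataOutput out) throws IOException {
  taskid.write(out);
  out.writeFloat(getProgress());
  out.writeInt(numSlots);
  WritableUtils.writeEnum(out, runState);
  Text.writeString(out, getDiagnosticInfo());
  Text.writeString(out, getStateString());
  WritableUtils.writeEnum(out, phase);
  out.writeLong(startTime);
  out.writeLong(finishTime);
  out.writeBoolean(includeAllCounters);
  out.writeLong(outputSize);
  counters.write(out);
  nextRecordRange.write(out);
}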
Example 7: readFields

import org.apache.hadoop.util.StringInterner; // import the required package/class

public void readFields(DataInput in) throws IOException {
  this.taskid.readFields(in);
  this.progress = in.readFloat();
  this.state = StringInterner.weakIntern(Text.readString(in));
  this.startTime = in.readLong();
  this.finishTime = in.readLong();
  diagnostics = WritableUtils.readStringArray(in);
  counters = new Counters();
  counters.readFields(in);
  currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
  if (currentStatus == TIPStatus.RUNNING) {
    int num = WritableUtils.readVInt(in);
    for (int i = 0; i < num; i++) {
      TaskAttemptID t = new TaskAttemptID();
      t.readFields(in);
      runningAttempts.add(t);
    }
  } else if (currentStatus == TIPStatus.COMPLETE) {
    successfulAttempt.readFields(in);
  }
}
Example 8: readFields

import org.apache.hadoop.util.StringInterner; // import the required package/class

@Override
public void readFields(DataInput in) throws IOException {
  queueName = StringInterner.weakIntern(Text.readString(in));
  queueState = WritableUtils.readEnum(in, QueueState.class);
  schedulingInfo = StringInterner.weakIntern(Text.readString(in));
  int length = in.readInt();
  stats = new JobStatus[length];
  for (int i = 0; i < length; i++) {
    stats[i] = new JobStatus();
    stats[i].readFields(in);
  }
  int count = in.readInt();
  children.clear();
  for (int i = 0; i < count; i++) {
    QueueInfo childQueueInfo = new QueueInfo();
    childQueueInfo.readFields(in);
    children.add(childQueueInfo);
  }
}
Example 9: handleReduceAttemptFinishedEvent

import org.apache.hadoop.util.StringInterner; // import the required package/class

private void handleReduceAttemptFinishedEvent(
    ReduceAttemptFinishedEvent event) {
  TaskInfo taskInfo = info.tasksMap.get(event.getTaskId());
  TaskAttemptInfo attemptInfo =
      taskInfo.attemptsMap.get(event.getAttemptId());
  attemptInfo.finishTime = event.getFinishTime();
  attemptInfo.status = StringInterner.weakIntern(event.getTaskStatus());
  attemptInfo.state = StringInterner.weakIntern(event.getState());
  attemptInfo.shuffleFinishTime = event.getShuffleFinishTime();
  attemptInfo.sortFinishTime = event.getSortFinishTime();
  attemptInfo.counters = event.getCounters();
  attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
  attemptInfo.port = event.getPort();
  attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
  info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
}
Example 10: fromAvro

import org.apache.hadoop.util.StringInterner; // import the required package/class

static Counters fromAvro(JhCounters counters) {
  Counters result = new Counters();
  if (counters != null) {
    for (JhCounterGroup g : counters.getGroups()) {
      // Group and counter names recur across every job-history event,
      // so interning them avoids thousands of duplicate Strings.
      CounterGroup group =
          result.addGroup(StringInterner.weakIntern(g.getName().toString()),
              StringInterner.weakIntern(g.getDisplayName().toString()));
      for (JhCounter c : g.getCounts()) {
        group.addCounter(StringInterner.weakIntern(c.getName().toString()),
            StringInterner.weakIntern(c.getDisplayName().toString()),
            c.getValue());
      }
    }
  }
  return result;
}
Example 11: readFields

import org.apache.hadoop.util.StringInterner; // import the required package/class

public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup =
        WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  extraData.readFields(in);
}
Example 12: getSplitDetails

import org.apache.hadoop.util.StringInterner; // import the required package/class

@SuppressWarnings("unchecked")
private static <T> T getSplitDetails(FSDataInputStream inFile, long offset,
    Configuration configuration) throws IOException {
  inFile.seek(offset);
  String className = StringInterner.weakIntern(Text.readString(inFile));
  Class<T> cls;
  try {
    cls = (Class<T>) configuration.getClassByName(className);
  } catch (ClassNotFoundException ce) {
    IOException wrap = new IOException("Split class " + className +
        " not found");
    wrap.initCause(ce);
    throw wrap;
  }
  SerializationFactory factory = new SerializationFactory(configuration);
  Deserializer<T> deserializer =
      (Deserializer<T>) factory.getDeserializer(cls);
  deserializer.open(inFile);
  T split = deserializer.deserialize(null);
  return split;
}
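The SerializationFactory pattern in this example is generic and reusable. A hedged usage sketch follows, where MyWritable and the readOne helper are hypothetical placeholders rather than names from the original code:

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;

// Hedged sketch: MyWritable is a hypothetical Writable type standing in for
// a real split class; dataIn is any open InputStream positioned at the data.
static MyWritable readOne(InputStream dataIn) throws IOException {
  Configuration conf = new Configuration();
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<MyWritable> deserializer = factory.getDeserializer(MyWritable.class);
  deserializer.open(dataIn);
  try {
    return deserializer.deserialize(null); // null => allocate a fresh object
  } finally {
    deserializer.close(); // unlike the example above, release the stream explicitly
  }
}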
Example 13: sendContainerCleanup

import org.apache.hadoop.util.StringInterner; // import the required package/class

@SuppressWarnings("unchecked")
private static void sendContainerCleanup(TaskAttemptImpl taskAttempt,
    TaskAttemptEvent event) {
  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }
  // send the cleanup event to containerLauncher
  taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
      taskAttempt.attemptId,
      taskAttempt.container.getId(), StringInterner
          .weakIntern(taskAttempt.container.getNodeId().toString()),
      taskAttempt.container.getContainerToken(),
      ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP,
      event.getType() == TaskAttemptEventType.TA_TIMED_OUT));
}