This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt.getState. If you are unsure what TaskAttempt.getState does, how to call it, or where to find working samples, the curated examples below should help. You can also explore the enclosing class, org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt, for further context.
Eight code examples of TaskAttempt.getState follow, ordered by popularity.
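Before diving in, a minimal sketch of the call itself (illustrative only, not taken from the examples below): getState() returns a org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState enum value such as RUNNING, SUCCEEDED, FAILED, or KILLED, so the typical pattern is to compare or switch on the result. The task variable here is an assumed handle to an org.apache.hadoop.mapreduce.v2.app.job.Task; the real examples show where such a handle comes from.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;

// Minimal sketch: count how many attempts of a task are still running.
int running = 0;
for (TaskAttempt attempt : task.getAttempts().values()) {
  if (attempt.getState() == TaskAttemptState.RUNNING) {
    running++;
  }
}
System.out.println(running + " attempt(s) of " + task.getID() + " still running");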
Example 1: updateStatus
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  // Build a status report that echoes the attempt's current state and
  // deliver it through the application's event handler.
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
      new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event =
      new TaskAttemptStatusUpdateEvent(attempt.getID(), status);
  app.getContext().getEventHandler().handle(event);
}
Example 2: getSuccessfulAttempt
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
private TaskAttempt getSuccessfulAttempt(Task task) {
  // Return the first attempt that reached SUCCEEDED, or null if none did.
  for (TaskAttempt attempt : task.getAttempts().values()) {
    if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
      return attempt;
    }
  }
  return null;
}
Example 3: selectBestAttempt
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
private TaskAttempt selectBestAttempt() {
  if (successfulAttempt != null) {
    return attempts.get(successfulAttempt);
  }
  float progress = 0f;
  TaskAttempt result = null;
  for (TaskAttempt at : attempts.values()) {
    switch (at.getState()) {
      // ignore all failed task attempts
      case FAILED:
      case KILLED:
        continue;
    }
    if (result == null) {
      result = at; // the first time around
    }
    // calculate the best progress
    float attemptProgress = at.getProgress();
    if (attemptProgress > progress) {
      result = at;
      progress = attemptProgress;
    }
  }
  return result;
}
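A note on Example 3's design: the switch statement acts purely as a filter, with continue skipping FAILED and KILLED attempts before the progress comparison. For contrast, the same selection can be sketched in a functional style. The method below is an illustrative alternative, not Hadoop source; it assumes the same attempts map and TaskAttemptState enum as Example 3, and glosses over the original loop's tie-breaking toward the first live attempt.

import java.util.Comparator;
import java.util.EnumSet;

// Illustrative alternative: pick the live attempt with the highest progress,
// or null when every attempt has failed or been killed.
private TaskAttempt selectBestAttemptViaStreams() {
  EnumSet<TaskAttemptState> dead =
      EnumSet.of(TaskAttemptState.FAILED, TaskAttemptState.KILLED);
  return attempts.values().stream()
      .filter(at -> !dead.contains(at.getState()))
      .max(Comparator.comparing(TaskAttempt::getProgress))
      .orElse(null);
}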
Example 4: isFinished
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
@Override
public boolean isFinished() {
  // The task is finished as soon as any one of its attempts has succeeded.
  for (TaskAttempt attempt : attempts.values()) {
    if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
      return true;
    }
  }
  return false;
}
Example 5: updateAttempt
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  if (job == null) {
    return;
  }
  Task task = job.getTask(taskID);
  if (task == null) {
    return;
  }
  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  TaskAttempt taskAttempt = task.getAttempt(attemptID);
  if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
    boolean isNew = false;
    // is this a new success?
    synchronized (doneTasks) {
      if (!doneTasks.contains(task)) {
        doneTasks.add(task);
        isNew = true;
      }
    }
    // It's a new completion.
    // Note that if a task completes twice [because of a previous speculation
    // and a race, or a success followed by loss of the machine with the
    // local data] we only count the first one.
    if (isNew) {
      long finish = timestamp;
      if (start > 1L && finish > 1L && start <= finish) {
        long duration = finish - start;
        DataStatistics statistics = dataStatisticsForTask(taskID);
        if (statistics != null) {
          statistics.add(duration);
        }
      }
    }
  }
}
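The durations accumulated by Example 5 end up in a per-task DataStatistics object. As a rough sketch of what that buys the speculator, the snippet below feeds a few made-up durations into Hadoop's org.apache.hadoop.mapreduce.v2.app.speculate.DataStatistics and reads back the running mean and standard deviation; the numbers and the closing comment are illustrative assumptions, not output of the example above.

import org.apache.hadoop.mapreduce.v2.app.speculate.DataStatistics;

// Three observed attempt durations, in milliseconds (made-up values).
DataStatistics stats = new DataStatistics();
stats.add(42000);
stats.add(45000);
stats.add(90000); // a straggler
System.out.println("mean=" + stats.mean() + " std=" + stats.std());
// An attempt whose projected runtime sits far above the mean is a
// natural candidate for speculative execution.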
Example 6: updateAttempt
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  super.updateAttempt(status, timestamp);
  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  if (job == null) {
    return;
  }
  Task task = job.getTask(taskID);
  if (task == null) {
    return;
  }
  TaskAttempt taskAttempt = task.getAttempt(attemptID);
  if (taskAttempt == null) {
    return;
  }
  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  // We need to do two things.
  // 1: If this is a completion, we accumulate statistics in the superclass.
  // 2: If this is not a completion, we learn more about it below.
  if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
    // See if this attempt is already in the registry.
    AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
    AtomicLong estimateVarianceContainer =
        attemptRuntimeEstimateVariances.get(taskAttempt);
    if (estimateContainer == null) {
      if (attemptRuntimeEstimates.get(taskAttempt) == null) {
        attemptRuntimeEstimates.put(taskAttempt, new AtomicLong());
        estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
      }
    }
    if (estimateVarianceContainer == null) {
      attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
      estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
    }
    long estimate = -1;
    long varianceEstimate = -1;
    // This code assumes that we'll never consider starting a third
    // speculative task attempt if two are already running for this task.
    if (start > 0 && timestamp > start) {
      // Linear extrapolation: elapsed time divided by the fraction complete.
      estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
      varianceEstimate = (long) (estimate * status.progress / 10);
    }
    if (estimateContainer != null) {
      estimateContainer.set(estimate);
    }
    if (estimateVarianceContainer != null) {
      estimateVarianceContainer.set(varianceEstimate);
    }
  }
}
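To make Example 6's extrapolation concrete, here is a quick worked instance with made-up numbers: an attempt that started at t = 1000 ms, reports at t = 4000 ms, and is 25% complete is projected to take (4000 - 1000) / 0.25 = 12000 ms in total.

// Worked example of the formula above (illustrative numbers only).
long start = 1000L;      // attempt launch time, ms
long timestamp = 4000L;  // time of the status update, ms
float progress = 0.25f;  // fraction of the task reported complete

long estimate = (long) ((timestamp - start) / Math.max(0.0001, progress));
// (4000 - 1000) / 0.25 = 12000 ms estimated total runtime
long varianceEstimate = (long) (estimate * progress / 10);
// 12000 * 0.25 / 10 = 300
System.out.println("estimate=" + estimate + " variance=" + varianceEstimate);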
Example 7: getState
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
@Override
public TaskAttemptState getState() {
  if (overridingState != null) {
    return overridingState;
  }
  TaskAttemptState result =
      getProgress() < 1.0F ? TaskAttemptState.RUNNING : TaskAttemptState.SUCCEEDED;
  if (result == TaskAttemptState.SUCCEEDED) {
    overridingState = TaskAttemptState.SUCCEEDED;
    System.out.println("MyTaskAttemptImpl.getState() -- attempt " + myAttemptID + " finished.");
    slotsInUse.addAndGet(-taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
    (myAttemptID.getTaskId().getTaskType() == TaskType.MAP
        ? completedMaps : completedReduces).getAndIncrement();
    // check for a spectacularly successful speculation
    TaskId taskID = myAttemptID.getTaskId();
    Task task = myJob.getTask(taskID);
    for (TaskAttempt otherAttempt : task.getAttempts().values()) {
      if (otherAttempt != this
          && otherAttempt.getState() == TaskAttemptState.RUNNING) {
        // We had two instances running. Try to determine how much
        // we might have saved by speculation.
        if (getID().getId() > otherAttempt.getID().getId()) {
          // the speculation won: the later (speculative) attempt finished first
          successfulSpeculations.getAndIncrement();
          float hisProgress = otherAttempt.getProgress();
          long hisStartTime = ((MyTaskAttemptImpl) otherAttempt).startMockTime;
          System.out.println("TLTRE: A speculation finished at time "
              + clock.getTime()
              + ". The stalled attempt is at " + (hisProgress * 100.0)
              + "% progress, and it started at "
              + hisStartTime + ", which is "
              + (clock.getTime() - hisStartTime) + " ago.");
          long originalTaskEndEstimate =
              hisStartTime + estimator.estimatedRuntime(otherAttempt.getID());
          System.out.println(
              "TLTRE: We would have expected the original attempt to take "
              + estimator.estimatedRuntime(otherAttempt.getID())
              + ", finishing at " + originalTaskEndEstimate);
          long estimatedSavings = originalTaskEndEstimate - clock.getTime();
          taskTimeSavedBySpeculation.addAndGet(estimatedSavings);
          System.out.println("TLTRE: The task is " + task.getID());
          slotsInUse.addAndGet(-taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
          ((MyTaskAttemptImpl) otherAttempt).overridingState = TaskAttemptState.KILLED;
        } else {
          System.out.println(
              "TLTRE: The normal attempt beat the speculation in " + task.getID());
        }
      }
    }
  }
  return result;
}
Example 8: countTasksAndAttempts
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; // import the package/class the method depends on
/**
 * Go through a job and update the member variables with counts for
 * information to output in the page.
 *
 * @param job the job to get counts for.
 */
private void countTasksAndAttempts(Job job) {
  numReduces = 0;
  numMaps = 0;
  final Map<TaskId, Task> tasks = job.getTasks();
  if (tasks == null) {
    return;
  }
  for (Task task : tasks.values()) {
    // Attempt counts
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    int successful, failed, killed;
    for (TaskAttempt attempt : attempts.values()) {
      successful = 0;
      failed = 0;
      killed = 0;
      if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
        // Do Nothing
      } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
        // Do Nothing
      } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt.getState())) {
        ++successful;
      } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
        ++failed;
      } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
        ++killed;
      }
      switch (task.getType()) {
        case MAP:
          successfulMapAttempts += successful;
          failedMapAttempts += failed;
          killedMapAttempts += killed;
          if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
            numMaps++;
            avgMapTime += (attempt.getFinishTime() - attempt.getLaunchTime());
          }
          break;
        case REDUCE:
          successfulReduceAttempts += successful;
          failedReduceAttempts += failed;
          killedReduceAttempts += killed;
          if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
            numReduces++;
            avgShuffleTime += (attempt.getShuffleFinishTime()
                - attempt.getLaunchTime());
            avgMergeTime += (attempt.getSortFinishTime()
                - attempt.getShuffleFinishTime());
            avgReduceTime += (attempt.getFinishTime()
                - attempt.getSortFinishTime());
          }
          break;
      }
    }
  }
  // The avg* fields hold running totals until they are divided by the
  // number of successful tasks below.
  if (numMaps > 0) {
    avgMapTime = avgMapTime / numMaps;
  }
  if (numReduces > 0) {
    avgReduceTime = avgReduceTime / numReduces;
    avgShuffleTime = avgShuffleTime / numReduces;
    avgMergeTime = avgMergeTime / numReduces;
  }
}