本文整理汇总了Java中org.apache.hadoop.mapred.TaskStatus.Phase.REDUCE属性的典型用法代码示例。如果您正苦于以下问题:Java Phase.REDUCE属性的具体用法?Java Phase.REDUCE怎么用?Java Phase.REDUCE使用的例子?那么恭喜您,这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类org.apache.hadoop.mapred.TaskStatus.Phase的用法示例。
在下文中一共展示了Phase.REDUCE属性的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getRate
/**
 * Returns the measured progress rate for the given task phase.
 *
 * @param p the phase whose rate is requested
 * @return the rate recorded for {@code p}
 * @throws RuntimeException if {@code p} is not one of MAP, SHUFFLE, SORT, REDUCE
 */
public double getRate(Phase p) {
  switch (p) {
    case MAP:
      return this.mapRate;
    case SHUFFLE:
      return this.copyRate;
    case SORT:
      return this.sortRate;
    case REDUCE:
      return this.reduceRate;
    default:
      throw new RuntimeException("Invalid phase " + p);
  }
}
示例2: makeRunning
/**
 * Marks the given task attempt as running on the named tracker and pushes an
 * initial RUNNING status (0% progress) with the supplied start time.
 *
 * @param taskId      the attempt to start
 * @param tip         the task-in-progress owning the attempt
 * @param taskTracker name of the tracker the attempt runs on
 * @param startTime   simulated start time recorded on the status
 */
private void makeRunning(TaskAttemptID taskId, TaskInProgress tip,
    String taskTracker, long startTime) {
  // The attempt's phase follows directly from the task type.
  final Phase phase = tip.isMapTask() ? Phase.MAP : Phase.REDUCE;
  final String host = JobInProgress.convertTrackerNameToHostName(taskTracker);
  addRunningTaskToTIP(tip, taskId, new TaskTrackerStatus(taskTracker, host), true);
  // Fresh RUNNING status: 0.0f progress, 1 slot, empty state/diagnostic strings.
  final TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId,
      0.0f, 1, TaskStatus.State.RUNNING, "", "", taskTracker,
      phase, new Counters());
  status.setStartTime(startTime);
  updateTaskStatus(tip, status);
}
示例3: handleKillTaskAction
/**
 * Kills a task attempt.
 *
 * @param action contains the task attempt to kill
 * @param now current simulation time
 * @return new events generated in response, empty
 */
private List<SimulatorEvent> handleKillTaskAction(KillTaskAction action, long now) {
  final TaskAttemptID taskId = action.getTaskID();
  // we don't have a nice(r) toString() in Hadoop's TaskActions
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling kill task action, taskId=" + taskId + ", now=" + now);
  }
  final SimulatorTaskInProgress tip = tasks.get(taskId);
  if (tip == null) {
    // Safety check: We might get a KillTaskAction even for completed reduces
    return SimulatorEngine.EMPTY_EVENTS;
  }
  // Bring the attempt's progress up to the current simulation time before
  // snapshotting its status.
  progressTaskStatus(tip, now);
  final TaskStatus finalStatus = (TaskStatus) tip.getTaskStatus().clone();
  finalStatus.setFinishTime(now);
  finalStatus.setRunState(State.KILLED);
  finishRunningTask(finalStatus, now);
  // A completion event may already be in flight for maps and for reduces that
  // reached the REDUCE phase.
  final boolean completionMayBePending =
      finalStatus.getIsMap() || finalStatus.getPhase() == Phase.REDUCE;
  if (completionMayBePending) {
    // if we have already created a task attempt completion event we remember
    // the task id, so that we can safely ignore the event when its delivered
    orphanTaskCompletions.add(taskId);
  }
  return SimulatorEngine.EMPTY_EVENTS;
}
示例4: expectReduceTask
/**
 * Registers the expectation that a successful reduce-task completion event
 * arrives at simulation time {@code mapDone}, reporting a reduce that starts
 * when all maps are done and finishes {@code reduceRuntime} later.
 *
 * @param taskTracker   tracker expected to send the completion event
 * @param taskId        the reduce attempt (new-API id, downgraded internally)
 * @param mapDone       simulation time when all maps are done
 * @param reduceRuntime running time of the reduce phase
 */
public void expectReduceTask(SimulatorTaskTracker taskTracker,
    TaskAttemptID taskId, long mapDone,
    long reduceRuntime) {
  long reduceDone = mapDone + reduceRuntime;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  ReduceTaskStatus status = new ReduceTaskStatus(taskIdOldApi, 1.0f, 1,
      State.SUCCEEDED, null, null, null, Phase.REDUCE, null);
  // Fix: record the start time as well, matching the sibling helper that sets
  // both start and finish times; otherwise the expected status carries an
  // unset start time while the actual one does not.
  status.setStartTime(mapDone);
  status.setFinishTime(reduceDone);
  TaskAttemptCompletionEvent completionEvent =
      new TaskAttemptCompletionEvent(taskTracker, status);
  addExpected(mapDone, completionEvent);
}
示例5: expectReduceTask
/**
 * Queues the expectation of a successful reduce completion event: the reduce
 * is assumed to start as soon as all maps finish and to run for
 * {@code reduceRuntime}.
 *
 * @param taskTracker   tracker expected to report the completion
 * @param taskId        the reduce attempt (new-API id)
 * @param mapDone       simulation time at which all maps are done
 * @param reduceRuntime duration of the reduce phase
 */
public void expectReduceTask(SimulatorTaskTracker taskTracker,
    TaskAttemptID taskId, long mapDone,
    long reduceRuntime) {
  // Reduce runs from mapDone until mapDone + reduceRuntime.
  final long finishTime = mapDone + reduceRuntime;
  // The status constructor needs the old-API attempt id.
  final org.apache.hadoop.mapred.TaskAttemptID oldApiId =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  final ReduceTaskStatus status = new ReduceTaskStatus(oldApiId, 1.0f, 1,
      State.SUCCEEDED, null, null, null, Phase.REDUCE, null);
  status.setStartTime(mapDone);
  status.setFinishTime(finishTime);
  addExpected(mapDone, new TaskAttemptCompletionEvent(taskTracker, status));
}
示例6: runReduceTask
/**
 * Scripts a full reduce-task life cycle on the given tracker: a launch action
 * at reduceStart, an all-maps-completed action once maps are done, an optional
 * kill action, and periodic heartbeat status reports (SHUFFLE until maps are
 * done, then REDUCE) up to the terminal heartbeat.
 *
 * @param taskTrackerName tracker the reduce runs on
 * @param taskId          the reduce attempt (new-API id, downgraded internally)
 * @param reduceStart     simulation time the launch action is delivered
 * @param mapDoneDelay    delay after reduceStart until all maps are done
 * @param reduceRuntime   running time of the reduce phase proper
 * @param killHeartbeat   heartbeat at which the task is killed; negative means
 *                        the task runs to successful completion
 */
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
long reduceStart, long mapDoneDelay,
long reduceRuntime, long killHeartbeat) {
// Map completion is only observed at a heartbeat boundary.
long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
long reduceDone = mapDone + reduceRuntime;
long reduceEndHeartbeat = nextHeartbeat(reduceDone);
final boolean isKilled = (killHeartbeat>=0);
if (isKilled) {
// A killed task ends on the first heartbeat after the kill is delivered.
reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
}
LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone +
", reduceDone=" + reduceDone +
", reduceEndHeartbeat=" + reduceEndHeartbeat +
", killHeartbeat=" + killHeartbeat);
final int numSlotsRequired = 1;
org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
numSlotsRequired);
// all byte counters are 0
TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
ReduceTaskAttemptInfo taskAttemptInfo =
new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
reduceRuntime);
TaskTrackerAction action =
new SimulatorLaunchTaskAction(task, taskAttemptInfo);
// Schedule the launch on the tracker's heartbeat at reduceStart.
heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
action);
// Deliver "all maps completed" only if the task survives until mapDone.
if (!isKilled || mapDone < killHeartbeat) {
action = new AllMapsCompletedTaskAction(task.getTaskID());
heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
action);
}
if (isKilled) {
action = new KillTaskAction(taskIdOldApi);
heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
action);
}
// Emit a status report on every heartbeat from the first one after launch
// through the terminal heartbeat.
for(long simulationTime = reduceStart + heartbeatInterval;
simulationTime <= reduceEndHeartbeat;
simulationTime += heartbeatInterval) {
// RUNNING until the terminal heartbeat, which reports SUCCEEDED...
State state = simulationTime < reduceEndHeartbeat ?
State.RUNNING : State.SUCCEEDED;
// ...unless the task was killed, in which case the terminal state is KILLED.
if (simulationTime == reduceEndHeartbeat && isKilled) {
state = State.KILLED;
}
// mapDone is when the all maps done event delivered
Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE;
ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
reduceStatus);
}
}