This article collects typical usage examples of the Java enum constant org.apache.hadoop.mapred.TaskStatus.Phase.SHUFFLE. If you are wondering what Phase.SHUFFLE means or how it is used in practice, the selected examples below may help; for more context, see the enclosing type, org.apache.hadoop.mapred.TaskStatus.Phase.
The following shows 5 code examples that use Phase.SHUFFLE, ordered by popularity by default.
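Before the examples, a quick orientation: Phase describes which stage of a MapReduce task attempt is currently running, and Phase.SHUFFLE is the stage in which a reduce attempt copies map output. The following is a minimal, hypothetical sketch (not taken from the examples) of branching on the Phase constants that actually appear below; the class name, method name and labels are invented, and a Hadoop 1.x-era org.apache.hadoop.mapred classpath is assumed.

import org.apache.hadoop.mapred.TaskStatus.Phase;

public class PhaseLabels {
  /** Maps the Phase constants used in the examples below to a short description. */
  static String describe(Phase p) {
    switch (p) {
      case MAP:
        return "map function running";
      case SHUFFLE:
        return "reduce attempt copying map output";    // the phase this article is about
      case SORT:
        return "reduce attempt merging/sorting map output";
      case REDUCE:
        return "reduce function running";
      default:
        return p.toString();                           // any other phases are not covered here
    }
  }
}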
Example 1: getRate
public double getRate(Phase p) {
  if (p == Phase.MAP) {
    return this.mapRate;
  } else if (p == Phase.SHUFFLE) {
    return this.copyRate;
  } else if (p == Phase.SORT) {
    return this.sortRate;
  } else if (p == Phase.REDUCE) {
    return this.reduceRate;
  } else {
    throw new RuntimeException("Invalid phase " + p);
  }
}
Example 2: handleAllMapsCompletedTaskAction
/**
 * Starts "running" the REDUCE phase of reduce upon being notified that
 * all map tasks are (successfully) done.
 *
 * @param action contains the notification for one of the reduce tasks
 * @param now current simulation time
 * @return new events generated, a single TaskAttemptCompletionEvent for the
 *         reduce
 */
private List<SimulatorEvent> handleAllMapsCompletedTaskAction(
    AllMapsCompletedTaskAction action, long now) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling all maps completed task action " + action);
  }
  TaskAttemptID taskId = action.getTaskID();
  SimulatorTaskInProgress tip = tasks.get(taskId);
  // If tip is null here it is because the task attempt to be notified is
  // unknown to this TaskTracker.
  TaskStatus status = tip.getTaskStatus();
  if (status.getIsMap()) {
    throw new IllegalStateException(
        "Invalid AllMapsCompletedTaskAction, task attempt "
        + "to be notified is a map: " + taskId + " " + status);
  }
  if (status.getPhase() != Phase.SHUFFLE) {
    throw new IllegalArgumentException(
        "Reducer task attempt already notified: " + taskId + " " + status);
  }
  // Warning: setPhase() uses System.currentTimeMillis() internally to
  // set shuffle and sort times, but we overwrite that manually anyway
  status.setPhase(Phase.REDUCE);
  status.setShuffleFinishTime(now);
  status.setSortFinishTime(now);
  // Forecast the completion of this reduce
  TaskAttemptCompletionEvent e = createTaskAttemptCompletionEvent(tip, now);
  return Collections.<SimulatorEvent>singletonList(e);
}
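To see the SHUFFLE-to-REDUCE transition of Example 2 in isolation, here is a hypothetical snippet that reuses only the ReduceTaskStatus constructor and the TaskStatus setters already shown in these examples; the attempt id, tracker address and simulation time are made-up values, and the imports are assumed to be the same ones the examples use.

// Hypothetical: a freshly launched reduce attempt starts out in the SHUFFLE phase.
org.apache.hadoop.mapred.TaskAttemptID attemptId =
    new org.apache.hadoop.mapred.TaskAttemptID("jt", 1, false, 7, 0);  // deprecated ctor, made-up ids
TaskStatus status = new ReduceTaskStatus(attemptId, 0f, 1, State.RUNNING, "", "",
    "tracker_host:50060", Phase.SHUFFLE, new Counters());

long now = 10000L;  // made-up simulation time
if (status.getPhase() == Phase.SHUFFLE) {
  // Once all maps are done, the attempt moves on to REDUCE; the shuffle/sort
  // finish times set internally by setPhase() are overwritten with the simulation time.
  status.setPhase(Phase.REDUCE);
  status.setShuffleFinishTime(now);
  status.setSortFinishTime(now);
}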
Example 3: findLaunchTaskActions
int findLaunchTaskActions(HeartbeatResponse response) {
  TaskTrackerAction[] actions = response.getActions();
  int numLaunchTaskActions = 0;
  for (TaskTrackerAction action : actions) {
    if (action instanceof SimulatorLaunchTaskAction) {
      Task task = ((SimulatorLaunchTaskAction) action).getTask();
      numLaunchTaskActions++;
      TaskAttemptID taskId = task.getTaskID();
      if (tasks.containsKey(taskId)) {
        // already have this task; no need to generate a new status
        continue;
      }
      TaskStatus status;
      if (task.isMapTask()) {
        status = new MapTaskStatus(taskId, 0f, 1, State.RUNNING, "", "",
            taskTrackerName, Phase.MAP, new Counters());
      } else {
        status = new ReduceTaskStatus(taskId, 0f, 1, State.RUNNING, "", "",
            taskTrackerName, Phase.SHUFFLE, new Counters());
      }
      status.setRunState(State.SUCCEEDED);
      status.setStartTime(this.now);
      SimulatorTaskInProgress tip = new SimulatorTaskInProgress(
          (SimulatorLaunchTaskAction) action, status, this.now);
      tasks.put(taskId, tip);
    }
  }
  return numLaunchTaskActions;
}
Example 4: handleSimulatorLaunchTaskAction
/**
 * Launches a task on the simulated task tracker.
 *
 * @param action SimulatorLaunchTaskAction sent by the job tracker
 * @param now current simulation time
 * @return new events generated, a TaskAttemptCompletionEvent for map
 *         tasks, empty otherwise
 */
private List<SimulatorEvent> handleSimulatorLaunchTaskAction(
    SimulatorLaunchTaskAction action, long now) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling launch task action " + action);
  }
  // First, create statuses and update used slots for map and reduce
  // tasks separately
  Task task = action.getTask();
  TaskAttemptID taskId = task.getTaskID();
  if (tasks.containsKey(taskId)) {
    throw new IllegalArgumentException("Multiple launch of task id = " + taskId);
  }
  // The constructors of MapTaskStatus and ReduceTaskStatus need the deprecated
  // o.a.h.mapred.TaskAttemptID, hence the downgrade
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  TaskStatus status;
  int numSlotsRequired = task.getNumSlotsRequired();
  Counters emptyCounters = new Counters();
  if (task.isMapTask()) {
    status = new MapTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
                               State.RUNNING, "", "", taskTrackerName,
                               Phase.MAP, emptyCounters);
    usedMapSlots += numSlotsRequired;
    if (usedMapSlots > maxMapSlots) {
      throw new IllegalStateException("usedMapSlots exceeds maxMapSlots: " +
          usedMapSlots + " > " + maxMapSlots);
    }
  } else {
    status = new ReduceTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
                                  State.RUNNING, "", "", taskTrackerName,
                                  Phase.SHUFFLE, emptyCounters);
    usedReduceSlots += numSlotsRequired;
    if (usedReduceSlots > maxReduceSlots) {
      throw new IllegalStateException("usedReduceSlots exceeds maxReduceSlots: " +
          usedReduceSlots + " > " + maxReduceSlots);
    }
  }
  // Second, create and store a TIP
  status.setStartTime(now);
  SimulatorTaskInProgress tip =
      new SimulatorTaskInProgress(action, status, now);
  tasks.put(taskId, tip);
  // Third, schedule events for ourselves
  if (task.isMapTask()) {
    // we know when this task attempt ends only if it is a map
    TaskAttemptCompletionEvent e = createTaskAttemptCompletionEvent(tip, now);
    return Collections.<SimulatorEvent>singletonList(e);
  } else {
    // for a reduce, the completion time can only be determined when all maps are done
    return SimulatorEngine.EMPTY_EVENTS;
  }
}
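The javadoc above makes an asymmetric promise: launching a map yields a completion event right away, while launching a reduce yields nothing until the AllMapsCompletedTaskAction of Example 2 arrives. A hypothetical caller-side sketch of that contract, assuming action and now are already in scope:

List<SimulatorEvent> events = handleSimulatorLaunchTaskAction(action, now);
if (events.isEmpty()) {
  // a reduce attempt: it stays in Phase.SHUFFLE until all maps are done
} else {
  // a map attempt: events holds the single forecast TaskAttemptCompletionEvent
}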
Example 5: runReduceTask
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
                          long reduceStart, long mapDoneDelay,
                          long reduceRuntime, long killHeartbeat) {
  long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
  long reduceDone = mapDone + reduceRuntime;
  long reduceEndHeartbeat = nextHeartbeat(reduceDone);
  final boolean isKilled = (killHeartbeat >= 0);
  if (isKilled) {
    reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }
  LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone +
            ", reduceDone=" + reduceDone +
            ", reduceEndHeartbeat=" + reduceEndHeartbeat +
            ", killHeartbeat=" + killHeartbeat);
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
                             numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
  ReduceTaskAttemptInfo taskAttemptInfo =
      new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
                                reduceRuntime);
  TaskTrackerAction action =
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
      action);
  if (!isKilled || mapDone < killHeartbeat) {
    action = new AllMapsCompletedTaskAction(task.getTaskID());
    heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  for (long simulationTime = reduceStart + heartbeatInterval;
       simulationTime <= reduceEndHeartbeat;
       simulationTime += heartbeatInterval) {
    State state = simulationTime < reduceEndHeartbeat ?
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == reduceEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    // mapDone is when the all-maps-done event is delivered
    Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE;
    ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        reduceStatus);
  }
}
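Finally, a hypothetical invocation of Example 5's runReduceTask, just to make the parameters concrete; the tracker address and timings are invented, and reduceAttemptId is assumed to be a TaskAttemptID created elsewhere by the test.

// A reduce launched at simulation time 3000 whose maps finish about 2000 ms later
// and whose reduce work takes 5000 ms; the reported phase is Phase.SHUFFLE up to
// the all-maps-done heartbeat and Phase.REDUCE afterwards.
runReduceTask("tracker_host:50060", reduceAttemptId,
              3000L,   // reduceStart
              2000L,   // mapDoneDelay
              5000L,   // reduceRuntime
              -1L);    // killHeartbeat: negative means the attempt is never killed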