本文整理汇总了Java中org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo类的典型用法代码示例。如果您正苦于以下问题:Java ReduceTaskAttemptInfo类的具体用法?Java ReduceTaskAttemptInfo怎么用?Java ReduceTaskAttemptInfo使用的例子?那么,这里精选的类代码示例或许可以为您提供帮助。
ReduceTaskAttemptInfo类属于org.apache.hadoop.tools.rumen包,在下文中一共展示了ReduceTaskAttemptInfo类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Fabricates an always-successful attempt record for the given task.
 * Byte/record counters come from the per-task arrays; the attempt number is
 * ignored because every simulated attempt behaves identically.
 */
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  if (taskType == TaskType.MAP) {
    TaskInfo mapInfo = new TaskInfo(
        m_bytesIn[taskNumber], m_recsIn[taskNumber],
        m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
    // 100 is the fixed simulated map runtime.
    return new MapTaskAttemptInfo(State.SUCCEEDED, mapInfo, 100);
  }
  if (taskType == TaskType.REDUCE) {
    TaskInfo reduceInfo = new TaskInfo(
        r_bytesIn[taskNumber], r_recsIn[taskNumber],
        r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
    // Each of the three ctor timing arguments is fixed at 100.
    return new ReduceTaskAttemptInfo(State.SUCCEEDED, reduceInfo, 100, 100, 100);
  }
  // Only MAP and REDUCE are modelled by this story.
  throw new UnsupportedOperationException();
}
示例2: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Builds a synthetic SUCCEEDED attempt for the requested task, sized from
 * the per-task byte/record arrays. The attempt number is unused: all
 * simulated attempts of a task are identical.
 */
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP: {
      final TaskInfo info = new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
      return new MapTaskAttemptInfo(State.SUCCEEDED, info, 100);
    }
    case REDUCE: {
      final TaskInfo info = new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
      return new ReduceTaskAttemptInfo(State.SUCCEEDED, info, 100, 100, 100);
    }
    default:
      // Anything other than MAP/REDUCE is not modelled.
      throw new UnsupportedOperationException();
  }
}
示例3: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Returns a synthetic, successful attempt description for the task.
 * Input/output sizes are looked up by {@code taskNumber};
 * {@code taskAttemptNumber} plays no role in this simulation.
 */
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  if (taskType == TaskType.MAP) {
    return new MapTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            m_bytesIn[taskNumber], m_recsIn[taskNumber],
            m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);
  } else if (taskType == TaskType.REDUCE) {
    return new ReduceTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            r_bytesIn[taskNumber], r_recsIn[taskNumber],
            r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
示例4: getReduceTaskAttemptInfo
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Given the reduce taskAttemptID, returns the TaskAttemptInfo. Deconstructs
 * the reduce taskAttemptID and looks up the jobStory with the parts taskType,
 * id of task, id of task attempt.
 *
 * @param taskTracker
 *          tasktracker (unused here; kept for signature compatibility)
 * @param taskAttemptID
 *          task-attempt, must identify a reduce attempt
 * @return TaskAttemptInfo for the reduce task-attempt
 */
private TaskAttemptInfo getReduceTaskAttemptInfo(TaskTracker taskTracker,
    TaskAttemptID taskAttemptID) {
  assert (!taskAttemptID.isMap());
  TaskID taskId = taskAttemptID.getTaskID();
  // This accessor is reduce-only (enforced by the assert above), so the
  // old "if (isMap()) taskType = MAP" branch was dead code; always look
  // the attempt up as a REDUCE task.
  TaskAttemptInfo taskAttemptInfo = jobStory.getTaskAttemptInfo(
      TaskType.REDUCE, taskId.getId(), taskAttemptID.getId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("get an attempt: "
        + taskAttemptID.toString()
        + ", state="
        + taskAttemptInfo.getRunState()
        + ", runtime="
        + ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime());
  }
  return taskAttemptInfo;
}
示例5: getReduceTaskAttemptInfo
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Given the reduce taskAttemptID, returns the TaskAttemptInfo. Deconstructs
 * the reduce taskAttemptID and looks up the jobStory with the parts taskType,
 * id of task, id of task attempt.
 *
 * @param taskTracker
 *          tasktracker
 * @param taskAttemptID
 *          task-attempt
 * @return TaskAttemptInfo for the reduce task-attempt
 */
private TaskAttemptInfo getReduceTaskAttemptInfo(TaskTracker taskTracker,
    TaskAttemptID taskAttemptID) {
  assert (taskAttemptID.getTaskType() == TaskType.REDUCE);
  final TaskID taskId = taskAttemptID.getTaskID();
  final TaskType taskType = taskAttemptID.getTaskType();
  final TaskAttemptInfo info =
      jobStory.getTaskAttemptInfo(taskType, taskId.getId(),
          taskAttemptID.getId());
  if (LOG.isDebugEnabled()) {
    // For reduces the interesting figure is the reduce-phase runtime.
    final long runtime = (taskType == TaskType.MAP)
        ? info.getRuntime()
        : ((ReduceTaskAttemptInfo) info).getReduceRuntime();
    LOG.debug("get an attempt: " + taskAttemptID.toString()
        + ", state=" + info.getRunState()
        + ", runtime=" + runtime);
  }
  return info;
}
示例6: SimulatorTaskInProgress
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Constructs an object by copying most of the fields from a
 * SimulatorTaskAction.
 */
public SimulatorTaskInProgress(SimulatorLaunchTaskAction action,
    TaskStatus taskStatus, long now) {
  this.taskStatus = taskStatus;
  this.taskAttempInfo = action.getTaskAttemptInfo();
  // Maps expose a single runtime; reduces expose the reduce-phase portion
  // of the attempt info.
  this.userSpaceRunTime = taskStatus.getIsMap()
      ? taskAttempInfo.getRuntime()
      : ((ReduceTaskAttemptInfo) taskAttempInfo).getReduceRuntime();
}
示例7: runReduceTask
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo; //导入依赖的package包/类
/**
 * Schedules the heartbeat actions that drive one simulated reduce attempt:
 * a launch action at {@code reduceStart}, an all-maps-completed action when
 * the maps finish, an optional kill action, and a status report on every
 * intervening heartbeat.
 *
 * @param taskTrackerName tracker the attempt runs on (key into {@code heartbeats})
 * @param taskId          attempt id in the new (mapreduce) API
 * @param reduceStart     heartbeat time at which the launch action is delivered
 * @param mapDoneDelay    delay after {@code reduceStart} until all maps finish
 * @param reduceRuntime   simulated reduce-phase runtime
 * @param killHeartbeat   heartbeat at which the attempt is killed; a negative
 *                        value means the attempt runs to successful completion
 */
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
long reduceStart, long mapDoneDelay,
long reduceRuntime, long killHeartbeat) {
// Align the all-maps-done event to the next heartbeat boundary.
long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
long reduceDone = mapDone + reduceRuntime;
long reduceEndHeartbeat = nextHeartbeat(reduceDone);
final boolean isKilled = (killHeartbeat>=0);
if (isKilled) {
// A killed attempt ends on the first heartbeat after the kill is delivered.
reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
}
LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone +
", reduceDone=" + reduceDone +
", reduceEndHeartbeat=" + reduceEndHeartbeat +
", killHeartbeat=" + killHeartbeat);
final int numSlotsRequired = 1;
// The simulator task/actions speak the old (mapred) API.
org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
numSlotsRequired);
// all byte counters are 0
TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
// NOTE(review): the two 0s appear to be the pre-reduce phase runtimes with
// reduceRuntime as the reduce phase — confirm against the ctor's arg order.
ReduceTaskAttemptInfo taskAttemptInfo =
new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0,
reduceRuntime);
TaskTrackerAction action =
new SimulatorLaunchTaskAction(task, taskAttemptInfo);
heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
action);
// Deliver the all-maps-completed event only when the attempt is still
// alive at that point (not yet killed).
if (!isKilled || mapDone < killHeartbeat) {
action = new AllMapsCompletedTaskAction(task.getTaskID());
heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
action);
}
if (isKilled) {
action = new KillTaskAction(taskIdOldApi);
heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
action);
}
// Report RUNNING on each heartbeat until the last one, which reports
// SUCCEEDED — or KILLED when a kill was scheduled.
for(long simulationTime = reduceStart + heartbeatInterval;
simulationTime <= reduceEndHeartbeat;
simulationTime += heartbeatInterval) {
State state = simulationTime < reduceEndHeartbeat ?
State.RUNNING : State.SUCCEEDED;
if (simulationTime == reduceEndHeartbeat && isKilled) {
state = State.KILLED;
}
// mapDone is when the all maps done event delivered
Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE;
ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
reduceStatus);
}
}