This article collects typical usage examples of the Java class org.apache.hadoop.tools.rumen.MapTaskAttemptInfo. If you have been wondering what exactly MapTaskAttemptInfo does, how to use it, or where to find it in real code, the curated examples below may help.
MapTaskAttemptInfo belongs to the org.apache.hadoop.tools.rumen package. Five code examples of the class are shown below, sorted by popularity by default.
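All five examples revolve around the same constructor shape, so here is a minimal, self-contained sketch of that pattern as orientation. The 3-arg constructor MapTaskAttemptInfo(State, TaskInfo, long) is taken directly from the examples below (it is deprecated in newer Hadoop releases, which is why Example 1 suppresses deprecation warnings); the getState()/getRuntime() getters are assumed from Rumen's TaskAttemptInfo base class and should be verified against your Hadoop version:

import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class MapTaskAttemptInfoSketch {
  public static void main(String[] args) {
    // TaskInfo(bytesIn, recsIn, bytesOut, recsOut, maxMemory); the examples
    // below pass -1 for the last field where the memory figure is not known.
    TaskInfo counters = new TaskInfo(1024, 10, 2048, 10, -1);
    // A successful map attempt that took 100 ms of (simulated) runtime.
    MapTaskAttemptInfo attempt =
        new MapTaskAttemptInfo(State.SUCCEEDED, counters, 100);
    // Getter names assumed from TaskAttemptInfo, not shown in the examples.
    System.out.println(attempt.getState() + ", " + attempt.getRuntime() + " ms");
  }
}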
Example 1: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
  case MAP:
    return new MapTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            m_bytesIn[taskNumber], m_recsIn[taskNumber],
            m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);
  case REDUCE:
    return new ReduceTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            r_bytesIn[taskNumber], r_recsIn[taskNumber],
            r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
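A note on the arguments: the five values handed to TaskInfo are, in order, input bytes, input records, output bytes, output records, and the task's maximum memory, with -1 presumably standing in for "unknown" (that reading of the last field is an assumption, not stated in the example). The hard-coded 100 is the attempt's runtime, and the @SuppressWarnings("deprecation") is consistent with the 3-arg MapTaskAttemptInfo constructor being deprecated in later Hadoop versions.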
Example 2: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
  case MAP:
    return new MapTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            m_bytesIn[taskNumber], m_recsIn[taskNumber],
            m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);
  case REDUCE:
    return new ReduceTaskAttemptInfo(
        State.SUCCEEDED,
        new TaskInfo(
            r_bytesIn[taskNumber], r_recsIn[taskNumber],
            r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
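Example 2 is identical to Example 1 apart from the dropped @SuppressWarnings annotation. One detail worth calling out is the reduce branch: ReduceTaskAttemptInfo takes three durations rather than one, which in Rumen correspond to the shuffle, merge (sort), and reduce phases. A hedged sketch; the per-phase getters and the total-runtime behaviour are assumptions to verify against your version:

ReduceTaskAttemptInfo reduceAttempt = new ReduceTaskAttemptInfo(
    State.SUCCEEDED,
    new TaskInfo(0, 0, 0, 0, -1),  // no counters recorded
    100,   // shuffle phase runtime
    100,   // merge/sort phase runtime
    100);  // reduce phase runtime
// Assumed: getRuntime() returns the sum of the three phases (300 here),
// alongside per-phase getters such as getShuffleRuntime().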
Example 3: getTaskAttemptInfo
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
@Override
public TaskAttemptInfo getTaskAttemptInfo(
    TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
  case MAP:
    return new MapTaskAttemptInfo(
        State.SUCCEEDED, new TaskInfo(
            m_bytesIn[taskNumber], m_recsIn[taskNumber],
            m_bytesOut[taskNumber], m_recsOut[taskNumber], -1), 100);
  case REDUCE:
    return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, new TaskInfo(
            r_bytesIn[taskNumber], r_recsIn[taskNumber],
            r_bytesOut[taskNumber], r_recsOut[taskNumber], -1), 100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
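Example 3 is functionally the same as the previous two; only the whitespace differs. In all three, getTaskAttemptInfo overrides the factory method of Rumen's JobStory interface (that this is the interface involved is an inference from the signature, not stated on this page). A hypothetical caller, with jobStory standing for an instance of the enclosing class:

// Hypothetical usage; TaskType comes from org.apache.hadoop.mapreduce.
TaskAttemptInfo info = jobStory.getTaskAttemptInfo(TaskType.MAP, 0, 0);
long runtime = info.getRuntime();  // 100 in these examples (assumed getter)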
Example 4: runMapTask
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
    long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat >= 0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }
  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone +
      ", mapEndHeartbeat=" + mapEndHeartbeat +
      ", killHeartbeat=" + killHeartbeat);
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, "dummysplitclass",
      null, numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
  MapTaskAttemptInfo taskAttemptInfo =
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action =
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  for (long simulationTime = mapStart + heartbeatInterval;
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ?
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        mapStatus);
  }
}
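This example depends on a nextHeartbeat helper that is not shown: it maps an arbitrary simulation timestamp to the first heartbeat at or after it. One plausible implementation, assuming heartbeats fire at fixed multiples of heartbeatInterval (the real Mumak test code may align them differently):

// Hypothetical helper: round t up to the next heartbeat boundary.
long nextHeartbeat(long t) {
  long rem = t % heartbeatInterval;
  return (rem == 0) ? t : t + heartbeatInterval - rem;
}

With that reading, the loop reports RUNNING at every heartbeat after launch and flips the final report at mapEndHeartbeat to SUCCEEDED, or to KILLED if a kill was scheduled.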
Example 5: runMapTask
import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo; // import the required package/class
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
    long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat >= 0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }
  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone +
      ", mapEndHeartbeat=" + mapEndHeartbeat +
      ", killHeartbeat=" + killHeartbeat);
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0,
      new TaskSplitIndex(), numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
  MapTaskAttemptInfo taskAttemptInfo =
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action =
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  for (long simulationTime = mapStart + heartbeatInterval;
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ?
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        mapStatus);
  }
}
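Example 5 differs from Example 4 in exactly one line: the MapTask constructor. Example 4 uses the older form that takes a split class name and a split object, while this version passes a TaskSplitIndex, the form newer MapReduce APIs use to locate the serialized split. The two snippets therefore appear to target different Hadoop versions of the same simulator test.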