This page collects typical usage examples of the Java class org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo. If you are unsure what TaskInfo does or how to use it, the curated examples below should help.
TaskInfo is a static nested class of JobHistoryParser, which lives in the org.apache.hadoop.mapreduce.jobhistory package. Eleven code examples are shown below, ordered roughly by popularity.
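Before the examples, here is a minimal, self-contained sketch of where a TaskInfo usually comes from: parsing a .jhist job history file with JobHistoryParser. The file path below is a placeholder, not taken from the examples:
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class TaskInfoDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // The .jhist path is a placeholder; point it at a real history file.
    JobHistoryParser parser =
        new JobHistoryParser(fs, new Path("/tmp/job_1234567890123_0001.jhist"));
    JobInfo jobInfo = parser.parse();
    // Every example on this page starts from a task map like this one.
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskInfo info = entry.getValue();
      System.out.println(entry.getKey() + "\t" + info.getTaskType()
          + "\t" + info.getTaskStatus());
    }
  }
}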
Example 1: StubbedJob
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
    String user, int numSplits, AppContext appContext) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, appContext, null, null);

  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
      EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
      JobEventType.JOB_INIT,
      // This is abusive.
      initTransition);

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  localStateMachine = localFactory.make(this);
}
Example 2: loadAllTasks
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
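The method above is a standard lock-and-recheck lazy initializer: the first get() skips the lock on the hot path, and the second get() under the lock prevents a duplicate load. A JDK-only sketch of the same guard, with illustrative names that are not from Hadoop:
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

class LazyLoader {
  private final AtomicBoolean loaded = new AtomicBoolean(false);
  private final ReentrantLock lock = new ReentrantLock();

  void loadOnce(Runnable loader) {
    if (loaded.get()) {
      return;             // fast path: already loaded, no lock taken
    }
    lock.lock();
    try {
      if (loaded.get()) {
        return;           // another thread loaded while we waited
      }
      loader.run();
      loaded.set(true);   // publish only after the load succeeds
    } finally {
      lock.unlock();
    }
  }
}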
Example 3: computeFinishedMaps
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }
  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos =
      jobInfo.getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
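As written, the fallback loop counts every task whose status is SUCCEEDED, whatever its type; the early return only covers the case where all maps already succeeded. A hypothetical map-only variant (Java 8 streams; names reused from the example) might look like:
// Hypothetical map-only count; TaskType is org.apache.hadoop.mapreduce.TaskType.
long numFinishedMaps = jobInfo.getAllTasks().values().stream()
    .filter(t -> t.getTaskType() == TaskType.MAP)        // keep map tasks only
    .filter(t -> "SUCCEEDED".equals(t.getTaskStatus()))  // history status string
    .count();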
Example 4: createTaskEntity
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private TimelineEntity createTaskEntity(TaskInfo taskInfo) {
  TimelineEntity task = new TimelineEntity();
  task.setEntityType(TASK);
  task.setEntityId(taskInfo.getTaskId().toString());
  task.setStartTime(taskInfo.getStartTime());
  task.addOtherInfo("START_TIME", taskInfo.getStartTime());
  task.addOtherInfo("FINISH_TIME", taskInfo.getFinishTime());
  task.addOtherInfo("TASK_TYPE", taskInfo.getTaskType());
  task.addOtherInfo("TASK_STATUS", taskInfo.getTaskStatus());
  task.addOtherInfo("ERROR_INFO", taskInfo.getError());
  LOG.info("converted task " + taskInfo.getTaskId() +
      " to a timeline entity");
  return task;
}
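Entities produced this way are normally shipped to the YARN Application Timeline Service (v1). A minimal publishing sketch, assuming a reachable timeline server; the helper name publishTask and its wiring are illustrative, not from the original class:
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

void publishTask(TimelineEntity task) throws Exception {
  TimelineClient client = TimelineClient.createTimelineClient();
  client.init(new YarnConfiguration()); // assumes yarn.timeline-service.enabled=true
  client.start();
  try {
    client.putEntities(task);           // blocking put to the timeline server
  } finally {
    client.stop();
  }
}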
Example 5: StubbedJob
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler,
    boolean newApiCommitter, String user, int numSplits) {
  super(jobId, applicationAttemptId, conf, eventHandler,
      null, new JobTokenSecretManager(), new Credentials(),
      new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
      MRAppMetrics.create(), null, newApiCommitter, user,
      System.currentTimeMillis(), null, null, null, null);

  initTransition = getInitTransition(numSplits);
  localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
      EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
      JobEventType.JOB_INIT,
      // This is abusive.
      initTransition);

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  localStateMachine = localFactory.make(this);
}
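This variant is identical to Example 1 except that it takes no AppContext parameter and passes null in its place to the superclass constructor; the state-machine wiring is unchanged.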
Example 6: TaskRecoverEvent
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
public TaskRecoverEvent(TaskId taskID, TaskInfo taskInfo,
    OutputCommitter committer, boolean recoverTaskOutput) {
  super(taskID, TaskEventType.T_RECOVER);
  this.taskInfo = taskInfo;
  this.committer = committer;
  this.recoverTaskOutput = recoverTaskOutput;
}
Example 7: scheduleTasks
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
protected void scheduleTasks(Set<TaskId> taskIDs,
    boolean recoverTaskOutput) {
  for (TaskId taskID : taskIDs) {
    TaskInfo taskInfo = completedTasksFromPreviousRun.remove(taskID);
    if (taskInfo != null) {
      // Recover tasks that completed in the previous run (see Example 6).
      eventHandler.handle(new TaskRecoverEvent(taskID, taskInfo,
          committer, recoverTaskOutput));
    } else {
      eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_SCHEDULE));
    }
  }
}
Example 8: printTasks
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private void printTasks(TaskType taskType, String status) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  StringBuffer header = new StringBuffer();
  header.append("\n").append(status).append(" ");
  header.append(taskType).append(" task list for ").append(jobId);
  header.append("\nTaskId\t\tStartTime\tFinishTime\tError");
  if (TaskType.MAP.equals(taskType)) {
    header.append("\tInputSplits");
  }
  header.append("\n====================================================");
  StringBuffer taskList = new StringBuffer();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    if (taskType.equals(task.getTaskType()) &&
        (status.equals(task.getTaskStatus())
            || status.equalsIgnoreCase("ALL"))) {
      taskList.setLength(0);
      taskList.append(task.getTaskId());
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getStartTime(), 0));
      taskList.append("\t").append(StringUtils.getFormattedTimeWithDiff(
          dateFormat, task.getFinishTime(), task.getStartTime()));
      taskList.append("\t").append(task.getError());
      if (TaskType.MAP.equals(taskType)) {
        taskList.append("\t").append(task.getSplitLocations());
      }
      // taskList is never null here (the original guarded on it anyway);
      // the header is reprinted for every matching task.
      System.out.println(header.toString());
      System.out.println(taskList.toString());
    }
  }
}
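A hypothetical call site, assuming a HistoryViewer-style class where job, jobId, and dateFormat are instance fields:
printTasks(TaskType.MAP, "SUCCEEDED");  // successful map tasks only
printTasks(TaskType.REDUCE, "ALL");     // every reduce task, regardless of status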
Example 9: FilteredJob
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {
  filter = status;
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();
        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
        if (set == null) {
          set = new TreeSet<TaskID>();
          badNodesToFilteredTasks.put(hostname, set);
        }
        set.add(id);
      }
    }
  }
}
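On Java 8 and later, the get/null-check/put sequence on badNodesToFilteredTasks collapses into a single computeIfAbsent call; a behavior-equivalent sketch:
badNodesToFilteredTasks
    .computeIfAbsent(hostname, h -> new TreeSet<TaskID>())
    .add(id);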
Example 10: createTaskAndTaskAttemptEntities
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private Set<TimelineEntity> createTaskAndTaskAttemptEntities(JobInfo jobInfo) {
  Set<TimelineEntity> entities = new HashSet<>();
  Map<TaskID, TaskInfo> taskInfoMap = jobInfo.getAllTasks();
  LOG.info("job " + jobInfo.getJobId() + " has " + taskInfoMap.size() +
      " tasks");
  for (TaskInfo taskInfo : taskInfoMap.values()) {
    TimelineEntity task = createTaskEntity(taskInfo);
    entities.add(task);
    // add the task attempts from this task
    Set<TimelineEntity> taskAttempts = createTaskAttemptEntities(taskInfo);
    entities.addAll(taskAttempts);
  }
  return entities;
}
Example 11: createTaskAttemptEntities
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; // import the required package/class
private Set<TimelineEntity> createTaskAttemptEntities(TaskInfo taskInfo) {
  Set<TimelineEntity> taskAttempts = new HashSet<TimelineEntity>();
  Map<TaskAttemptID, TaskAttemptInfo> taskAttemptInfoMap =
      taskInfo.getAllTaskAttempts();
  LOG.info("task " + taskInfo.getTaskId() + " has " +
      taskAttemptInfoMap.size() + " task attempts");
  for (TaskAttemptInfo taskAttemptInfo : taskAttemptInfoMap.values()) {
    TimelineEntity taskAttempt = createTaskAttemptEntity(taskAttemptInfo);
    taskAttempts.add(taskAttempt);
  }
  return taskAttempts;
}
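Read together, examples 4, 10, and 11 suggest an end-to-end flow: parse a history file, convert its tasks and attempts to timeline entities, then publish them. A rough sketch under those assumptions, where fs, historyFile, and client are set up as in the earlier sketches:
// Sketch: wire the pieces from examples 4, 10 and 11 together.
JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
JobHistoryParser.JobInfo jobInfo = parser.parse();
Set<TimelineEntity> entities = createTaskAndTaskAttemptEntities(jobInfo);
for (TimelineEntity entity : entities) {
  client.putEntities(entity);  // client as in the Example 4 sketch
}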