This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo.getCounters. If you are wondering what TaskAttemptInfo.getCounters does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore the enclosing class, org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo, for further usage examples.
The following presents three code examples of TaskAttemptInfo.getCounters, sorted by popularity by default.
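Before the examples, here is a minimal, self-contained sketch (an illustration, not taken from the examples below) of the typical path to TaskAttemptInfo.getCounters: parse a .jhist job history file with JobHistoryParser, walk every task attempt, and read each attempt's counters. The class name and the command-line file path are assumptions about your setup.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class TaskAttemptCountersDump {
	public static void main(String[] args) throws IOException {
		// args[0] is the path to a job history (.jhist) file -- an assumption
		// about how you locate history files in your environment.
		Path historyFile = new Path(args[0]);
		FileSystem fs = FileSystem.get(new Configuration());
		// Parse the whole history file into an in-memory JobInfo tree.
		JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
		JobInfo job = parser.parse();
		for (TaskInfo task : job.getAllTasks().values()) {
			for (Map.Entry<TaskAttemptID, TaskAttemptInfo> attempt
					: task.getAllTaskAttempts().entrySet()) {
				// Counters recorded for this attempt; may be null if the
				// history file carried no counter information.
				Counters counters = attempt.getValue().getCounters();
				System.out.println(attempt.getKey() + " -> " + counters);
			}
		}
	}
}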
Example 1: addReducePhaseDetails
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo; // import the package/class the method depends on
/**
 * Populates the details of the reduce phase for a single task attempt.
 * @param task the task attempt entry from the job history
 * @param referencedZeroTime the reference start time, in milliseconds
 * @return TaskOutputDetails containing the details of the reduce phase
 */
private TaskOutputDetails addReducePhaseDetails(
		Entry<TaskAttemptID, TaskAttemptInfo> task, long referencedZeroTime) {
	TaskAttemptInfo taskAttemptInfo = task.getValue();
	TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
	taskOutputDetails.setTaskStatus(taskAttemptInfo.getTaskStatus());
	taskOutputDetails.setTaskType(taskAttemptInfo.getTaskType().toString());
	taskOutputDetails.setTaskID(taskAttemptInfo.getAttemptId().getTaskID().toString());
	taskOutputDetails.setLocation(taskAttemptInfo.getHostname());
	Counters counters = taskAttemptInfo.getCounters();
	CounterGroup mapReduceTaskCounters = counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter");
	Counter reduceOutputRecords = mapReduceTaskCounters.findCounter("REDUCE_OUTPUT_RECORDS");
	taskOutputDetails.setOutputRecords(reduceOutputRecords.getValue());
	// Note: SPILLED_RECORDS counts records, not bytes; it is used here as a
	// proxy for the reduce output volume.
	Counter spilledRecords = mapReduceTaskCounters.findCounter("SPILLED_RECORDS");
	taskOutputDetails.setOutputBytes(spilledRecords.getValue());
	long shuffleStartTime = (taskAttemptInfo.getStartTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setStartPoint(shuffleStartTime);
	taskOutputDetails.setShuffleStart(shuffleStartTime);
	long shuffleEnd = (taskAttemptInfo.getShuffleFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setShuffleEnd(shuffleEnd);
	taskOutputDetails.setSortStart(shuffleEnd);
	long sortEnd = (taskAttemptInfo.getSortFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setSortEnd(sortEnd);
	taskOutputDetails.setReduceStart(sortEnd);
	taskOutputDetails.setReduceEnd((taskAttemptInfo.getFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS);
	taskOutputDetails.setEndPoint(taskOutputDetails.getReduceEnd());
	// Guard against a zero-length interval to avoid division by zero.
	long dataFlowRate = spilledRecords.getValue() / Math.max(1, taskOutputDetails.getReduceEnd() - shuffleStartTime);
	taskOutputDetails.setDataFlowRate(dataFlowRate);
	Counter physicalMemoryBytes = mapReduceTaskCounters.findCounter("PHYSICAL_MEMORY_BYTES");
	ResourceUsageMetrics rum = new ResourceUsageMetrics();
	rum.setPhysicalMemoryUsage(physicalMemoryBytes.getValue());
	taskOutputDetails.setResourceUsageMetrics(rum);
	return taskOutputDetails;
}
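A side note on the lookups above: findCounter with group and counter name strings compiles even when a name is misspelled. An equivalent, typo-safe variant (a sketch, not part of the original example) resolves the same counters through the TaskCounter enum on the Counters object directly:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

// Enum-based lookups fail at compile time on a typo, unlike string names.
static long reduceOutputRecords(Counters counters) {
	return counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).getValue();
}

static long physicalMemoryBytes(Counters counters) {
	return counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).getValue();
}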
Example 2: addMapPhaseDetails
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo; // import the package/class the method depends on
/**
 * Populates the details of the map phase for a single task attempt.
 * @param task the task attempt entry from the job history
 * @param referencedZeroTime the reference start time, in milliseconds
 * @return the phase details
 */
private TaskOutputDetails addMapPhaseDetails(Entry<TaskAttemptID, TaskAttemptInfo> task, long referencedZeroTime) {
	TaskAttemptInfo taskAttemptInfo = task.getValue();
	TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
	taskOutputDetails.setTaskStatus(taskAttemptInfo.getTaskStatus());
	taskOutputDetails.setTaskType(taskAttemptInfo.getTaskType().toString());
	taskOutputDetails.setTaskID(taskAttemptInfo.getAttemptId().getTaskID().toString());
	long startPoint = (taskAttemptInfo.getStartTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setStartPoint(startPoint);
	long endPoint = (taskAttemptInfo.getMapFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setEndPoint(endPoint);
	taskOutputDetails.setTimeTaken(endPoint - startPoint);
	taskOutputDetails.setLocation(taskAttemptInfo.getHostname());
	Counters counters = taskAttemptInfo.getCounters();
	CounterGroup fileSystemCounters = counters.getGroup("org.apache.hadoop.mapreduce.FileSystemCounter");
	Counter inputBytes = fileSystemCounters.findCounter("HDFS_BYTES_READ");
	// Guard against a zero-length interval to avoid division by zero.
	long dataFlowRate = inputBytes.getValue() / Math.max(1, endPoint - startPoint);
	taskOutputDetails.setDataFlowRate(dataFlowRate);
	CounterGroup mapReduceTaskCounters = counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter");
	Counter mapOutputRecords = mapReduceTaskCounters.findCounter("MAP_OUTPUT_RECORDS");
	Counter physicalMemoryBytes = mapReduceTaskCounters.findCounter("PHYSICAL_MEMORY_BYTES");
	ResourceUsageMetrics rum = new ResourceUsageMetrics();
	rum.setPhysicalMemoryUsage(physicalMemoryBytes.getValue());
	taskOutputDetails.setResourceUsageMetrics(rum);
	taskOutputDetails.setOutputRecords(mapOutputRecords.getValue());
	Counter mapOutputBytes = mapReduceTaskCounters.findCounter("MAP_OUTPUT_BYTES");
	taskOutputDetails.setOutputBytes(mapOutputBytes.getValue());
	return taskOutputDetails;
}
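The HDFS_BYTES_READ lookup in this example can likewise be expressed with the FileSystemCounter enum plus a filesystem scheme, avoiding the string counter name (a sketch; the "hdfs" scheme is an assumption that the map input was read from HDFS):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;

// Scheme-qualified lookup; "hdfs" assumes the input lived on HDFS.
static long hdfsBytesRead(Counters counters) {
	return counters.findCounter("hdfs", FileSystemCounter.BYTES_READ).getValue();
}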
Example 3: validateTaskAttemptLevelKeyValuesFormat
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo; // import the package/class the method depends on
private static void validateTaskAttemptLevelKeyValuesFormat(JobInfo job) {
Map<TaskID, TaskInfo> tasks = job.getAllTasks();
// For each task
for (TaskInfo task : tasks.values()) {
// validate info of each attempt
for (TaskAttemptInfo attempt : task.getAllTaskAttempts().values()) {
TaskAttemptID id = attempt.getAttemptId();
assertNotNull(id);
long startTime = attempt.getStartTime();
assertTrue("Invalid Start time", startTime > 0);
long finishTime = attempt.getFinishTime();
assertTrue("Task FINISH_TIME is < START_TIME in history file",
startTime < finishTime);
// Make sure that the Task type exists and it is valid
TaskType type = attempt.getTaskType();
assertTrue("Unknown Task type \"" + type + "\" is seen in " +
"history file for task attempt " + id,
(type.equals(TaskType.MAP) || type.equals(TaskType.REDUCE) ||
type.equals(TaskType.JOB_CLEANUP) ||
type.equals(TaskType.JOB_SETUP)));
// Validate task status
String status = attempt.getTaskStatus();
assertTrue("Unexpected TASK_STATUS \"" + status + "\" is seen in" +
" history file for task attempt " + id,
(status.equals(TaskStatus.State.SUCCEEDED.toString()) ||
status.equals(TaskStatus.State.FAILED.toString()) ||
status.equals(TaskStatus.State.KILLED.toString())));
// Successful Reduce Task Attempts should have valid SHUFFLE_FINISHED
// time and SORT_FINISHED time
if (type.equals(TaskType.REDUCE) &&
status.equals(TaskStatus.State.SUCCEEDED.toString())) {
long shuffleFinishTime = attempt.getShuffleFinishTime();
assertTrue(startTime < shuffleFinishTime);
long sortFinishTime = attempt.getSortFinishTime();
assertTrue(shuffleFinishTime < sortFinishTime);
}
else if (type.equals(TaskType.MAP) &&
status.equals(TaskStatus.State.SUCCEEDED.toString())) {
// Successful MAP Task Attempts should have valid MAP_FINISHED time
long mapFinishTime = attempt.getMapFinishTime();
assertTrue(startTime < mapFinishTime);
}
// check if hostname is valid
String hostname = attempt.getHostname();
Matcher m = hostNamePattern.matcher(hostname);
assertTrue("Unexpected Host name of task attempt " + id, m.matches());
// check if trackername is valid
String trackerName = attempt.getTrackerName();
m = trackerNamePattern.matcher(trackerName);
assertTrue("Unexpected tracker name of task attempt " + id,
m.matches());
if (!status.equals(TaskStatus.State.KILLED.toString())) {
// check if http port is valid
int httpPort = attempt.getHttpPort();
assertTrue(httpPort > 0);
}
// check if counters are parsable
Counters counters = attempt.getCounters();
assertNotNull(counters);
}
}
}
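The test above only asserts that getCounters() returned a non-null object. A slightly stronger check (a sketch, not part of the original test; it assumes the same JUnit static imports as the surrounding code) iterates every group and counter, which works because Counters is Iterable<CounterGroup> and each CounterGroup is Iterable<Counter>:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

// Touch every group and counter so any parsing problem surfaces here.
static void assertCountersReadable(Counters counters) {
	for (CounterGroup group : counters) {
		for (Counter counter : group) {
			assertNotNull(counter.getName());
			// Reading the value forces the parsed data to materialize.
			counter.getValue();
		}
	}
}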