本文整理汇总了Java中org.apache.hadoop.mapreduce.TaskReport类的典型用法代码示例。如果您正苦于以下问题:Java TaskReport类的具体用法?Java TaskReport怎么用?Java TaskReport使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
TaskReport类属于org.apache.hadoop.mapreduce包,在下文中一共展示了TaskReport类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: displayTasks
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Prints the task attempts of a job's tasks that are of a given type and
 * currently in a given state.
 *
 * @param job the job whose tasks are inspected
 * @param type task type name (map/reduce/setup/cleanup), matched
 *     case-insensitively against {@link TaskType}
 * @param state task state name used as a filter
 *     (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  // Resolve the task type once, up front, using Hadoop's locale-safe upper-casing.
  TaskType taskType = TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type));
  for (TaskReport report : job.getTaskReports(taskType)) {
    TIPStatus status = report.getCurrentStatus();
    // A report is shown only when its live status matches the requested state.
    boolean selected =
        (status == TIPStatus.PENDING && state.equalsIgnoreCase("pending"))
        || (status == TIPStatus.RUNNING && state.equalsIgnoreCase("running"))
        || (status == TIPStatus.COMPLETE && state.equalsIgnoreCase("completed"))
        || (status == TIPStatus.FAILED && state.equalsIgnoreCase("failed"))
        || (status == TIPStatus.KILLED && state.equalsIgnoreCase("killed"));
    if (selected) {
      printTaskAttempts(report);
    }
  }
}
示例2: displayTasks
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state.
 *
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task
 *     (pending/running/completed/failed/killed)
 * @throws IOException when there is an error communicating with the master
 * @throws InterruptedException if interrupted while fetching the reports
 * @throws IllegalArgumentException if an invalid type/state is passed
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  // Fix: the original declared `reports = null` and immediately overwrote it;
  // declare and initialize in one statement instead.
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
示例3: printJobStatus
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Fetches and logs the overall status of a job (name, queue, map/reduce
 * progress, tracking URL, resource usage) followed by the status and progress
 * of every MAP and REDUCE task.
 *
 * @param yarnRunner the runner used to query job and task state
 * @param jobID the job to report on
 * @return the fetched {@link JobStatus}, so callers can inspect it further
 * @throws IOException if communication with the cluster fails
 * @throws InterruptedException if the status fetch is interrupted
 */
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
  JobStatus jobStatus = yarnRunner.getJobStatus(jobID);
  // print overall job M/R progresses.
  // Fix: the original concatenated getJobName() + "in queue", producing
  // output like "Jobfooin queue (...)"; a separating space was missing.
  LOGGER.info("\nJob " + jobStatus.getJobName() + " in queue (" + jobStatus.getQueue() + ")" + " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
  LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
  LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : " + jobStatus.getUsedMem() + " and used slots : " + jobStatus.getNumUsedSlots());
  // list map & reduce tasks statuses and progress (enhanced-for over index loops).
  for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.MAP)) {
    LOGGER.info("MAP: Status " + report.getCurrentStatus() + " with task ID " + report.getTaskID() + ", and progress " + report.getProgress());
  }
  for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.REDUCE)) {
    LOGGER.info("REDUCE: " + report.getCurrentStatus() + " with task ID " + report.getTaskID() + ", and progress " + report.getProgress());
  }
  return jobStatus;
}
示例4: PartitionedInputResult
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Captures the outcome of the partitioned-input MR phase: the output path,
 * the aggregated job counters, and three per-shard counter arrays extracted
 * from the individual task reports.
 *
 * @param partitionedInputData path to the partitioned input data
 * @param counters job-level counters
 * @param shards number of shards; sizes the per-shard arrays
 * @param taskReports per-task reports; each task id indexes into the arrays
 */
PartitionedInputResult(Path partitionedInputData, Counters counters, int shards, TaskReport[] taskReports) {
  _partitionedInputData = partitionedInputData;
  _counters = counters;
  _rowIdsFromNewData = new long[shards];
  _rowIdsToUpdateFromNewData = new long[shards];
  _rowIdsFromIndex = new long[shards];
  // Each task contributes its BlurIndexCounter values at the slot of its task id.
  for (TaskReport taskReport : taskReports) {
    int shardId = taskReport.getTaskID().getId();
    Counters perTask = taskReport.getTaskCounters();
    _rowIdsFromNewData[shardId] =
        perTask.findCounter(BlurIndexCounter.ROW_IDS_FROM_NEW_DATA).getValue();
    _rowIdsToUpdateFromNewData[shardId] =
        perTask.findCounter(BlurIndexCounter.ROW_IDS_TO_UPDATE_FROM_NEW_DATA).getValue();
    _rowIdsFromIndex[shardId] =
        perTask.findCounter(BlurIndexCounter.ROW_IDS_FROM_INDEX).getValue();
  }
}
示例5: testMapTaskReportsWithNullJob
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  // When the cluster cannot resolve the job id, map task reports must be
  // an empty array rather than null or an exception.
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getMapTaskReports(jobId);

  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}
示例6: testReduceTaskReportsWithNullJob
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  // When the cluster cannot resolve the job id, reduce task reports must be
  // an empty array rather than null or an exception.
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getReduceTaskReports(jobId);

  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}
示例7: testSetupTaskReportsWithNullJob
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  // When the cluster cannot resolve the job id, setup task reports must be
  // an empty array rather than null or an exception.
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getSetupTaskReports(jobId);

  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}
示例8: testCleanupTaskReportsWithNullJob
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  // When the cluster cannot resolve the job id, cleanup task reports must be
  // an empty array rather than null or an exception.
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getCleanupTaskReports(jobId);

  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}
示例9: printTaskAttempts
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Prints the attempt id(s) of a task report to stdout: the successful
 * attempt for a COMPLETE task, or every currently running attempt for a
 * RUNNING task. Other statuses print nothing.
 *
 * @param report the task report to print attempts for
 */
private void printTaskAttempts(TaskReport report) {
  TIPStatus status = report.getCurrentStatus();
  if (status == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (status == TIPStatus.RUNNING) {
    for (TaskAttemptID attemptId : report.getRunningTaskAttemptIds()) {
      System.out.println(attemptId);
    }
  }
}
示例10: doRun
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Builds, submits and waits for the map-only "update column" job described
 * by {@code upcolConfig}. On failure, logs the diagnostics of every MAP task.
 *
 * @param upcolConfig job configuration (table name, column spec, ...)
 * @return {@code true} when the job completed successfully
 * @throws Exception if job setup, submission, or the wait fails
 */
public boolean doRun(Config upcolConfig) throws Exception {
  // Base MR configuration: map-only, no failed-task file retention.
  JobConf conf = new JobConf(getConf(), UpdateColumnJob.class);
  conf.setKeepFailedTaskFiles(false);
  conf.setNumReduceTasks(0);
  // Unique, human-readable job name: table + timestamp + random suffix.
  String jobName = String.format("indexr-upcol-%s-%s-%s",
      upcolConfig.table,
      LocalDateTime.now().format(timeFormatter),
      RandomStringUtils.randomAlphabetic(5));
  conf.setJobName(jobName);
  conf.set(CONFKEY, JsonUtil.toJson(upcolConfig));
  conf.setWorkingDirectory(new Path(conf.getWorkingDirectory(), jobName));

  Job job = Job.getInstance(conf);
  job.setInputFormatClass(SegmentInputFormat.class);
  job.setMapperClass(UpColSegmentMapper.class);
  job.setJarByClass(UpdateColumnJob.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  // Speculative duplicates would rewrite the same segment; disallow them.
  job.setMapSpeculativeExecution(false);
  job.setOutputFormatClass(UpColSegmentOutputFormat.class);

  job.submit();
  boolean ok = job.waitForCompletion(true);
  if (!ok) {
    // Surface per-task diagnostics so the failure cause is visible in the log.
    TaskReport[] reports = job.getTaskReports(TaskType.MAP);
    if (reports != null) {
      for (TaskReport report : reports) {
        log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
      }
    }
  }
  return ok;
}
示例11: displayTasks
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Prints the task attempts of a job's tasks that are of a given type and
 * currently in a given state.
 *
 * @param job the job whose tasks are inspected
 * @param type task type name (map/reduce/setup/cleanup), upper-cased and
 *     resolved to a {@link TaskType}
 * @param state task state name used as a filter
 *     (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  TaskType taskType = TaskType.valueOf(type.toUpperCase());
  for (TaskReport report : job.getTaskReports(taskType)) {
    TIPStatus status = report.getCurrentStatus();
    // A report is shown only when its live status matches the requested state.
    boolean selected =
        (status == TIPStatus.PENDING && state.equalsIgnoreCase("pending"))
        || (status == TIPStatus.RUNNING && state.equalsIgnoreCase("running"))
        || (status == TIPStatus.COMPLETE && state.equalsIgnoreCase("completed"))
        || (status == TIPStatus.FAILED && state.equalsIgnoreCase("failed"))
        || (status == TIPStatus.KILLED && state.equalsIgnoreCase("killed"));
    if (selected) {
      printTaskAttempts(report);
    }
  }
}
示例12: displayTasks
import org.apache.hadoop.mapreduce.TaskReport; //导入依赖的package包/类
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state.
 *
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task
 *     (pending/running/completed/failed/killed), matched case-insensitively
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(type.toUpperCase()));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    // Fix: use equalsIgnoreCase, matching the other displayTasks variants in
    // this file; the case-sensitive equals() silently rejected e.g. "PENDING".
    // All previously accepted (lowercase) inputs behave exactly as before.
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}