This article collects typical usage examples of the Java class org.apache.hadoop.mapred.TaskCompletionEvent: what the class is for and how it is used in practice.
TaskCompletionEvent belongs to the org.apache.hadoop.mapred package. Eleven code examples are shown below, ordered by popularity.
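Before the examples, here is a minimal, self-contained sketch of the client-side pattern most of these snippets revolve around: polling TaskCompletionEvent records from a RunningJob. It uses only methods that appear in the examples below plus RunningJob.isComplete(); the job id is taken from the command line as a placeholder.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class TaskCompletionEventPoller {
  public static void main(String[] args) throws Exception {
    JobClient jobClient = new JobClient(new JobConf());
    // args[0] is a job id string such as "job_201301010000_0001"
    RunningJob job = jobClient.getJob(JobID.forName(args[0]));
    int fromEventId = 0;
    while (!job.isComplete()) {
      // fetch the events published since the last poll
      TaskCompletionEvent[] events = job.getTaskCompletionEvents(fromEventId);
      for (TaskCompletionEvent event : events) {
        System.out.println(event.getTaskAttemptId() + " -> " + event.getTaskStatus());
      }
      fromEventId += events.length;
      Thread.sleep(1000);
    }
  }
}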
Example 1: getMapAttemptCompletionEvents
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
  readLock.lock();
  try {
    if (mapAttemptCompletionEvents.size() > startIndex) {
      int actualMax = Math.min(maxEvents,
          (mapAttemptCompletionEvents.size() - startIndex));
      events = mapAttemptCompletionEvents.subList(startIndex,
          actualMax + startIndex).toArray(events);
    }
    return events;
  } finally {
    readLock.unlock();
  }
}
Example 2: fromYarn
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
public static TaskCompletionEvent.Status fromYarn(
    TaskAttemptCompletionEventStatus newStatus) {
  switch (newStatus) {
    case FAILED:
      return TaskCompletionEvent.Status.FAILED;
    case KILLED:
      return TaskCompletionEvent.Status.KILLED;
    case OBSOLETE:
      return TaskCompletionEvent.Status.OBSOLETE;
    case SUCCEEDED:
      return TaskCompletionEvent.Status.SUCCEEDED;
    case TIPFAILED:
      return TaskCompletionEvent.Status.TIPFAILED;
  }
  throw new YarnRuntimeException("Unrecognized status: " + newStatus);
}
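As a usage note: in the Hadoop MRv2 sources this converter lives in org.apache.hadoop.mapreduce.TypeConverter, and TaskAttemptCompletionEventStatus comes from org.apache.hadoop.mapreduce.v2.api.records (Example 10 below also goes through TypeConverter.fromYarn). A one-line call, as a sketch under those assumptions:

// YARN-side status -> classic mapred status; yields TaskCompletionEvent.Status.KILLED
TaskCompletionEvent.Status status =
    TypeConverter.fromYarn(TaskAttemptCompletionEventStatus.KILLED);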
Example 3: resolve
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
    case SUCCEEDED:
      URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
      addKnownMapOutput(u.getHost() + ":" + u.getPort(),
          u.toString(),
          event.getTaskAttemptId());
      maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
      break;
    case FAILED:
    case KILLED:
    case OBSOLETE:
      obsoleteMapOutput(event.getTaskAttemptId());
      LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
          " map-task: '" + event.getTaskAttemptId() + "'");
      break;
    case TIPFAILED:
      tipFailed(event.getTaskAttemptId().getTaskID());
      LOG.info("Ignoring output of failed map TIP: '" +
          event.getTaskAttemptId() + "'");
      break;
  }
}
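Note how this resolver treats KILLED: it is grouped with FAILED and OBSOLETE, so only that attempt's output is marked obsolete and the attempt may be retried. Example 6 below is an otherwise identical resolver that instead groups KILLED with TIPFAILED and gives up on the whole task via tipFailed(); which grouping is appropriate presumably depends on whether a killed attempt can still be rescheduled in that scheduler's model.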
Example 4: runJob
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@SuppressWarnings({ "deprecation", "unchecked" })
public static TaskCompletionEvent[] runJob(JobConf conf, Class mapperClass,
    boolean enableNoFetchEmptyMapOutputs) throws Exception {
  conf.setMapperClass(mapperClass);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setNumMapTasks(3);
  conf.setNumReduceTasks(1);
  conf.setInputFormat(FakeIF.class);
  conf.setBoolean("mapred.enable.no.fetch.map.outputs", enableNoFetchEmptyMapOutputs);
  FileInputFormat.setInputPaths(conf, new Path("/in"));
  final Path outp = new Path("/out");
  FileOutputFormat.setOutputPath(conf, outp);
  RunningJob job = JobClient.runJob(conf);
  assertTrue(job.isSuccessful());
  return job.getTaskCompletionEvents(0);
}
Example 5: getMapAttemptCompletionEvents
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
  readLock.lock();
  try {
    if (mapAttemptCompletionEvents.size() > startIndex) {
      // same as Example 1, plus debug logging of every known event
      for (TaskCompletionEvent event : mapAttemptCompletionEvents) {
        LOG.info("map completion event " + event.getTaskId().toString()
            + " status: " + event.getStatus().toString());
      }
      int actualMax = Math.min(maxEvents,
          (mapAttemptCompletionEvents.size() - startIndex));
      events = mapAttemptCompletionEvents.subList(startIndex,
          actualMax + startIndex).toArray(events);
    }
    return events;
  } finally {
    readLock.unlock();
  }
}
Example 6: resolve
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
    case SUCCEEDED:
      URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
      addKnownMapOutput(u.getHost() + ":" + u.getPort(),
          u.toString(),
          event.getTaskAttemptId());
      maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
      break;
    case FAILED:
    case OBSOLETE:
      obsoleteMapOutput(event.getTaskAttemptId());
      LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
          " map-task: '" + event.getTaskAttemptId() + "'");
      break;
    case TIPFAILED:
    case KILLED:
      tipFailed(event.getTaskAttemptId().getTaskID());
      LOG.info("Ignoring output of failed map TIP: '" +
          event.getTaskAttemptId() + "'");
      break;
  }
}
Example 7: abortJob
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
  super.abortJob(context, runState);
  final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
  final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(
      context.getConfiguration().get("mapred.job.id")));
  String diag = "";
  for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
    switch (event.getTaskStatus()) {
      case SUCCEEDED:
        break;
      default:
        diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
        for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
          diag += s + "\n";
        diag += "\n";
        break;
    }
  }
  updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
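One design note on the loop above: diag grows by string concatenation inside nested loops, which copies the whole buffer on every append. A sketch of the same aggregation with a StringBuilder (same RunningJob methods as above, offered as a hypothetical drop-in):

StringBuilder diag = new StringBuilder();
for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
  if (event.getTaskStatus() == TaskCompletionEvent.Status.SUCCEEDED) {
    continue; // successful attempts carry no diagnostics of interest here
  }
  diag.append("Diagnostics for: ").append(event.getTaskTrackerHttp()).append('\n');
  for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
    diag.append(s).append('\n');
  }
  diag.append('\n');
}
updateStatus(diag.toString(), context.getConfiguration().getInt("boa.hadoop.jobid", 0));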
Example 8: getTTlogURL
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
private String getTTlogURL(String jobId) throws Exception {
  JobConf jobConf = new JobConf(getConf());
  JobClient jobClient = new JobClient(jobConf);
  RunningJob job = jobClient.getJob(JobID.forName(jobId));
  if (job == null) {
    LOG.warn("No running job for job id: " + jobId);
    return null;
  }
  TaskCompletionEvent[] tasks = job.getTaskCompletionEvents(0);
  // event 0 is the setup task, event 1 the launcher, event 2 the cleanup task
  if (tasks != null && tasks.length == 3 && tasks[1] != null) {
    return tasks[1].getTaskTrackerHttp() + "/tasklog?attemptid="
        + tasks[1].getTaskAttemptId() + "&all=true";
  } else {
    LOG.warn("No running task for job: " + jobId);
  }
  return null;
}
Example 9: getMockedCompletionEventsUpdate
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
private MapTaskCompletionEventsUpdate getMockedCompletionEventsUpdate(
    int startIdx, int numEvents) {
  ArrayList<TaskCompletionEvent> tceList =
      new ArrayList<TaskCompletionEvent>(numEvents);
  for (int i = 0; i < numEvents; ++i) {
    int eventIdx = startIdx + i;
    TaskCompletionEvent tce = new TaskCompletionEvent(eventIdx,
        new TaskAttemptID("12345", 1, TaskType.MAP, eventIdx, 0),
        eventIdx, true, TaskCompletionEvent.Status.SUCCEEDED,
        "http://somehost:8888");
    tceList.add(tce);
  }
  TaskCompletionEvent[] events = {};
  return new MapTaskCompletionEventsUpdate(tceList.toArray(events), false);
}
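The zero-length events array passed to toArray is the standard java.util.List idiom: when the argument array is too small, toArray allocates and returns a new TaskCompletionEvent[] of exactly the list's size, so the empty array serves only to fix the runtime element type. The same pattern appears in Examples 1 and 5 above, where the preallocated EMPTY_TASK_COMPLETION_EVENTS constant plays that role.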
Example 10: getMapAttemptCompletionEvents
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
@Override
public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents(
    int startIndex, int maxEvents) {
  if (mapCompletionEvents == null) {
    constructTaskAttemptCompletionEvents();
  }
  return TypeConverter.fromYarn(getAttemptCompletionEvents(
      mapCompletionEvents, startIndex, maxEvents));
}
Example 11: testGetTaskAttemptCompletionEvent
import org.apache.hadoop.mapred.TaskCompletionEvent; // required import
/**
 * Simple test of some methods of CompletedJob
 * @throws Exception
 */
@Test(timeout = 30000)
public void testGetTaskAttemptCompletionEvent() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
          info, jobAclsManager);
  TaskCompletionEvent[] events = completedJob.getMapAttemptCompletionEvents(0, 1000);
  assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
  // event ids must be non-decreasing
  int currentEventId = 0;
  for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
    int eventId = taskAttemptCompletionEvent.getEventId();
    assertTrue(eventId >= currentEventId);
    currentEventId = eventId;
  }
  assertNull(completedJob.loadConfFile());
  // job name
  assertEquals("Sleep job", completedJob.getName());
  // queue name
  assertEquals("default", completedJob.getQueueName());
  // progress
  assertEquals(1.0, completedJob.getProgress(), 0.001);
  // 12 completion events in total
  assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
  // select the first 10 events
  assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
  // starting at index 5, min(10, 12 - 5) = 7 events remain
  assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
  // no errors: a single, empty diagnostics string
  assertEquals(1, completedJob.getDiagnostics().size());
  assertEquals("", completedJob.getDiagnostics().get(0));
  assertEquals(0, completedJob.getJobACLs().size());
}