This article collects typical usage examples of the Java method org.apache.hadoop.mapred.TaskCompletionEvent.getTaskStatus. If you are wondering what TaskCompletionEvent.getTaskStatus does and how to use it, the curated examples below should help; you can also explore the usage of the enclosing class org.apache.hadoop.mapred.TaskCompletionEvent.
Six code examples of the TaskCompletionEvent.getTaskStatus method are shown below, ordered by popularity.
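For orientation before the examples: getTaskStatus() returns a value of the nested enum TaskCompletionEvent.Status, whose constants are SUCCEEDED, FAILED, KILLED, OBSOLETE, and TIPFAILED. The following is a minimal sketch, not taken from the examples below, of fetching completion events for a job and inspecting each status; the class and method names are illustrative, and the job is assumed to have been configured, submitted, and finished elsewhere:

import java.io.IOException;

import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class TaskStatusProbe {
  /** Prints the attempt ID of every failed task attempt of the given job. */
  static void printFailedAttempts(RunningJob job) throws IOException {
    int from = 0;
    TaskCompletionEvent[] events;
    // getTaskCompletionEvents returns events in pages, so keep polling
    // from the last seen index; for a finished job this drains them all.
    while ((events = job.getTaskCompletionEvents(from)).length > 0) {
      from += events.length;
      for (TaskCompletionEvent event : events) {
        if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
          System.out.println("Failed attempt: " + event.getTaskAttemptId());
        }
      }
    }
  }
}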
Example 1: resolve
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
  case SUCCEEDED:
    URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
    addKnownMapOutput(u.getHost() + ":" + u.getPort(),
        u.toString(),
        event.getTaskAttemptId());
    maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
    break;
  case FAILED:
  case KILLED:
  case OBSOLETE:
    obsoleteMapOutput(event.getTaskAttemptId());
    LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
        " map-task: '" + event.getTaskAttemptId() + "'");
    break;
  case TIPFAILED:
    tipFailed(event.getTaskAttemptId().getTaskID());
    LOG.info("Ignoring output of failed map TIP: '" +
        event.getTaskAttemptId() + "'");
    break;
  }
}
Example 2: resolve
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
@Override
public void resolve(TaskCompletionEvent event) {
  switch (event.getTaskStatus()) {
  case SUCCEEDED:
    URI u = getBaseURI(reduceId, event.getTaskTrackerHttp());
    addKnownMapOutput(u.getHost() + ":" + u.getPort(),
        u.toString(),
        event.getTaskAttemptId());
    maxMapRuntime = Math.max(maxMapRuntime, event.getTaskRunTime());
    break;
  case FAILED:
  case OBSOLETE:
    obsoleteMapOutput(event.getTaskAttemptId());
    LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
        " map-task: '" + event.getTaskAttemptId() + "'");
    break;
  case TIPFAILED:
  case KILLED:
    tipFailed(event.getTaskAttemptId().getTaskID());
    LOG.info("Ignoring output of failed map TIP: '" +
        event.getTaskAttemptId() + "'");
    break;
  }
}
Example 3: abortJob
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
  super.abortJob(context, runState);
  final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
  final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
  String diag = "";
  for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
    switch (event.getTaskStatus()) {
    case SUCCEEDED:
      break;
    default:
      diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
      for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
        diag += s + "\n";
      diag += "\n";
      break;
    }
  updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
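Example 3 builds the diagnostics string with repeated += concatenation, which copies the accumulated string on every append. Below is a sketch of the same loop using StringBuilder; it is not from the source, and the RunningJob parameter stands in for the example's job variable:

static String collectDiagnostics(RunningJob job) throws IOException {
  // Same traversal as Example 3: skip SUCCEEDED events, gather
  // per-attempt diagnostics for everything else.
  StringBuilder diag = new StringBuilder();
  for (TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
    if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
      diag.append("Diagnostics for: ").append(event.getTaskTrackerHttp()).append('\n');
      for (String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
        diag.append(s).append('\n');
      }
      diag.append('\n');
    }
  }
  return diag.toString();
}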
Example 4: checkComplete
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
/**
 * Checks if the map-reduce job has completed.
 *
 * @return true if the job completed, false otherwise.
 * @throws java.io.IOException
 */
public boolean checkComplete() throws IOException {
  JobID jobID = runningJob.getID();
  if (runningJob.isComplete()) {
    // delete job directory
    final String jobdir = jobconf.get(JOB_DIR_LABEL);
    if (jobdir != null) {
      final Path jobpath = new Path(jobdir);
      jobpath.getFileSystem(jobconf).delete(jobpath, true);
    }
    if (runningJob.isSuccessful()) {
      LOG.info("Job Complete(Succeeded): " + jobID);
    } else {
      LOG.info("Job Complete(Failed): " + jobID);
    }
    return true;
  } else {
    String report = (" job " + jobID +
        " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0) +
        " reduce " +
        StringUtils.formatPercent(runningJob.reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        runningJob.getTaskCompletionEvents(jobEventCounter);
    jobEventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
        LOG.info(" Job " + jobID + " " + event.toString());
      }
    }
    return false;
  }
}
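checkComplete() is designed to be polled: each call logs progress, drains any new completion events, and returns true once the job finishes. A hypothetical caller inside the same class might drive it like this; the 5-second interval is an assumption, not from the source:

// Poll until checkComplete() reports the job done.
while (!checkComplete()) {
  try {
    Thread.sleep(5000); // assumed polling interval
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    break;
  }
}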
Example 5: checkComplete
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
/** Checks if the map-reduce job has completed.
 *
 * @return true if the job completed, false otherwise.
 * @throws IOException
 */
public boolean checkComplete() throws IOException {
  JobID jobID = runningJob.getID();
  if (runningJob.isComplete()) {
    // delete job directory
    final String jobdir = jobconf.get(JOB_DIR_LABEL);
    if (jobdir != null) {
      final Path jobpath = new Path(jobdir);
      jobpath.getFileSystem(jobconf).delete(jobpath, true);
    }
    if (runningJob.isSuccessful()) {
      LOG.info("Job Complete(Succeeded): " + jobID);
    } else {
      LOG.info("Job Complete(Failed): " + jobID);
    }
    raidPolicyPathPairList.clear();
    Counters ctrs = runningJob.getCounters();
    if (ctrs != null) {
      RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
      if (ctrs.findCounter(Counter.FILES_FAILED) != null) {
        long filesFailed = ctrs.findCounter(Counter.FILES_FAILED).getValue();
        metrics.raidFailures.inc(filesFailed);
      }
      long slotSeconds = ctrs.findCounter(
          JobInProgress.Counter.SLOTS_MILLIS_MAPS).getValue() / 1000;
      metrics.raidSlotSeconds.inc(slotSeconds);
    }
    return true;
  } else {
    String report = (" job " + jobID +
        " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0) +
        " reduce " + StringUtils.formatPercent(runningJob.reduceProgress(), 0));
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events =
        runningJob.getTaskCompletionEvents(jobEventCounter);
    jobEventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
        LOG.info(" Job " + jobID + " " + event.toString());
      }
    }
    return false;
  }
}
Example 6: getMapCompletionEvents
import org.apache.hadoop.mapred.TaskCompletionEvent; // import the package/class the method depends on
/**
 * Queries the {@link TaskTracker} for a set of map-completion events
 * from a given event ID.
 * @throws IOException
 */
private int getMapCompletionEvents() throws IOException {
  int numNewMaps = 0;
  MapTaskCompletionEventsUpdate update =
      umbilical.getMapCompletionEvents((org.apache.hadoop.mapred.JobID)
          reduce.getJobID(),
          fromEventId,
          MAX_EVENTS_TO_FETCH,
          (org.apache.hadoop.mapred.TaskAttemptID) reduce);
  TaskCompletionEvent[] events = update.getMapTaskCompletionEvents();
  LOG.debug("Got " + events.length + " map completion events from " +
      fromEventId);
  // Check if the reset is required.
  // Since there is no ordering of the task completion events at the
  // reducer, the only option to sync with the new jobtracker is to reset
  // the events index.
  if (update.shouldReset()) {
    fromEventId = 0;
    scheduler.resetKnownMaps();
  }
  // Update the last seen event ID.
  fromEventId += events.length;
  // Process the TaskCompletionEvents:
  // 1. Save the SUCCEEDED maps in knownOutputs to fetch the outputs.
  // 2. Save the OBSOLETE/FAILED/KILLED maps in obsoleteOutputs to stop
  //    fetching from those maps.
  // 3. Remove TIPFAILED maps from neededOutputs since we don't need their
  //    outputs at all.
  for (TaskCompletionEvent event : events) {
    switch (event.getTaskStatus()) {
    case SUCCEEDED:
      URI u = getBaseURI(event.getTaskTrackerHttp());
      scheduler.addKnownMapOutput(u.getHost() + ":" + u.getPort(),
          u.toString(),
          event.getTaskAttemptId());
      numNewMaps++;
      int duration = event.getTaskRunTime();
      if (duration > maxMapRuntime) {
        maxMapRuntime = duration;
        scheduler.informMaxMapRunTime(maxMapRuntime);
      }
      break;
    case FAILED:
    case KILLED:
    case OBSOLETE:
      scheduler.obsoleteMapOutput(event.getTaskAttemptId());
      LOG.info("Ignoring obsolete output of " + event.getTaskStatus() +
          " map-task: '" + event.getTaskAttemptId() + "'");
      break;
    case TIPFAILED:
      scheduler.tipFailed(event.getTaskAttemptId().getTaskID());
      LOG.info("Ignoring output of failed map TIP: '" +
          event.getTaskAttemptId() + "'");
      break;
    }
  }
  return numNewMaps;
}
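A note on the reset handling in Example 6: because the reducer cannot rely on any ordering of completion events after a jobtracker restart, shouldReset() forces fromEventId back to 0 and resetKnownMaps() discards everything the scheduler has learned, so the loop above simply re-learns the map outputs from the fresh event stream.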