This article collects typical usage examples of the Java class org.apache.hadoop.mapred.JobHistory.Values. If you are unsure what the Values class does or how to use it, the curated examples below may help.
The Values class belongs to the org.apache.hadoop.mapred.JobHistory package. Fifteen code examples of the class are shown below, sorted by popularity by default.
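Before the examples, here is a minimal standalone sketch (the class name ValuesOverview is hypothetical; it assumes only that the classic Hadoop JobHistory.Values enum is on the classpath) that prints every constant the examples below draw on:

import org.apache.hadoop.mapred.JobHistory.Values;

public class ValuesOverview {
    public static void main(String[] args) {
        // Job statuses used below: PREP, RUNNING, SUCCESS, FAILED, KILLED.
        // Task types used below: MAP, REDUCE, SETUP, CLEANUP.
        for (Values v : Values.values()) {
            System.out.println(v.name());
        }
    }
}

Note that the examples never persist the enum itself; they always log v.name(), i.e. the plain string form of each constant.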
Example 1: logInited
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Logs launch time of job.
*
* @param startTime start time of job.
* @param totalMaps total maps assigned by jobtracker.
* @param totalReduces total reduces.
*/
public void logInited(long startTime, int totalMaps, int totalReduces) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS,
Keys.TOTAL_REDUCES, Keys.JOB_STATUS},
new String[] {jobId.toString(), String.valueOf(startTime),
String.valueOf(totalMaps),
String.valueOf(totalReduces),
Values.PREP.name()});
}
}
Example 2: logFailed
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Logs job failed event. Closes the job history log file.
* @param timestamp time when job failure was detected in ms.
* @param finishedMaps number of finished map tasks.
* @param finishedReduces number of finished reduce tasks.
* @param counters the job counters as of the failure.
*/
public void logFailed(long timestamp, int finishedMaps,
int finishedReduces, Counters counters) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
Keys.JOB_STATUS, Keys.FINISHED_MAPS,
Keys.FINISHED_REDUCES, Keys.COUNTERS},
new String[] {jobId.toString(),
String.valueOf(timestamp),
Values.FAILED.name(),
String.valueOf(finishedMaps),
String.valueOf(finishedReduces),
counters.makeEscapedCompactString()},
true);
closeAndClear(writers);
}
}
Example 3: logKilled
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Logs job killed event. Closes the job history log file.
*
* @param timestamp
* time when the job kill was issued, in ms.
* @param finishedMaps
* number of finished map tasks.
* @param finishedReduces
* number of finished reduce tasks.
* @param counters
* the job counters as of the kill.
*/
public void logKilled(long timestamp, int finishedMaps,
int finishedReduces, Counters counters) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID,
Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS,
Keys.FINISHED_REDUCES, Keys.COUNTERS },
new String[] {jobId.toString(),
String.valueOf(timestamp), Values.KILLED.name(),
String.valueOf(finishedMaps),
String.valueOf(finishedReduces),
counters.makeEscapedCompactString()},
true);
closeAndClear(writers);
}
}
Example 4: logTaskFinished
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log finish time of task.
* @param taskId task id
* @param taskType MAP or REDUCE
* @param finishTime finish time of task in ms
*/
public void logTaskFinished(TaskID taskId, String taskType,
long finishTime, Counters counters) {
if (disableHistory) {
return;
}
JobID id = taskId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.Task,
new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
Keys.TASK_STATUS, Keys.FINISH_TIME,
Keys.COUNTERS},
new String[]{ taskId.toString(), taskType, Values.SUCCESS.name(),
String.valueOf(finishTime),
counters.makeEscapedCompactString()});
}
}
Example 5: processTaskAttempt
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Adds a task attempt to the listener
*/
private void processTaskAttempt(String taskAttemptId,
JobHistory.TaskAttempt attempt) {
TaskAttemptID id = TaskAttemptID.forName(taskAttemptId);
// Check if the transaction for this attempt can be committed
String taskStatus = attempt.get(Keys.TASK_STATUS);
if (taskStatus.length() > 0) {
// This means this is an update event
if (taskStatus.equals(Values.SUCCESS.name())) {
// Mark this attempt as hanging
hangingAttempts.put(id.getTaskID().toString(), taskAttemptId);
addSuccessfulAttempt(jip, id, attempt);
} else {
addUnsuccessfulAttempt(jip, id, attempt);
numEventsRecovered += 2;
}
} else {
createTaskAttempt(jip, id, attempt);
}
}
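Example 5 keys its branching off the raw TASK_STATUS string, comparing it against Values.SUCCESS.name(). A minimal standalone sketch of that round trip (the class name and sample inputs are hypothetical; only the Values enum is assumed):

import org.apache.hadoop.mapred.JobHistory.Values;

public class StatusRoundTrip {
    public static void main(String[] args) {
        // An empty status marks a creation event; anything else is an update.
        String[] samples = {"", Values.SUCCESS.name(), Values.KILLED.name()};
        for (String taskStatus : samples) {
            if (taskStatus.length() == 0) {
                System.out.println("create event");
            } else if (taskStatus.equals(Values.SUCCESS.name())) {
                System.out.println("successful attempt");
            } else {
                // valueOf(...) recovers the enum constant from the logged string.
                System.out.println("unsuccessful attempt: " + Values.valueOf(taskStatus));
            }
        }
    }
}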
Example 6: checkAndInit
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
private void checkAndInit() throws IOException {
String jobStatus = this.job.get(Keys.JOB_STATUS);
if (Values.PREP.name().equals(jobStatus)) {
hasUpdates = true;
LOG.info("Calling init from RM for job " + jip.getJobID().toString());
try {
initJob(jip);
} catch (Throwable t) {
LOG.error("Job initialization failed : \n"
+ StringUtils.stringifyException(t));
jip.status.setFailureInfo("Job Initialization failed: \n"
+ StringUtils.stringifyException(t));
failJob(jip);
throw new IOException(t);
}
}
}
Example 7: getTaskType
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Get the task type for logging it to {@link JobHistory}.
*/
private String getTaskType(TaskInProgress tip) {
if (tip.isJobCleanupTask()) {
return Values.CLEANUP.name();
} else if (tip.isJobSetupTask()) {
return Values.SETUP.name();
} else if (tip.isMapTask()) {
return Values.MAP.name();
} else {
return Values.REDUCE.name();
}
}
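The same precedence (cleanup before setup before map/reduce) can be seen in isolation in the sketch below; TaskTypeSketch and its boolean parameters are hypothetical stand-ins for the TaskInProgress queries above, and only the Values enum is assumed:

import org.apache.hadoop.mapred.JobHistory.Values;

public class TaskTypeSketch {
    // Mirrors getTaskType(TaskInProgress): cleanup wins over setup,
    // which wins over the plain map/reduce distinction.
    static String taskTypeFor(boolean isJobCleanup, boolean isJobSetup, boolean isMap) {
        if (isJobCleanup) {
            return Values.CLEANUP.name();
        } else if (isJobSetup) {
            return Values.SETUP.name();
        }
        return isMap ? Values.MAP.name() : Values.REDUCE.name();
    }

    public static void main(String[] args) {
        System.out.println(taskTypeFor(false, false, true));  // MAP
        System.out.println(taskTypeFor(true, false, true));   // CLEANUP
    }
}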
Example 8: addUnsuccessfulAttempt
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
private void addUnsuccessfulAttempt(JobInProgress job,
TaskAttemptID attemptId,
JobHistory.TaskAttempt attempt) {
// I. Get the required info
TaskID taskId = attemptId.getTaskID();
TaskInProgress tip = job.getTaskInProgress(taskId);
long attemptFinishTime = attempt.getLong(Keys.FINISH_TIME);
TaskStatus taskStatus = (TaskStatus)tip.getTaskStatus(attemptId).clone();
taskStatus.setFinishTime(attemptFinishTime);
// Reset the progress
taskStatus.setProgress(0.0f);
String stateString = attempt.get(Keys.STATE_STRING);
taskStatus.setStateString(stateString);
boolean hasFailed =
attempt.get(Keys.TASK_STATUS).equals(Values.FAILED.name());
// Set the state failed/killed
if (hasFailed) {
taskStatus.setRunState(TaskStatus.State.FAILED);
} else {
taskStatus.setRunState(TaskStatus.State.KILLED);
}
// Get/Set the error msg
String diagInfo = attempt.get(Keys.ERROR);
taskStatus.setDiagnosticInfo(diagInfo); // diag info
synchronized (JobTracker.this) {
// II. Update the task status
job.updateTaskStatus(tip, taskStatus);
}
// III. Prevent the task from expiry
expireLaunchingTasks.removeTask(attemptId);
}
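The FAILED-versus-KILLED decision in Example 8 reduces to one string comparison. A minimal sketch of just that check (class and method names hypothetical; only the Values enum assumed); it flips the comparison so a missing status cannot throw a NullPointerException:

import org.apache.hadoop.mapred.JobHistory.Values;

public class AttemptOutcome {
    // Anything that is not FAILED is treated as KILLED, as in Example 8.
    static boolean hasFailed(String taskStatus) {
        // Constant-first equals() is null-safe, unlike attempt.get(...).equals(...).
        return Values.FAILED.name().equals(taskStatus);
    }

    public static void main(String[] args) {
        System.out.println(hasFailed(Values.FAILED.name())); // true  -> State.FAILED
        System.out.println(hasFailed(Values.KILLED.name())); // false -> State.KILLED
    }
}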
Example 9: logStarted
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Logs job as running
*/
public void logStarted() {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.JOB_STATUS},
new String[] {jobId.toString(),
Values.RUNNING.name()});
}
}
Example 10: logFinished
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Logs job finished event. Closes the job file in history.
* @param finishTime finish time of job in ms.
* @param finishedMaps number of maps successfully finished.
* @param finishedReduces number of reduces successfully finished.
* @param failedMaps number of failed map tasks (includes killed).
* @param failedReduces number of failed reduce tasks (includes killed).
* @param killedMaps number of killed map tasks.
* @param killedReduces number of killed reduce tasks.
* @param mapCounters the aggregate counters of the map tasks.
* @param reduceCounters the aggregate counters of the reduce tasks.
* @param counters the counters from the job
*/
public void logFinished(long finishTime,
int finishedMaps, int finishedReduces,
int failedMaps, int failedReduces,
int killedMaps, int killedReduces,
Counters mapCounters,
Counters reduceCounters,
Counters counters) {
if (disableHistory) {
return;
}
if (null != writers) {
log(writers, RecordTypes.Job,
new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
Keys.JOB_STATUS, Keys.FINISHED_MAPS,
Keys.FINISHED_REDUCES,
Keys.FAILED_MAPS, Keys.FAILED_REDUCES,
Keys.KILLED_MAPS, Keys.KILLED_REDUCES,
Keys.MAP_COUNTERS, Keys.REDUCE_COUNTERS,
Keys.COUNTERS},
new String[] {jobId.toString(), Long.toString(finishTime),
Values.SUCCESS.name(),
String.valueOf(finishedMaps),
String.valueOf(finishedReduces),
String.valueOf(failedMaps),
String.valueOf(failedReduces),
String.valueOf(killedMaps),
String.valueOf(killedReduces),
mapCounters.makeEscapedCompactString(),
reduceCounters.makeEscapedCompactString(),
counters.makeEscapedCompactString()},
true);
closeAndClear(writers);
}
// NOTE: history cleaning stuff deleted from here. We should do that
// somewhere else!
}
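Every log(...) call in these examples pairs a Keys[] array with a same-length String[] array, position by position. A minimal sketch of that convention (RecordSketch and the sample job id are hypothetical; the JobHistory.Keys and Values enums are assumed):

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.mapred.JobHistory.Keys;
import org.apache.hadoop.mapred.JobHistory.Values;

public class RecordSketch {
    public static void main(String[] args) {
        // The two parallel arrays mirror what Example 10 hands to log().
        Keys[] keys = {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS};
        String[] values = {"job_sample_0001",
                           String.valueOf(System.currentTimeMillis()),
                           Values.SUCCESS.name()};
        // LinkedHashMap keeps the original key order, like the arrays do.
        Map<Keys, String> record = new LinkedHashMap<Keys, String>();
        for (int i = 0; i < keys.length; i++) {
            record.put(keys[i], values[i]);
        }
        System.out.println(record);
    }
}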
Example 11: logTaskFailed
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log the task failure
*
* @param taskId the task that failed
* @param taskType the type of the task
* @param time the time of the failure
* @param error the error the task failed with
* @param failedDueToAttempt The attempt that caused the failure, if any
*/
public void logTaskFailed(TaskID taskId, String taskType, long time,
String error,
TaskAttemptID failedDueToAttempt) {
if (disableHistory) {
return;
}
JobID id = taskId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
String failedAttempt = failedDueToAttempt == null ?
"" :
failedDueToAttempt.toString();
log(writers, RecordTypes.Task,
new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
Keys.TASK_STATUS, Keys.FINISH_TIME,
Keys.ERROR, Keys.TASK_ATTEMPT_ID},
new String[]{ taskId.toString(), taskType,
Values.FAILED.name(),
String.valueOf(time) , error,
failedAttempt});
}
}
Example 12: logMapTaskFinished
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log finish time of map task attempt.
*
* @param taskAttemptId task attempt id
* @param finishTime finish time
* @param hostName host name
* @param taskType Whether the attempt is cleanup or setup or map
* @param stateString state string of the task attempt
* @param counter counters of the task attempt
*/
public void logMapTaskFinished(TaskAttemptID taskAttemptId,
long finishTime,
String hostName,
String taskType,
String stateString,
Counters counter) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.MapAttempt,
new Keys[]{ Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.FINISH_TIME, Keys.HOSTNAME,
Keys.STATE_STRING, Keys.COUNTERS},
new String[]{taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.SUCCESS.name(),
String.valueOf(finishTime), hostName,
stateString,
counter.makeEscapedCompactString()});
}
}
Example 13: logMapTaskFailed
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log task attempt failed event.
*
* @param taskAttemptId task attempt id
* @param timestamp timestamp
* @param hostName hostname of this task attempt.
* @param error error message if any for this task attempt.
* @param taskType Whether the attempt is cleanup or setup or map
*/
public void logMapTaskFailed(TaskAttemptID taskAttemptId,
long timestamp, String hostName,
String error, String taskType) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.MapAttempt,
new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
new String[]{ taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.FAILED.name(),
String.valueOf(timestamp),
hostName, error});
}
}
Example 14: logMapTaskKilled
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log task attempt killed event.
*
* @param taskAttemptId task attempt id
* @param timestamp timestamp
* @param hostName hostname of this task attempt.
* @param error error message if any for this task attempt.
* @param taskType Whether the attempt is cleanup or setup or map
*/
public void logMapTaskKilled(TaskAttemptID taskAttemptId,
long timestamp, String hostName,
String error, String taskType) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.MapAttempt,
new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.FINISH_TIME, Keys.HOSTNAME,
Keys.ERROR},
new String[]{ taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.KILLED.name(),
String.valueOf(timestamp),
hostName, error});
}
}
Example 15: logReduceTaskFinished
import org.apache.hadoop.mapred.JobHistory.Values; // import the required package/class
/**
* Log finished event of this task.
*
* @param taskAttemptId task attempt id
* @param shuffleFinished shuffle finish time
* @param sortFinished sort finish time
* @param finishTime finish time of task
* @param hostName host name where task attempt executed
* @param taskType Whether the attempt is cleanup or setup or reduce
* @param stateString the state string of the attempt
* @param counter counters of the attempt
*/
public void logReduceTaskFinished(TaskAttemptID taskAttemptId,
long shuffleFinished,
long sortFinished, long finishTime,
String hostName, String taskType,
String stateString, Counters counter) {
if (disableHistory) {
return;
}
JobID id = taskAttemptId.getJobID();
if (!this.jobId.equals(id)) {
throw new RuntimeException("JobId from task: " + id +
" does not match expected: " + jobId);
}
if (null != writers) {
log(writers, RecordTypes.ReduceAttempt,
new Keys[]{ Keys.TASK_TYPE, Keys.TASKID,
Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED,
Keys.FINISH_TIME, Keys.HOSTNAME,
Keys.STATE_STRING, Keys.COUNTERS},
new String[]{taskType,
taskAttemptId.getTaskID().toString(),
taskAttemptId.toString(),
Values.SUCCESS.name(),
String.valueOf(shuffleFinished),
String.valueOf(sortFinished),
String.valueOf(finishTime), hostName,
stateString,
counter.makeEscapedCompactString()});
}
}
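The three timestamps logged in Example 15 are enough to recover per-phase durations of a reduce attempt, given its start time from the corresponding start record. A minimal sketch with made-up millisecond values (class name and numbers hypothetical; plain JDK only):

public class ReducePhaseDurations {
    public static void main(String[] args) {
        // Sample timestamps in the order the phases complete.
        long startTime = 1000L;
        long shuffleFinished = 4000L;  // Keys.SHUFFLE_FINISHED
        long sortFinished = 5000L;     // Keys.SORT_FINISHED
        long finishTime = 9000L;       // Keys.FINISH_TIME
        System.out.println("shuffle: " + (shuffleFinished - startTime) + " ms");
        System.out.println("sort:    " + (sortFinished - shuffleFinished) + " ms");
        System.out.println("reduce:  " + (finishTime - sortFinished) + " ms");
    }
}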