This article collects typical usage examples of the Java class org.apache.hadoop.mapred.JobHistory.Keys. If you have been wondering what the Keys class does, how to use it, or what real usage looks like, the curated code examples below should help.
The Keys class belongs to the org.apache.hadoop.mapred.JobHistory package. 14 code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
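As the examples show, Keys is an enum whose constants name the fields of a parsed job-history record, and the JobHistory value classes expose get(Keys) / getLong(Keys) accessors over them. Here is a minimal sketch of that read pattern, assuming a JobHistory.Task obtained by parsing a history file (for instance via a listener as in Example 2); absent keys come back as "" from get and 0 from getLong, which is exactly what Examples 1 and 13 rely on:

import org.apache.hadoop.mapred.JobHistory;
import org.apache.hadoop.mapred.JobHistory.Keys;

public class TaskTimePrinter {
  // Sketch only: `task` is assumed to come from a parsed job-history file.
  static void printTaskTimes(JobHistory.Task task) {
    long startTime = task.getLong(Keys.START_TIME);   // 0 if not recorded
    long finishTime = task.getLong(Keys.FINISH_TIME); // 0 e.g. for failed tasks
    String status = task.get(Keys.TASK_STATUS);       // "" if not recorded
    System.out.println(task.get(Keys.TASKID) + " status=" + status
        + " start=" + startTime + " finish=" + finishTime);
  }
}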
Example 1: updateTip
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
private void updateTip(TaskInProgress tip, JobHistory.Task task) {
  long startTime = task.getLong(Keys.START_TIME);
  if (startTime != 0) {
    tip.setExecStartTime(startTime);
  }

  long finishTime = task.getLong(Keys.FINISH_TIME);
  // For failed tasks the finish time will be missing
  if (finishTime != 0) {
    tip.setExecFinishTime(finishTime);
  }

  String cause = task.get(Keys.TASK_ATTEMPT_ID);
  if (cause.length() > 0) {
    // A non-empty attempt id means this is a FAILED event
    TaskAttemptID id = TaskAttemptID.forName(cause);
    TaskStatus status = tip.getTaskStatus(id);
    synchronized (JobTracker.this) {
      // This adds the tip-failed event to the new log
      tip.getJob().failedTask(tip, id, status.getDiagnosticInfo(),
                              status.getPhase(), status.getRunState(),
                              status.getTaskTracker());
    }
  }
}
Example 2: handle
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
    throws IOException {
  if (recType.equals(JobHistory.RecordTypes.MapAttempt) ||
      recType.equals(JobHistory.RecordTypes.ReduceAttempt)) {
    if (failureType.equals(values.get(Keys.TASK_STATUS))) {
      String hostName = values.get(Keys.HOSTNAME);
      String taskid = values.get(Keys.TASKID);
      Set<String> tasks = badNodesToNumFailedTasks.get(hostName);
      if (tasks == null) {
        tasks = new TreeSet<String>();
        tasks.add(taskid);
        badNodesToNumFailedTasks.put(hostName, tasks);
      } else {
        tasks.add(taskid);
      }
    }
  }
}
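Example 2's handle method is the callback of a JobHistory.Listener. Below is a hedged sketch of how such a listener is typically driven, assuming the Hadoop 1.x-era static method JobHistory.parseHistoryFromFS(String, Listener, FileSystem); the class name HistoryScan and the path argument are illustrative, not from the source:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobHistory;
import org.apache.hadoop.mapred.JobHistory.Keys;

public class HistoryScan {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Each record parsed from the file is delivered to handle() as a
    // Map<Keys, String> -- the same shape Example 2 consumes.
    JobHistory.parseHistoryFromFS(args[0], new JobHistory.Listener() {
      public void handle(JobHistory.RecordTypes recType,
                         Map<Keys, String> values) throws IOException {
        if (recType == JobHistory.RecordTypes.Job
            && values.containsKey(Keys.JOB_STATUS)) {
          System.out.println(values.get(Keys.JOBID)
              + " -> " + values.get(Keys.JOB_STATUS));
        }
      }
    }, fs);
  }
}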
Example 3: getLastSuccessfulTaskAttempt
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
private Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
    JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv =
      taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Example 4: logInited
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs the launch time of the job.
 *
 * @param startTime start time of the job.
 * @param totalMaps total maps assigned by the jobtracker.
 * @param totalReduces total reduces.
 */
public void logInited(long startTime, int totalMaps, int totalReduces) {
  if (disableHistory) {
    return;
  }

  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS,
                    Keys.TOTAL_REDUCES, Keys.JOB_STATUS},
        new String[] {jobId.toString(), String.valueOf(startTime),
                      String.valueOf(totalMaps),
                      String.valueOf(totalReduces),
                      Values.PREP.name()});
  }
}
Example 5: logFailed
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs the job-failed event and closes the job history log file.
 * @param timestamp time when the job failure was detected, in ms.
 * @param finishedMaps number of finished map tasks.
 * @param finishedReduces number of finished reduce tasks.
 * @param counters the job counters.
 */
public void logFailed(long timestamp, int finishedMaps,
                      int finishedReduces, Counters counters) {
  if (disableHistory) {
    return;
  }

  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
                    Keys.JOB_STATUS, Keys.FINISHED_MAPS,
                    Keys.FINISHED_REDUCES, Keys.COUNTERS},
        new String[] {jobId.toString(),
                      String.valueOf(timestamp),
                      Values.FAILED.name(),
                      String.valueOf(finishedMaps),
                      String.valueOf(finishedReduces),
                      counters.makeEscapedCompactString()},
        true);
    closeAndClear(writers);
  }
}
Example 6: logKilled
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs the job-killed event and closes the job history log file.
 *
 * @param timestamp time when the job kill was issued, in ms.
 * @param finishedMaps number of finished map tasks.
 * @param finishedReduces number of finished reduce tasks.
 * @param counters the job counters.
 */
public void logKilled(long timestamp, int finishedMaps,
                      int finishedReduces, Counters counters) {
  if (disableHistory) {
    return;
  }

  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID,
                    Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS,
                    Keys.FINISHED_REDUCES, Keys.COUNTERS},
        new String[] {jobId.toString(),
                      String.valueOf(timestamp), Values.KILLED.name(),
                      String.valueOf(finishedMaps),
                      String.valueOf(finishedReduces),
                      counters.makeEscapedCompactString()},
        true);
    closeAndClear(writers);
  }
}
Example 7: logTaskStarted
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs the start time of a task (TIP).
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param startTime start time of the tip.
 * @param splitLocations split locations of the task's input.
 */
public void logTaskStarted(TaskID taskId, String taskType,
                           long startTime, String splitLocations) {
  if (disableHistory) {
    return;
  }

  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }

  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[] {Keys.TASKID, Keys.TASK_TYPE,
                    Keys.START_TIME, Keys.SPLITS},
        new String[] {taskId.toString(), taskType,
                      String.valueOf(startTime),
                      splitLocations});
  }
}
Example 8: logTaskFinished
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs the finish time of a task.
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param finishTime finish time of the task, in ms
 * @param counters the task counters
 */
public void logTaskFinished(TaskID taskId, String taskType,
                            long finishTime, Counters counters) {
  if (disableHistory) {
    return;
  }

  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }

  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[] {Keys.TASKID, Keys.TASK_TYPE,
                    Keys.TASK_STATUS, Keys.FINISH_TIME,
                    Keys.COUNTERS},
        new String[] {taskId.toString(), taskType, Values.SUCCESS.name(),
                      String.valueOf(finishTime),
                      counters.makeEscapedCompactString()});
  }
}
Example 9: logTaskUpdates
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Updates the finish time of a task.
 * @param taskId task id
 * @param finishTime finish time of the task, in ms
 */
public void logTaskUpdates(TaskID taskId, long finishTime) {
  if (disableHistory) {
    return;
  }

  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }

  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[] {Keys.TASKID, Keys.FINISH_TIME},
        new String[] {taskId.toString(),
                      String.valueOf(finishTime)});
  }
}
Example 10: log
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Logs a number of keys and values with the record. This method allows
 * the write to be done synchronously.
 * @param writers the writers to send the data to
 * @param recordType the type to log
 * @param keys keys to log
 * @param values values to log
 * @param sync if true, blocks until the data is written
 */
private void log(ArrayList<PrintWriter> writers, RecordTypes recordType,
                 Keys[] keys, String[] values, boolean sync) {
  StringBuffer buf = new StringBuffer(recordType.name());
  buf.append(JobHistory.DELIMITER);
  for (int i = 0; i < keys.length; i++) {
    buf.append(keys[i]);
    buf.append("=\"");
    values[i] = JobHistory.escapeString(values[i]);
    buf.append(values[i]);
    buf.append("\"");
    buf.append(JobHistory.DELIMITER);
  }
  buf.append(JobHistory.LINE_DELIMITER_CHAR);

  for (PrintWriter out : writers) {
    LogTask task = new LogTask(out, buf.toString());
    if (sync) {
      task.run();
    } else {
      fileManager.addWriteTask(task);
    }
  }
}
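For reference, the loop above serializes each record as the record-type name followed by KEY="value" pairs. Below is a standalone sketch of that layout, assuming DELIMITER is a single space and LINE_DELIMITER_CHAR is '.' (their values in the Hadoop 1.x codebase); escaping via JobHistory.escapeString is omitted for brevity, and the job id shown is illustrative:

import org.apache.hadoop.mapred.JobHistory.Keys;

public class RecordFormatSketch {
  // Mirrors the buffer-building loop in Example 10, with the delimiter
  // constants inlined under the stated assumption.
  static String formatRecord(String recordType, Keys[] keys, String[] values) {
    StringBuilder buf = new StringBuilder(recordType);
    for (int i = 0; i < keys.length; i++) {
      buf.append(' ').append(keys[i]).append("=\"").append(values[i]).append('"');
    }
    return buf.append(" .").toString();
  }
  // For Example 4's logInited call this yields a line such as:
  //   Job JOBID="job_201301011234_0001" LAUNCH_TIME="1357040400000"
  //       TOTAL_MAPS="10" TOTAL_REDUCES="1" JOB_STATUS="PREP" .
}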
Example 11: getLastSuccessfulTaskAttempt
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
    JobHistory.Task task) {
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv =
      taskAttempts.entrySet().iterator();
  for (int i = 0; i < size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt != null && attempt.getValues() != null
        && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
        && attempt.getValues().get(JobHistory.Keys.TASK_STATUS)
            .equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  return null;
}
Example 12: processTaskAttempt
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
/**
 * Adds a task attempt to the listener.
 */
private void processTaskAttempt(String taskAttemptId,
                                JobHistory.TaskAttempt attempt) {
  TaskAttemptID id = TaskAttemptID.forName(taskAttemptId);

  // Check if the transaction for this attempt can be committed
  String taskStatus = attempt.get(Keys.TASK_STATUS);
  if (taskStatus.length() > 0) {
    // A non-empty status means this is an update event
    if (taskStatus.equals(Values.SUCCESS.name())) {
      // Mark this attempt as hanging
      hangingAttempts.put(id.getTaskID().toString(), taskAttemptId);
      addSuccessfulAttempt(jip, id, attempt);
    } else {
      addUnsuccessfulAttempt(jip, id, attempt);
      numEventsRecovered += 2;
    }
  } else {
    createTaskAttempt(jip, id, attempt);
  }
}
Example 13: updateJob
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
private JobStatusChangeEvent updateJob(JobInProgress jip,
                                       JobHistory.JobInfo job) {
  // Change the job priority
  String jobpriority = job.get(Keys.JOB_PRIORITY);
  JobPriority priority = JobPriority.valueOf(jobpriority);
  // It's important to update this via the jobtracker's api, as it will
  // take care of updating the event listeners too
  setJobPriority(jip.getJobID(), priority);

  // Save the previous job status
  JobStatus oldStatus = (JobStatus) jip.getStatus().clone();

  // Set the start/launch time only if there are recovered tasks;
  // increment the job's restart count
  jip.updateJobInfo(job.getLong(JobHistory.Keys.SUBMIT_TIME),
                    job.getLong(JobHistory.Keys.LAUNCH_TIME));

  // Save the new job status
  JobStatus newStatus = (JobStatus) jip.getStatus().clone();

  return new JobStatusChangeEvent(jip, EventType.START_TIME_CHANGED, oldStatus,
                                  newStatus);
}
Example 14: checkAndInit
import org.apache.hadoop.mapred.JobHistory.Keys; // import the dependent package/class
private void checkAndInit() throws IOException {
  String jobStatus = this.job.get(Keys.JOB_STATUS);
  if (Values.PREP.name().equals(jobStatus)) {
    hasUpdates = true;
    LOG.info("Calling init from RM for job " + jip.getJobID().toString());
    try {
      initJob(jip);
    } catch (Throwable t) {
      LOG.error("Job initialization failed : \n"
          + StringUtils.stringifyException(t));
      jip.status.setFailureInfo("Job Initialization failed: \n"
          + StringUtils.stringifyException(t));
      failJob(jip);
      throw new IOException(t);
    }
  }
}