This article collects typical usage examples of the Java class org.apache.hadoop.mapred.JobHistory.RecordTypes. If you are wondering what exactly RecordTypes is for, or how to use it, the curated class code examples below may help.
RecordTypes belongs to the org.apache.hadoop.mapred.JobHistory package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
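Before the examples, a minimal sketch of the role RecordTypes plays may help: every record the loggers below write begins with the name of a RecordTypes enum constant (values such as Job, Task, and MapAttempt appear in the examples; the full set depends on the Hadoop version), so a history reader can dispatch on the first token of each line. The helper class and method names here are hypothetical, not part of Hadoop:

import org.apache.hadoop.mapred.JobHistory.RecordTypes;

public class RecordTypeSketch {

  /**
   * Returns the record type of a history line, assuming the line starts
   * with the enum name followed by a space delimiter, as the log(...)
   * method in Example 7 below writes it.
   */
  static RecordTypes classify(String historyLine) {
    String firstToken = historyLine.split(" ", 2)[0];
    return RecordTypes.valueOf(firstToken); // e.g. "Job" -> RecordTypes.Job
  }

  public static void main(String[] args) {
    // A made-up history line for illustration.
    System.out.println(classify("Job JOBID=\"job_200901010000_0001\" JOB_STATUS=\"RUNNING\" ."));
  }
}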
Example 1: logInited
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the launch time of the job.
 *
 * @param startTime start time of the job.
 * @param totalMaps total number of maps assigned by the jobtracker.
 * @param totalReduces total number of reduces.
 */
public void logInited(long startTime, int totalMaps, int totalReduces) {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS,
                    Keys.TOTAL_REDUCES, Keys.JOB_STATUS},
        new String[] {jobId.toString(), String.valueOf(startTime),
                      String.valueOf(totalMaps),
                      String.valueOf(totalReduces),
                      Values.PREP.name()});
  }
}
Example 2: logFailed
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the job failed event. Closes the job history log file.
 * @param timestamp time when the job failure was detected, in ms.
 * @param finishedMaps number of finished map tasks.
 * @param finishedReduces number of finished reduce tasks.
 * @param counters counters of the job.
 */
public void logFailed(long timestamp, int finishedMaps,
                      int finishedReduces, Counters counters) {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
                    Keys.JOB_STATUS, Keys.FINISHED_MAPS,
                    Keys.FINISHED_REDUCES, Keys.COUNTERS},
        new String[] {jobId.toString(),
                      String.valueOf(timestamp),
                      Values.FAILED.name(),
                      String.valueOf(finishedMaps),
                      String.valueOf(finishedReduces),
                      counters.makeEscapedCompactString()},
        true);
    closeAndClear(writers);
  }
}
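Note that the counters argument is serialized with makeEscapedCompactString(), which renders the counters in a compact form and escapes it so the result can sit safely inside a quoted history value. A minimal, self-contained illustration using the old mapred Counters API (the group and counter names are made up):

import org.apache.hadoop.mapred.Counters;

public class CountersSketch {
  public static void main(String[] args) {
    Counters counters = new Counters();
    // Hypothetical group and counter names, just to produce some content.
    counters.incrCounter("org.example.JobCounters", "RECORDS_WRITTEN", 42L);
    // Compact, escaped form suitable for embedding in a history line.
    System.out.println(counters.makeEscapedCompactString());
  }
}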
Example 3: logKilled
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the job killed event. Closes the job history log file.
 *
 * @param timestamp time when the job kill was issued, in ms.
 * @param finishedMaps number of finished map tasks.
 * @param finishedReduces number of finished reduce tasks.
 * @param counters counters of the job.
 */
public void logKilled(long timestamp, int finishedMaps,
                      int finishedReduces, Counters counters) {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID,
                    Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS,
                    Keys.FINISHED_REDUCES, Keys.COUNTERS},
        new String[] {jobId.toString(),
                      String.valueOf(timestamp), Values.KILLED.name(),
                      String.valueOf(finishedMaps),
                      String.valueOf(finishedReduces),
                      counters.makeEscapedCompactString()},
        true);
    closeAndClear(writers);
  }
}
Example 4: logTaskStarted
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the start time of a task (TIP).
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param startTime start time of the tip.
 * @param splitLocations locations of the task's input splits.
 */
public void logTaskStarted(TaskID taskId, String taskType,
                           long startTime, String splitLocations) {
  if (disableHistory) {
    return;
  }
  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
                   Keys.START_TIME, Keys.SPLITS},
        new String[]{taskId.toString(), taskType,
                     String.valueOf(startTime),
                     splitLocations});
  }
}
Example 5: logTaskFinished
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the finish time of a task.
 * @param taskId task id
 * @param taskType MAP or REDUCE
 * @param finishTime finish time of the task, in ms
 * @param counters counters of the task
 */
public void logTaskFinished(TaskID taskId, String taskType,
                            long finishTime, Counters counters) {
  if (disableHistory) {
    return;
  }
  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
                   Keys.TASK_STATUS, Keys.FINISH_TIME,
                   Keys.COUNTERS},
        new String[]{taskId.toString(), taskType, Values.SUCCESS.name(),
                     String.valueOf(finishTime),
                     counters.makeEscapedCompactString()});
  }
}
Example 6: logTaskUpdates
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Updates the finish time of a task.
 * @param taskId task id
 * @param finishTime finish time of the task, in ms
 */
public void logTaskUpdates(TaskID taskId, long finishTime) {
  if (disableHistory) {
    return;
  }
  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.Task,
        new Keys[]{Keys.TASKID, Keys.FINISH_TIME},
        new String[]{taskId.toString(),
                     String.valueOf(finishTime)});
  }
}
Example 7: log
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs a number of keys and values with the record. This method can
 * optionally write synchronously.
 * @param writers the writers to send the data to
 * @param recordType the record type to log
 * @param keys keys to log
 * @param values values to log
 * @param sync if true, blocks until the data is written
 */
private void log(ArrayList<PrintWriter> writers, RecordTypes recordType,
                 Keys[] keys, String[] values, boolean sync) {
  StringBuffer buf = new StringBuffer(recordType.name());
  buf.append(JobHistory.DELIMITER);
  for (int i = 0; i < keys.length; i++) {
    buf.append(keys[i]);
    buf.append("=\"");
    values[i] = JobHistory.escapeString(values[i]);
    buf.append(values[i]);
    buf.append("\"");
    buf.append(JobHistory.DELIMITER);
  }
  buf.append(JobHistory.LINE_DELIMITER_CHAR);
  for (PrintWriter out : writers) {
    LogTask task = new LogTask(out, buf.toString());
    if (sync) {
      task.run();
    } else {
      fileManager.addWriteTask(task);
    }
  }
}
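To make the on-disk record format concrete, here is a self-contained sketch that mirrors the buffer construction above. The delimiter values are assumptions modeled on Apache Hadoop 0.20-era JobHistory constants (a single-space DELIMITER and a '.' line terminator), escaping of values is omitted for brevity, and the job id is made up:

public class HistoryLineSketch {

  // Assumed values of JobHistory.DELIMITER and JobHistory.LINE_DELIMITER_CHAR.
  private static final String DELIMITER = " ";
  private static final char LINE_DELIMITER_CHAR = '.';

  // Builds one history record line: TYPE KEY="value" ... terminated by '.'
  static String buildLine(String recordType, String[] keys, String[] values) {
    StringBuilder buf = new StringBuilder(recordType);
    buf.append(DELIMITER);
    for (int i = 0; i < keys.length; i++) {
      buf.append(keys[i]).append("=\"").append(values[i]).append('"');
      buf.append(DELIMITER);
    }
    buf.append(LINE_DELIMITER_CHAR);
    return buf.toString();
  }

  public static void main(String[] args) {
    // Prints: Job JOBID="job_200901010000_0001" JOB_STATUS="RUNNING" .
    System.out.println(buildLine("Job",
        new String[] {"JOBID", "JOB_STATUS"},
        new String[] {"job_200901010000_0001", "RUNNING"}));
  }
}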
Example 8: logStarted
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the job as running.
 */
public void logStarted() {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.JOB_STATUS},
        new String[] {jobId.toString(),
                      Values.RUNNING.name()});
  }
}
Example 9: logFinished
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the job as finished. Closes the job history file.
 * @param finishTime finish time of the job, in ms.
 * @param finishedMaps number of maps that finished successfully.
 * @param finishedReduces number of reduces that finished successfully.
 * @param failedMaps number of failed map tasks (includes killed).
 * @param failedReduces number of failed reduce tasks (includes killed).
 * @param killedMaps number of killed map tasks.
 * @param killedReduces number of killed reduce tasks.
 * @param mapCounters counters of the map tasks.
 * @param reduceCounters counters of the reduce tasks.
 * @param counters the counters from the job.
 */
public void logFinished(long finishTime,
                        int finishedMaps, int finishedReduces,
                        int failedMaps, int failedReduces,
                        int killedMaps, int killedReduces,
                        Counters mapCounters,
                        Counters reduceCounters,
                        Counters counters) {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.FINISH_TIME,
                    Keys.JOB_STATUS, Keys.FINISHED_MAPS,
                    Keys.FINISHED_REDUCES,
                    Keys.FAILED_MAPS, Keys.FAILED_REDUCES,
                    Keys.KILLED_MAPS, Keys.KILLED_REDUCES,
                    Keys.MAP_COUNTERS, Keys.REDUCE_COUNTERS,
                    Keys.COUNTERS},
        new String[] {jobId.toString(), Long.toString(finishTime),
                      Values.SUCCESS.name(),
                      String.valueOf(finishedMaps),
                      String.valueOf(finishedReduces),
                      String.valueOf(failedMaps),
                      String.valueOf(failedReduces),
                      String.valueOf(killedMaps),
                      String.valueOf(killedReduces),
                      mapCounters.makeEscapedCompactString(),
                      reduceCounters.makeEscapedCompactString(),
                      counters.makeEscapedCompactString()},
        true);
    closeAndClear(writers);
  }
  // NOTE: history cleaning stuff deleted from here. We should do that
  // somewhere else!
}
Example 10: logJobPriority
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the job's priority.
 * @param jobid id of the job.
 * @param priority the job's priority.
 */
public void logJobPriority(JobID jobid, JobPriority priority) {
  if (disableHistory) {
    return;
  }
  if (null != writers) {
    log(writers, RecordTypes.Job,
        new Keys[] {Keys.JOBID, Keys.JOB_PRIORITY},
        new String[] {jobId.toString(), priority.toString()});
  }
}
Example 11: logTaskFailed
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs a task failure.
 *
 * @param taskId the task that failed
 * @param taskType the type of the task
 * @param time the time of the failure
 * @param error the error the task failed with
 * @param failedDueToAttempt the attempt that caused the failure, if any
 */
public void logTaskFailed(TaskID taskId, String taskType, long time,
                          String error,
                          TaskAttemptID failedDueToAttempt) {
  if (disableHistory) {
    return;
  }
  JobID id = taskId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    String failedAttempt = failedDueToAttempt == null ?
        "" :
        failedDueToAttempt.toString();
    log(writers, RecordTypes.Task,
        new Keys[]{Keys.TASKID, Keys.TASK_TYPE,
                   Keys.TASK_STATUS, Keys.FINISH_TIME,
                   Keys.ERROR, Keys.TASK_ATTEMPT_ID},
        new String[]{taskId.toString(), taskType,
                     Values.FAILED.name(),
                     String.valueOf(time), error,
                     failedAttempt});
  }
}
Example 12: logMapTaskStarted
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the start time of this map task attempt.
 *
 * @param taskAttemptId task attempt id
 * @param startTime start time of the task attempt as reported by the task tracker.
 * @param trackerName name of the tracker executing the task attempt.
 * @param httpPort http port of the task tracker executing the task attempt
 * @param taskType whether the attempt is a cleanup, setup, or map task
 */
public void logMapTaskStarted(TaskAttemptID taskAttemptId, long startTime,
                              String trackerName, int httpPort,
                              String taskType) {
  if (disableHistory) {
    return;
  }
  JobID id = taskAttemptId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.MapAttempt,
        new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
                   Keys.TASK_ATTEMPT_ID, Keys.START_TIME,
                   Keys.TRACKER_NAME, Keys.HTTP_PORT},
        new String[]{taskType,
                     taskAttemptId.getTaskID().toString(),
                     taskAttemptId.toString(),
                     String.valueOf(startTime), trackerName,
                     httpPort == -1 ? "" :
                         String.valueOf(httpPort)});
  }
}
Example 13: logMapTaskFinished
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs the finish time of a map task attempt.
 *
 * @param taskAttemptId task attempt id
 * @param finishTime finish time
 * @param hostName host name
 * @param taskType whether the attempt is a cleanup, setup, or map task
 * @param stateString state string of the task attempt
 * @param counter counters of the task attempt
 */
public void logMapTaskFinished(TaskAttemptID taskAttemptId,
                               long finishTime,
                               String hostName,
                               String taskType,
                               String stateString,
                               Counters counter) {
  if (disableHistory) {
    return;
  }
  JobID id = taskAttemptId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.MapAttempt,
        new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
                   Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
                   Keys.FINISH_TIME, Keys.HOSTNAME,
                   Keys.STATE_STRING, Keys.COUNTERS},
        new String[]{taskType,
                     taskAttemptId.getTaskID().toString(),
                     taskAttemptId.toString(),
                     Values.SUCCESS.name(),
                     String.valueOf(finishTime), hostName,
                     stateString,
                     counter.makeEscapedCompactString()});
  }
}
Example 14: logMapTaskFailed
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs a task attempt failed event.
 *
 * @param taskAttemptId task attempt id
 * @param timestamp timestamp
 * @param hostName hostname of this task attempt.
 * @param error error message, if any, for this task attempt.
 * @param taskType whether the attempt is a cleanup, setup, or map task
 */
public void logMapTaskFailed(TaskAttemptID taskAttemptId,
                             long timestamp, String hostName,
                             String error, String taskType) {
  if (disableHistory) {
    return;
  }
  JobID id = taskAttemptId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.MapAttempt,
        new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
                   Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
                   Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
        new String[]{taskType,
                     taskAttemptId.getTaskID().toString(),
                     taskAttemptId.toString(),
                     Values.FAILED.name(),
                     String.valueOf(timestamp),
                     hostName, error});
  }
}
Example 15: logMapTaskKilled
import org.apache.hadoop.mapred.JobHistory.RecordTypes; // import the required package/class
/**
 * Logs a task attempt killed event.
 *
 * @param taskAttemptId task attempt id
 * @param timestamp timestamp
 * @param hostName hostname of this task attempt.
 * @param error error message, if any, for this task attempt.
 * @param taskType whether the attempt is a cleanup, setup, or map task
 */
public void logMapTaskKilled(TaskAttemptID taskAttemptId,
                             long timestamp, String hostName,
                             String error, String taskType) {
  if (disableHistory) {
    return;
  }
  JobID id = taskAttemptId.getJobID();
  if (!this.jobId.equals(id)) {
    throw new RuntimeException("JobId from task: " + id +
                               " does not match expected: " + jobId);
  }
  if (null != writers) {
    log(writers, RecordTypes.MapAttempt,
        new Keys[]{Keys.TASK_TYPE, Keys.TASKID,
                   Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS,
                   Keys.FINISH_TIME, Keys.HOSTNAME,
                   Keys.ERROR},
        new String[]{taskType,
                     taskAttemptId.getTaskID().toString(),
                     taskAttemptId.toString(),
                     Values.KILLED.name(),
                     String.valueOf(timestamp),
                     hostName, error});
  }
}